repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
Creatide/RenderFarts
|
src/__init__.py
|
<filename>src/__init__.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTIBILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
# Addon metadata dictionary read by Blender's addon manager.
bl_info = {
"name" : "RenderFarts",
"author" : "<NAME>",
"description" : "Render image in parts.",
"blender" : (2, 80, 0),
"version" : (0, 0, 6),
"location" : "Properties > Render > RenderFarts",
"warning" : "",
"category" : "Render"
}
# ------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------
import os, webbrowser
from datetime import datetime
import numpy as np
import bpy
from bpy.app.handlers import persistent
from bpy.types import Operator, Panel, UIList, PropertyGroup
from bpy.props import StringProperty, IntProperty, BoolProperty, PointerProperty, CollectionProperty
# ------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------
# RenderChunk object to store data for image parts
class RF_RenderPart():
    """One tile of the final image: a name plus its render-border rectangle.

    Border coordinates are fractions of the full frame in [0, 1].
    """

    def __init__(self, name, border_min_x, border_max_x, border_min_y, border_max_y):
        self.name = name
        self.border_min_x = border_min_x
        self.border_max_x = border_max_x
        self.border_min_y = border_min_y
        self.border_max_y = border_max_y


# Module-level queue of parts that still need rendering.
render_parts = []
# Utilities
# ----------------------------------------------------
class RF_Utils():
    """Static helper namespace for the RenderFarts addon: introspection
    helpers, file scanning, render-part bookkeeping and image merging."""

    @staticmethod
    def list_class_methods(cls, dunders_included=True):
        """Print and return the callable attribute names of *cls*.

        dunders_included -- when False, filter out __dunder__ names.
        """
        if dunders_included is True:
            method_list = [func for func in dir(cls) if callable(getattr(cls, func))]
        else:
            method_list = [func for func in dir(cls)
                           if callable(getattr(cls, func)) and not func.startswith("__")]
        print(method_list)
        return method_list

    @staticmethod
    def list_object_attributes(obj):
        """Print every attribute of *obj* with its value (debug aid)."""
        for att in dir(obj):
            print(att, getattr(obj, att))

    @staticmethod
    def flat_list(l, iteration=1):
        """Flatten a nested list *iteration* levels deep.

        Returns None (after printing the exception) when the structure cannot
        be flattened that far -- kept as best-effort behaviour.
        """
        flattened_list = l
        try:
            for i in range(iteration):
                flattened_list = [y for x in flattened_list for y in x]
            return flattened_list
        except Exception as e:
            print(e)

    @staticmethod
    def validate_filename(name):
        """Replace illegal filename characters with '_' and lower-case *name*."""
        for char in " !@#$%^&*(){}:\";'[]<>,.\\/?":
            name = name.replace(char, '_')
        return name.lower().strip()

    @staticmethod
    def get_script_dir():
        """Return the directory of the currently-executing addon file."""
        script_file = os.path.realpath(__file__)
        return os.path.dirname(script_file)

    @staticmethod
    def get_blend_file_folder():
        """Return the folder of the currently opened .blend file."""
        filepath = bpy.data.filepath
        return os.path.dirname(filepath)

    @staticmethod
    def get_files_in_folder(path, file_extension=True):
        """Return image filenames directly inside *path* (non-recursive).

        file_extension -- when False, strip the extension from each name.
        https://blenderartists.org/t/how-to-read-files-name-in-current-folder/1117301/3
        """
        path = os.path.realpath(bpy.path.abspath(path))
        render_files = []
        for root, dirs, files in os.walk(path):
            # Limit the scan to the selected directory only.
            del dirs[:]
            for file in files:
                if file.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
                    if file_extension is not True:
                        render_files.append(os.path.splitext(os.path.basename(file))[0])
                    else:
                        render_files.append(file)
        return render_files

    @staticmethod
    def refresh_render_list(scene):
        """Rebuild scene.render_list from image files that carry the
        configured filename prefix, and update the progress counters."""
        render_files = RF_Utils.get_files_in_folder(scene.render_settings.render_folder)
        scene.render_list.clear()
        prefix = str(scene.render_settings.filename_prefix).replace(" ", "")
        for file in render_files:
            # Only names that start with the prefix belong to this job.
            if str(file).startswith(prefix):
                item = scene.render_list.add()
                item.image_name = file
        # FIX: count only prefix-matching parts; the original used
        # len(render_files) (every image in the folder), which disagreed with
        # the completeness check below.
        scene.render_settings.rendered_parts_count = len(scene.render_list)
        if len(scene.render_list) == scene.render_settings.total_parts_count:
            scene.render_settings.all_parts_rendered = True
        else:
            scene.render_settings.all_parts_rendered = False

    @staticmethod
    def refresh_render_parts(scene):
        """Recompute the module-level render_parts queue of missing tiles.

        A part is skipped when an image file with its name (extension
        ignored) already exists in the render list.
        """
        parts_count = scene.render_settings.parts_count
        scene.render_settings.total_parts_count = parts_count * parts_count
        leading_zeros = len(str(scene.render_settings.total_parts_count)) - 1
        RF_Utils.refresh_render_list(scene)
        render_parts.clear()
        for row in range(parts_count):
            for column in range(parts_count):
                filename = scene.render_settings.filename_prefix + "_{}_{}".format(
                    str(row + 1).zfill(leading_zeros), str(column + 1).zfill(leading_zeros))
                if not any(filename == os.path.splitext(listitem.image_name)[0]
                           for listitem in scene.render_list):
                    # Border coordinates are fractions of the frame in [0, 1].
                    border_min_x = (1 / parts_count) * row
                    border_max_x = (1 / parts_count) * (row + 1)
                    border_min_y = (1 / parts_count) * column
                    border_max_y = (1 / parts_count) * (column + 1)
                    render_parts.append(RF_RenderPart(filename, border_min_x, border_max_x,
                                                      border_min_y, border_max_y))

    @staticmethod
    def create_dummy_image(image_name, image_format, path):
        """Create an empty placeholder file so other machines skip this part.

        Not bulletproof, but prevents most duplicate renders when several
        computers share the output folder. (image_name is unused -- the name
        is already part of *path*.)
        """
        filepath = os.path.realpath(bpy.path.abspath(path)) + '.' + str(image_format).lower()
        open(filepath, 'a').close()

    @staticmethod
    def get_all_image_parts(context, file_extension=True):
        """Return the image filenames found in the configured render folder.

        FIX: forward file_extension -- the original accepted but ignored it.
        """
        scene = context.scene
        return RF_Utils.get_files_in_folder(scene.render_settings.render_folder, file_extension)

    @staticmethod
    def merge_image_parts(context):
        """Stitch every rendered part into one final image and save it.

        Preconditions (checked first): all parts rendered, crop-to-border
        enabled, even parts_count. Blender blocks while merging.
        """
        scene = context.scene
        rndr = scene.render
        RF_Utils.refresh_render_list(scene)
        if (scene.render_settings.all_parts_rendered is False
                or scene.render_settings.crop_border is False
                or scene.render_settings.parts_count % 2 != 0):
            RF_Utils.show_message_box("The requirements for the merge process are not met", "Unable to Start Merge Process", "ERROR")
            return False
        # Collect the rendered image names from the UI list.
        rendered_images = []
        for item in scene.render_list:
            rendered_images.append(item.image_name)
        parts_count = scene.render_settings.parts_count
        total_parts_count = scene.render_settings.total_parts_count = parts_count * parts_count
        if rendered_images:
            RF_Utils.show_message_box("It may take some time to merge the images and Blender will be frozen for the duration of the process...", "Merge Process Started", "ERROR")
            # Output image dimensions (respecting the resolution percentage).
            final_image_name = 'FINAL_EPIC_' + scene.render_settings.filename_prefix + rndr.file_extension
            final_image_filepath = os.path.join(scene.render_settings.render_folder, final_image_name)
            final_resolution_multiplier = rndr.resolution_percentage / 100
            final_image_width = int(rndr.resolution_x * final_resolution_multiplier)
            final_image_height = int(rndr.resolution_y * final_resolution_multiplier)
            part_width = int(round(final_image_width / parts_count))
            part_height = int(round(final_image_height / parts_count))
            # Re-order part names into row-major order (top row first).
            rendered_images_ordered = []
            for y in range(parts_count, 0, -1):
                for x in range(parts_count):
                    rendered_images_ordered.append(rendered_images[(y - 1) + (x * parts_count)])
            # Load each part and group its flat pixel buffer into RGBA quads.
            part_pixels = []
            for image in rendered_images_ordered:
                filepath = os.path.join(scene.render_settings.render_folder, image)
                filepath = os.path.realpath(bpy.path.abspath(filepath))
                loaded_pixels = list(bpy.data.images.load(filepath, check_existing=False).pixels)
                part_pixels.append([loaded_pixels[ipx:ipx + 4] for ipx in range(0, len(loaded_pixels), 4)])
            try:
                # Assemble final rows by walking the parts left-to-right.
                # https://stackoverflow.com/q/60188880/1629596
                final_image_pixels = []
                for i in range(parts_count, 0, -1):
                    for row in range(part_height):
                        part_switch = -1
                        px_counter = -1
                        for col in range(final_image_width):
                            if col % part_width == 0:
                                part_switch += 1
                                px_counter = 0
                            if i - 1 == 0:
                                px_arr = i - 1 + part_switch
                            else:
                                px_arr = ((i - 1) * parts_count) + part_switch
                            target_pixel = row * part_width + px_counter
                            final_image_pixels.append(part_pixels[px_arr][target_pixel])
                            px_counter += 1
            except Exception as e:
                excepName = type(e).__name__
                RF_Utils.show_message_box("Cannot merge images properly: " + excepName, "Merge Failed", "ERROR")
                print(e)
            final_image_pixels = RF_Utils.flat_list(final_image_pixels, 1)
            # Only save when the pixel count matches the expected RGBA size.
            if len(final_image_pixels) == final_image_width * final_image_height * 4:
                try:
                    output_image = bpy.data.images.new(final_image_name, alpha=True, width=final_image_width, height=final_image_height)
                    output_image.alpha_mode = 'STRAIGHT'
                    output_image.pixels = final_image_pixels
                    output_image.filepath_raw = final_image_filepath
                    output_image.file_format = scene.render.image_settings.file_format
                    output_image.save()
                    # Open the output folder when the merge completes.
                    path = os.path.realpath(bpy.path.abspath(scene.render_settings.render_folder))
                    webbrowser.open('file:///' + path)
                except Exception as e:
                    excepName = type(e).__name__
                    RF_Utils.show_message_box("Cannot merge images properly: " + excepName, "Merge Failed", "ERROR")
                    print(e)

    @staticmethod
    def show_message_box(message="", title="Message", icon='INFO'):
        """Show a popup message window to the user."""
        def draw(self, context):
            self.layout.label(text=message)
        bpy.context.window_manager.popup_menu(draw, title=title, icon=icon)
# ------------------------------------------------------------------------
# Properties (_PROP_)
# ------------------------------------------------------------------------
class RF_PROP_RenderSettings(PropertyGroup):
    """Per-scene settings for the addon, attached as Scene.render_settings."""

    # Output directory for rendered parts.
    render_folder: StringProperty(
        name="Render Folder",
        description="Choose a Output Folder for " + bl_info['name'],
        default="//",
        maxlen=1024,
        subtype='DIR_PATH'
    )
    # Identification prefix so part files can be told apart from other images.
    filename_prefix: StringProperty(
        name="Filename Prefix",
        description="Identification prefix string for " + bl_info['name'] + " filename",
        default="Fart"
    )
    # Grid subdivision per axis (total parts = parts_count ** 2).
    parts_count: IntProperty(
        name="Parts Count",
        description="Total Parts Count (e.g. 4 x 4 = 16)",
        default=4,
        min=1
    )
    # Progress counter shown in the UI.
    rendered_parts_count: IntProperty(
        name="Rendered Parts Count",
        default=0,
        min=0
    )
    total_parts_count: IntProperty(
        name="Total Parts Count",
        description="Total parts count (e.g. 4 x 4 = 16)",
        default=0,
        min=1
    )
    crop_border: BoolProperty(
        name="Crop to Render Region",
        description="Crop Render to Parts",
        default=True
    )
    overwrite_files: BoolProperty(
        name="Overwrite Images",
        description="Overwrite existing image files",
        default=False
    )
    show_render_window: BoolProperty(
        name="Show Render Window",
        description="Show render window while rendering",
        default=False
    )
    # Internal flags driving the modal render loop.
    stop_rendering: BoolProperty(
        name="Rendering in Progress",
        default=False
    )
    all_parts_rendered: BoolProperty(
        name="All parts rendered",
        default=False
    )
class RF_PROP_RenderListItem(PropertyGroup):
    """One entry of the rendered-parts UIList: a filename and its id."""

    image_name: StringProperty()
    image_id: IntProperty()
# ------------------------------------------------------------------------
# Operators (_OT_)
# ------------------------------------------------------------------------
# OT: Initialize
# ----------------------------------------------------
class RF_OT_Init(Operator):
    """Internal operator that refreshes the render list on startup/load."""

    bl_label = "Initialize " + bl_info['name']
    bl_idname = "rp.init"
    bl_description = "Initialize " + bl_info['name'] + " addon"
    bl_options = {'INTERNAL'}

    def execute(self, context):
        return {'FINISHED'}

    def invoke(self, context, event):
        print(self.bl_label)
        RF_Utils.refresh_render_list(context.scene)
        return {'FINISHED'}
# OT: Start Rendering Images/Chunks
# ----------------------------------------------------
# https://blender.stackexchange.com/a/153254/497
class RF_OT_StartRender(Operator):
    """Modal operator that renders the missing image parts one at a time.

    Render handlers flip the state flags; a window-manager timer drives the
    loop, picking the next unrendered part whenever the previous one is done.
    https://blender.stackexchange.com/a/153254/497
    """

    bl_label = "Start Rendering"
    bl_idname = "rp.start_render"
    bl_description = "Start Rendering Process"

    _timer = None
    stop = None
    rendering = None
    render_complete = None

    def pre(self, dummy, event):
        # A render job has started.
        self.render_complete = False
        self.rendering = True

    def post(self, dummy, event):
        self.rendering = False

    def complete(self, dummy, event):
        self.render_complete = True

    def cancelled(self, dummy, event):
        print('RENDER CANCELLED')
        self.stop = True

    def remove_handlers(self, context, event):
        # Detach all render callbacks and the modal timer.
        bpy.app.handlers.render_pre.remove(self.pre)
        bpy.app.handlers.render_post.remove(self.post)
        bpy.app.handlers.render_complete.remove(self.complete)
        bpy.app.handlers.render_cancel.remove(self.cancelled)
        context.window_manager.event_timer_remove(self._timer)

    def execute(self, context):
        print(self.bl_label)
        context.scene.render_settings.stop_rendering = False
        # Defined here (not at class level) so a button press resets state.
        self.stop = False
        self.rendering = False
        self.render_complete = True
        bpy.app.handlers.render_pre.append(self.pre)
        bpy.app.handlers.render_post.append(self.post)
        bpy.app.handlers.render_complete.append(self.complete)
        bpy.app.handlers.render_cancel.append(self.cancelled)
        # Timer + modal handler keep the loop alive between renders.
        self._timer = context.window_manager.event_timer_add(0.5, window=context.window)
        context.window_manager.modal_handler_add(self)
        return {"RUNNING_MODAL"}

    def modal(self, context, event):
        scene = context.scene
        rndr = scene.render
        # User abort: clean up, the in-flight render still finishes.
        if event.type in {'ESC'} or scene.render_settings.stop_rendering is True:
            self.remove_handlers(context, event)
            self.report({'WARNING'}, 'User interuption')
            RF_Utils.show_message_box("Render will be stop after the current frame finishes", self.bl_description, "ERROR")
            return {'FINISHED'}
        if event.type == 'TIMER':
            # Idle between renders: start the next missing part, if any.
            if self.render_complete is True and self.rendering is False and scene.render_settings.stop_rendering is False:
                RF_Utils.refresh_render_parts(scene)
                # Cancelled, or nothing left to render -> finish and clean up.
                if not render_parts or self.stop is True:
                    self.remove_handlers(context, event)
                    return {"FINISHED"}
                try:
                    # Next part: output path plus its border rectangle.
                    chunk = render_parts[0]
                    filepath = os.path.join(scene.render_settings.render_folder, chunk.name)
                    rndr.filepath = filepath
                    rndr.border_min_x = chunk.border_min_x
                    rndr.border_max_x = chunk.border_max_x
                    rndr.border_min_y = chunk.border_min_y
                    rndr.border_max_y = chunk.border_max_y
                    rndr.use_border = True
                    rndr.use_crop_to_border = scene.render_settings.crop_border
                    if scene.render_settings.show_render_window is True:
                        # TODO: Render image skips last dummy image while render window is active.
                        bpy.ops.render.render("INVOKE_DEFAULT", write_still=True)
                    else:
                        # Reserve the output slot so other machines skip it.
                        RF_Utils.create_dummy_image(chunk.name, rndr.image_settings.file_format, filepath)
                        bpy.ops.render.render(write_still=True)
                except Exception as e:
                    RF_Utils.show_message_box(str(e)[:-1], "Render Failed", "ERROR")
                    return {"FINISHED"}
        return {"PASS_THROUGH"}
# OT: Stop Rendering Process
# ----------------------------------------------------
class RF_OT_StopRender(Operator):
    """Request that the part-render loop stop after the current part."""

    bl_label = "Stop Rendering"
    bl_idname = "rp.stop_render"
    bl_description = "Stop the rendering process"

    @classmethod
    def poll(self, context):
        # Only enabled once at least one part has been rendered.
        return context.scene.render_settings.rendered_parts_count > 0

    def execute(self, context):
        print(self.bl_label)
        context.scene.render_settings.stop_rendering = True
        RF_Utils.show_message_box("Render will be stop after the current frame finishes",
                                  self.bl_description, "ERROR")
        return {'FINISHED'}
# OT: Refresh Render List
# ----------------------------------------------------
class RF_OT_RefreshList(Operator):
    """Rescan the render folder and rebuild the rendered-parts list."""

    bl_label = "Refresh List"
    bl_idname = "rp.refresh_list"
    bl_description = "Refresh rendering list"

    def execute(self, context):
        print(self.bl_label)
        RF_Utils.refresh_render_list(context.scene)
        return {'FINISHED'}
# OT: Open Render Folder
# ----------------------------------------------------
class RF_OT_OpenRenderFolder(Operator):
    """Open the configured render folder in the system file browser."""

    bl_label = "Open Folder"
    bl_idname = "rp.open_render_folder"
    bl_description = "Open render folder"

    def execute(self, context):
        scene = context.scene
        print(self.bl_label + ': ' + scene.render_settings.render_folder)
        # Resolve Blender's '//' relative prefix before handing to the OS.
        path = os.path.realpath(bpy.path.abspath(scene.render_settings.render_folder))
        webbrowser.open('file:///' + path)
        return {'FINISHED'}
# OT: Reset Render Border
# ----------------------------------------------------
class RF_OT_ResetBorder(Operator):
    """Reset the render border so it covers the whole camera frame."""

    bl_label = "Reset Render Border"
    bl_idname = "rp.reset_border"
    bl_description = "Reset Render Border to the current camera resolution"

    def execute(self, context):
        print(self.bl_label)
        rndr = context.scene.render
        # FIX: border_min/max_* are frame fractions in [0, 1]. The original
        # assigned resolution_x/resolution_y (pixel counts) and only worked
        # because Blender clamps the values to 1.0.
        rndr.border_min_x = 0.0
        rndr.border_min_y = 0.0
        rndr.border_max_x = 1.0
        rndr.border_max_y = 1.0
        return {'FINISHED'}
# OT: Merge Images
# ----------------------------------------------------
class RF_OT_MergeImages(Operator):
    """Merge every rendered part into the final image."""

    bl_label = "Merge Images"
    bl_idname = "rp.merge_images"
    bl_description = "Requirements: \n1. ALL parts must be rendered.\n2. Rendered with \"Crop to Render Region\" turned ON.\n3. Even numbers in the image size\n4. Even number in the \"Parts Count\""

    @classmethod
    def poll(self, context):
        # Merging only supports the cropped workflow for now.
        return context.scene.render_settings.crop_border is True

    def execute(self, context):
        print(self.bl_label)
        RF_Utils.merge_image_parts(context)
        return {'FINISHED'}
# ------------------------------------------------------------------------
# Panel (_PT_)
# ------------------------------------------------------------------------
class RF_PT_Panel(Panel):
    """Addon UI panel shown under Properties > Render."""

    bl_label = bl_info['name']
    bl_idname = "RF_PT_Panel"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        scene = context.scene

        # Render controls.
        row = layout.row()
        row.label(text="Render:")
        row = layout.row()
        box = row.box()
        row.scale_y = 1.5
        box.alignment = 'CENTER'
        box.operator("rp.start_render", icon="RENDER_STILL")
        box.operator("rp.stop_render", icon="X")

        # Settings.
        row = layout.row()
        row.label(text="Settings:")
        row = layout.row()
        box = row.box()
        box.prop(scene.render_settings, "render_folder")
        box.prop(scene.render_settings, "parts_count")
        box.prop(scene.render_settings, "crop_border")
        box.prop(scene.render_settings, "show_render_window")

        # Rendering progress.
        row = layout.row()
        row.label(text="Rendering Process:")
        row = layout.row()
        box = row.box()
        box.alignment = 'RIGHT'
        if scene.render_settings.rendered_parts_count == 0:
            process_counter = '0'
        else:
            process_counter = (str(scene.render_settings.rendered_parts_count)
                               + ' / ' + str(scene.render_settings.total_parts_count))
        box.label(text="Rendered Parts: " + process_counter)
        box.template_list("RF_UL_RenderList", "", scene, "render_list",
                          scene, "render_list_index", rows=3, maxrows=3)
        box.operator("rp.refresh_list", icon="FILE_REFRESH")
        box.operator("rp.open_render_folder", icon="FILE_FOLDER")
        box.operator("rp.reset_border", icon="SELECT_SET")

        # Finalize / merge.
        row = layout.row()
        row.label(text="Finalize Image:")
        row = layout.row()
        box = row.box()
        row.scale_y = 1.5
        box.operator("rp.merge_images", icon="FILE_IMAGE")

        # Footer with addon name and version.
        row = layout.row()
        row.alignment = 'RIGHT'
        row.label(text=bl_info['name'] + ' - ' + str(bl_info['version']).strip('()'))
# ------------------------------------------------------------------------
# UIList (_UL_)
# ------------------------------------------------------------------------
class RF_UL_RenderList(UIList):
    """UIList drawing one rendered-part filename per row."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        split = layout.split(factor=0.7)
        split.label(text=item.image_name, icon='IMAGE_DATA')
# ------------------------------------------------------------------------
# Registration
# ------------------------------------------------------------------------
@persistent
def init_renderparts_member(dummy):
    """load_post handler: refresh the render list for the loaded file."""
    bpy.ops.rp.init('INVOKE_DEFAULT')
# Every class this addon registers, in registration order.
classes = (
# Operators
RF_OT_Init,
RF_OT_StartRender,
RF_OT_StopRender,
RF_OT_RefreshList,
RF_OT_OpenRenderFolder,
RF_OT_ResetBorder,
RF_OT_MergeImages,
# Panel
RF_PT_Panel,
# List
RF_PROP_RenderSettings,
RF_PROP_RenderListItem,
RF_UL_RenderList,
)
def register():
    """Register all classes, the scene properties and the load handler."""
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
    # Scene-level addon state.
    bpy.types.Scene.render_settings = PointerProperty(type=RF_PROP_RenderSettings)
    bpy.types.Scene.render_list = CollectionProperty(type=RF_PROP_RenderListItem)
    bpy.types.Scene.render_list_index = IntProperty(name="Index for render_list", default=0)
    # Refresh the image list whenever a .blend file is loaded.
    bpy.app.handlers.load_post.append(init_renderparts_member)
def unregister():
    """Tear down everything register() created, classes in reverse order."""
    from bpy.utils import unregister_class
    for cls in reversed(classes):
        unregister_class(cls)
    del bpy.types.Scene.render_settings
    del bpy.types.Scene.render_list
    del bpy.types.Scene.render_list_index
    bpy.app.handlers.load_post.remove(init_renderparts_member)


if __name__ == "__main__":
    register()
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/mpi_estimate.py
|
<filename>_includes/code/02_parallel_jobs/mpi_estimate.py
#!/usr/bin/env python3
import sys
import random
import math
from mpi4py import MPI
# Module-level MPI state: every rank runs this on import.
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
def estimate_pi(total_count):
    """Return how many of *total_count* random unit-square points fall
    inside the unit circle (Monte-Carlo sampling)."""
    count_inside = 0
    for _ in range(0, total_count):
        x = random.random()
        y = random.random()
        if math.sqrt(x * x + y * y) < 1:
            count_inside += 1
    return count_inside
if __name__=='__main__':
    # FIXES: (1) argv was only parsed when rank == 1, so every other rank
    # (including root, which builds the partitions) used the default;
    # (2) 'comm.partitions(data, root=0)' called a nonexistent method with
    # an undefined name -- the local counts must be gathered to root;
    # (3) 'counts' was never defined on root.
    n_iterations = 10000
    if len(sys.argv) > 1:
        n_iterations = int(sys.argv[1])
    if rank == 0:
        # Root splits the work evenly (ceil may round the total up slightly).
        partitions = [math.ceil(n_iterations / size) for _ in range(size)]
    else:
        partitions = None
    partitions = comm.bcast(partitions, root=0)
    # Each rank samples its own share, then root collects the hit counts.
    local_count = estimate_pi(partitions[rank])
    counts = comm.gather(local_count, root=0)
    if rank == 0:
        total_count = math.ceil(n_iterations / size)
        my_pi = 4 * sum(counts) / (size * total_count)
        print("[using %i cores] pi is %f from %i samples" % (size, my_pi, total_count))
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/mpi_print_hostname.py
|
from mpi4py import MPI
def print_hostname():
    """Print this MPI rank, the communicator size and the processor name,
    then synchronise all ranks on a barrier."""
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    hname = MPI.Get_processor_name()
    print("this is rank = %2i (total: %2i) running on %s" % (rank, size, hname))
    comm.Barrier()


if __name__ == '__main__':
    print_hostname()
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/threads_numpi.py
|
#!/usr/bin/env python3
import sys
import random
import numpy as np
import math
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool as Pool
def estimate_pi(total_count):
    """Return the number of *total_count* random points that land strictly
    inside the unit circle (vectorised with NumPy float32)."""
    xs = np.float32(np.random.uniform(size=total_count))
    ys = np.float32(np.random.uniform(size=total_count))
    radii = np.sqrt(xs * xs + ys * ys)
    return int(np.count_nonzero(radii < 1.0))
if __name__=='__main__':
    ncores = cpu_count()
    n_iterations = 10000
    if len(sys.argv) > 1:
        n_iterations = int(sys.argv[1])
    # Split the work evenly per core; ceil may round the total up slightly.
    partitions = [math.ceil(n_iterations / ncores) for _ in range(ncores)]
    pool = Pool(processes=ncores)
    sizeof = np.dtype(np.float32).itemsize
    n_iterations = sum(partitions)
    # Fan the partitions out across the thread pool.
    counts = pool.map(estimate_pi, partitions)
    my_pi = 4 * sum(counts) / sum(partitions)
    print("[threads version ] required memory %.3f MB" % (n_iterations * sizeof * 3 / (1024 * 1024)))
    print("[using %3i cores ] pi is %f from %i samples" % (ncores, my_pi, n_iterations))
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/parallel_numpi.py
|
<filename>_includes/code/02_parallel_jobs/parallel_numpi.py
#!/usr/bin/env python3
import sys
import numpy as np
from multiprocessing import cpu_count, Pool
np.random.seed(2017)
def inside_circle(total_count):
    """Count how many of *total_count* uniform random points fall inside
    (or on) the unit circle."""
    xs = np.float32(np.random.uniform(size=total_count))
    ys = np.float32(np.random.uniform(size=total_count))
    radii = np.sqrt(xs * xs + ys * ys)
    return len(radii[np.where(radii <= 1.0)])
def estimate_pi(n_samples, n_cores):
    """Estimate pi by spreading *n_samples* across *n_cores* worker
    processes and combining their inside-circle counts."""
    partitions = [int(n_samples / n_cores) for _ in range(n_cores)]
    pool = Pool(processes=n_cores)
    counts = pool.map(inside_circle, partitions)
    total_count = sum(partitions)
    return 4.0 * sum(counts) / total_count
if __name__=='__main__':
    ncores = cpu_count()
    n_samples = 10000
    if len(sys.argv) > 1:
        n_samples = int(sys.argv[1])
    # FIX: removed a dead 'partitions' list that was computed here but never
    # used -- estimate_pi() builds its own partitioning.
    sizeof = np.dtype(np.float32).itemsize
    my_pi = estimate_pi(n_samples, ncores)
    print("[parallel version] required memory %.3f MB" % (n_samples * sizeof * 3 / (1024 * 1024)))
    print("[using %3i cores ] pi is %f from %i samples" % (ncores, my_pi, n_samples))
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/generate_scrambled_data.py
|
#!/usr/bin/env python3
import sys
import numpy as np
np.random.seed(2017)
def inside_circle(total_count):
    """Sample *total_count* uniform points; return (count inside or on the
    unit circle, x coordinates, y coordinates)."""
    xs = np.float32(np.random.uniform(size=total_count))
    ys = np.float32(np.random.uniform(size=total_count))
    radii = np.sqrt(xs * xs + ys * ys)
    hits = len(radii[np.where(radii <= 1.0)])
    return hits, xs, ys
if __name__=='__main__':
    n_samples = 4*1024*1024
    file_name = "pi_estimate.log"
    if "help" in " ".join(sys.argv):
        print("usage: generate_scrambled_data.py <optional:file_name>")
        print("""\n script generates file <file_name> of 0.5 GB
that contains blocks of random bytes followed
by a newline and an estimate of pi""")
        sys.exit(0)
    if len(sys.argv) > 1:
        file_name = sys.argv[1]
    sizeof = np.dtype(np.float32).itemsize
    targetsize_byte = .5*1024*1024*1024
    string_to_write = ""
    loop_count = 0
    # Accumulate scrambled sample data plus pi estimates until ~0.5 GB.
    while len(string_to_write) < targetsize_byte:
        count, data, more = inside_circle(n_samples)
        # FIX: ndarray.tostring() was removed in NumPy 2.0; tobytes() returns
        # the same bytes on every NumPy version.
        string_to_write += str(data.tobytes())
        string_to_write += str(more.tobytes())
        pi_estimate = (4.0 * count / n_samples)
        string_to_write += ("\n%f\n" % pi_estimate)
        if loop_count % 10 == 0:
            print(">> %f GB generated" % (len(string_to_write)/(1024*1024*1024.)))
        loop_count += 1
    print(">> storing %f GB to %s" % (len(string_to_write)/(1024*1024*1024.), file_name))
    # FIX: 'with' guarantees the handle is closed even if the write fails.
    with open(file_name, "w") as fh:
        fh.writelines(string_to_write)
    sys.exit(0)
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/serial_estimate.py
|
<filename>_includes/code/02_parallel_jobs/serial_estimate.py
#!/usr/bin/env python3
import sys
import random
import math
def estimate_pi(total_count):
    """Monte-Carlo estimate of pi from *total_count* random samples.

    Draws x then y per iteration and returns 4 * inside / total.
    """
    count_inside = 0
    for _ in range(0, total_count):
        x = random.random()
        y = random.random()
        if math.sqrt(x * x + y * y) < 1:
            count_inside += 1
    return 4.0 * count_inside / total_count
if __name__=='__main__':
    # Sample count defaults to 10000, overridable from the command line.
    n_iterations = 10000
    if len(sys.argv) > 1:
        n_iterations = int(sys.argv[1])
    my_pi = estimate_pi(n_iterations)
    print("[serial version] pi is %f from %i samples" % (my_pi, n_iterations))
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/distributed.dask_numpi.py
|
<reponame>bkmgit/hpc-parallel-novice
#!/usr/bin/env python3
import sys
import math
import dask.array as da
from dask.distributed import Client
import numpy as np
np.random.seed(2017)
da.random.seed(2017)
def inside_circle(total_count, chunk_size=-1):
    """Count points inside (or on) the unit circle using chunked dask arrays.

    chunk_size -- dask chunk length; -1 keeps everything in one chunk.
    """
    x = da.random.uniform(size=(total_count), chunks=(chunk_size))
    y = da.random.uniform(size=(total_count), chunks=(chunk_size))
    radii = da.sqrt(x * x + y * y)
    hits = da.where(radii <= 1.0)
    indices = np.array(hits[0])
    return len(radii[indices])
def estimate_pi(total_count, chunk_size=-1):
    """Estimate pi from *total_count* samples drawn in dask chunks."""
    return 4.0 * inside_circle(total_count, chunk_size) / total_count
def main():
    """Parse CLI args, connect to the dask scheduler and print the estimate."""
    n_samples = 10000
    if len(sys.argv) > 1:
        n_samples = int(sys.argv[1])
    # Default chunk size: 10% of the sample count.
    chunksize = .1 * n_samples
    if len(sys.argv) > 2:
        chunksize = int(sys.argv[2])
    # NOTE(review): hard-coded scheduler address -- consider a CLI argument.
    client = Client("tcp://192.168.178.25:8786")
    my_pi = estimate_pi(n_samples, chunksize)
    sizeof = np.dtype(np.float32).itemsize
    print("[parallel version] required memory %.3f MB" % (n_samples * sizeof * 3 / (1024 * 1024)))
    print("[distributed dask] pi is %f from %i samples" % (my_pi, n_samples))


if __name__=='__main__':
    main()
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/count_pi_estimates.py
|
<reponame>bkmgit/hpc-parallel-novice
#!/usr/bin/env python3
import sys
import os
if __name__=='__main__':
    file_name = ""
    if len(sys.argv) < 2:
        print("usage: count_pi_estimates.py <file_name>")
    else:
        file_name = sys.argv[1]
    if os.path.exists(file_name):
        print("opening", file_name)
        # FIX: 'with' closes the handle (the original never closed it).
        with open(file_name) as current_file:
            current_file_content = current_file.read().split("\n")
        # Count lines that look like a pi estimate (start with "3.1").
        count = 0
        for line in current_file_content:
            if line.startswith("3.1"):
                count += 1
        print(count)
        sys.exit(0)
    else:
        print("%s was not found" % file_name)
        sys.exit(1)
|
bkmgit/hpc-parallel-novice
|
downloads/count_lines.py
|
from __future__ import print_function
import os
import sys
def lines_count(afilename):
    """Return the number of lines in *afilename*, or 0 if it does not exist.

    FIXES: the original docstring was copy-pasted from a word-count helper,
    and the file handle was never closed.
    """
    if not os.path.exists(afilename):
        return 0
    with open(afilename) as fh:
        return len(fh.readlines())
def main():
    """CLI entry point: print the line count of each file argument, then a total."""
    if len(sys.argv) < 2:
        print("usage: python count_lines.py <file(s)>)")
        sys.exit(1)
    total = 0
    for infile in sys.argv[1:]:
        n_lines = lines_count(infile)
        print(n_lines, infile)
        total += n_lines
    print(total, "total")
    sys.exit(0)


if __name__ == '__main__':
    main()
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/count_pylibs_annotated.py
|
<reponame>bkmgit/hpc-parallel-novice
import os
import sys
import glob
import re
def load_text():
    """Find the sys.path entry containing os.py and join the text of every
    .py file in that directory into one newline-separated string.

    Returns the empty list it started with when no such directory exists
    (inconsistent return type, kept for backward compatibility).

    FIXES: 'str + sys.path' raised TypeError (str + list) on the failure
    path; file handles are now closed via 'with'.
    """
    path_of_ospy = ""
    text = []
    for d in sys.path:
        if os.path.isdir(d) and os.path.exists(d + "/os.py"):
            path_of_ospy = d
            break
    if not path_of_ospy or not os.path.exists(path_of_ospy):
        print("no modules found in " + str(sys.path))
        return text
    std_files = glob.glob(path_of_ospy + "/*.py")
    for fn in std_files:
        with open(fn, "r") as fnf:
            text.append("".join(fnf.readlines()))
    return "\n".join(text)
def word_count(text):
    """Return the number of fragments produced by splitting *text* on word
    boundaries (letters only, no digits/underscores)."""
    word_pattern = r'\b[^\W\d_]+\b'
    fragments = re.split(word_pattern, text)
    return len(fragments)
@profile
def main():
    """Load the stdlib sources and report character and word counts.

    NOTE(review): @profile is injected by line_profiler/kernprof at run
    time; running this file directly raises NameError -- presumably
    intentional for this annotated teaching copy.
    """
    text = load_text()
    nchars = len(text)
    nwords = word_count(text)
    print("%i characters and %i words found in standard python lib" % (nchars, nwords))
    # Exit 0 only when some text was actually loaded.
    sys.exit(0 if len(text) else 1)


if __name__ == '__main__':
    main()
|
bkmgit/hpc-parallel-novice
|
_includes/code/02_parallel_jobs/print_hostname_and_time.py
|
<gh_stars>10-100
#/usr/bin/env python3
from mpi4py import MPI
from datetime import datetime
def print_hostname():
    """Print this MPI rank, world size, host name and a timestamp, then
    synchronize all ranks on a barrier."""
    world = MPI.COMM_WORLD
    my_rank = world.Get_rank()
    world_size = world.Get_size()
    host = MPI.Get_processor_name()
    stamp = datetime.now().isoformat(' ')
    print("this is rank = %2i (total: %2i) running on %s at %s"
          % (my_rank, world_size, host, stamp))
    # Keep all ranks together before returning.
    world.Barrier()
|
hezhenke/technical-py
|
main.py
|
<filename>main.py
import random, itertools, math, os, json, urllib2, sys, copy
from datetime import date, datetime
import time as timer
import numpy as np
import multiprocessing as mp
def readable_date(unix):
return datetime.fromtimestamp(int(unix)).strftime('%Y-%m-%d %H:%M:%S')
class Indicator(object):
    """Abstract base for market indicators fed one bar at a time via tick()."""

    def __init__(self):
        raise NotImplementedError("Must be implemented on per-indicator basis")

    def tick(self, tick, past):
        """Delegate the bar to the subclass, then advance the day counter."""
        self.process(tick, past)
        self.days += 1

    def process(self, data):
        raise NotImplementedError("Must be implemented on per-indicator basis")

    def get(self):
        """Return the accumulated indicator series."""
        return self.values

    def x_axis(self):
        # NOTE(review): subclasses assign a list to self.x_axis, shadowing
        # this method; it is only reachable before that assignment.
        return self.x_axis
class SMA(Indicator):
    """Simple moving average with cross-over / cross-under detection."""

    def __init__(self, period):
        self.period = period
        self.days = 0
        self.values = []
        self.x_axis = []
        self.under = False
        self.over = False
        self.crossed_over = False
        self.crossed_under = False

    def process(self, tick, past):
        """Append the next SMA value once enough history has accumulated."""
        elapsed = self.days - 1
        if elapsed == self.period:
            # Seed with the plain average of everything seen so far.
            total = sum(bar['close'] for bar in past)
            self.values.append(total / len(past))
            self.x_axis.append(self.days)
        elif elapsed > self.period:
            # Cross flags are only true on the bar the cross happens.
            self.crossed_over = self.crossed_under = False
            previous = self.values[-1]
            sma = previous + ((tick['close'] - previous) / self.period)
            self.values.append(sma)
            self.x_axis.append(self.days)
            close = tick['close']
            if close > sma and not self.over:
                self.crossed_over, self.over, self.under = True, True, False
            elif close < sma and not self.under:
                self.crossed_under, self.under, self.over = True, True, False
class EMA(Indicator):
    """Exponential moving average, seeded with a simple average."""

    def __init__(self, period):
        self.period = period
        self.days = 0
        self.values = []
        self.x_axis = []

    def process(self, tick, past):
        """Seed with an SMA at `period` bars, then apply the EMA recurrence."""
        elapsed = self.days - 1
        if elapsed == self.period:
            self.values.append(sum(bar['close'] for bar in past) / len(past))
            self.x_axis.append(self.days)
        elif elapsed > self.period:
            weight = 2 / (float(self.period) + 1)
            prev = self.values[-1]
            self.values.append(((tick['close'] - prev) * weight) + prev)
            self.x_axis.append(self.days)
class RSI(Indicator):
    """Relative Strength Index with Wilder smoothing, plus adaptive
    overbought/oversold bands derived from an EMA of the RSI itself
    (mean of the RSI-EMA +/- 2 standard deviations)."""

    def __init__(self, period):
        self.period = period
        self.days = 0
        self.values = []        # RSI series
        self.x_axis = []        # day index for each RSI value
        self.ema = []           # EMA of the RSI series
        self.ema_x_axis = []
        self.sma = 0            # running SMA of the RSI series
        self.avg_gains = []     # Wilder-smoothed average gains
        self.avg_losses = []    # Wilder-smoothed average losses
        self.conditions_x = []  # day index for the band values below
        self.overbought = []    # upper band values
        self.oversold = []      # lower band values
        self.is_oversold = False
        self.is_overbought = False

    def process(self, tick, past):
        # Seed phase: after exactly `period` bars, compute the initial
        # average gain/loss over the accumulated history.
        if self.days - 1 == self.period:
            losses = 0
            gains = 0
            # NOTE(review): seeding starts from the LAST bar's close and then
            # walks past[1:], so the first delta is past[1] - past[-1];
            # past[0]['close'] looks like the intended start — confirm.
            prev_price = past[-1]['close']
            for price in past[1:]:
                price = price['close']
                price_change = price - prev_price
                if price_change > 0:
                    gains += price_change
                else:
                    losses += abs(price_change)
                prev_price = price
            self.avg_gains.append(gains / self.period)
            self.avg_losses.append(losses / self.period)
        elif self.days - 1 > self.period:
            # Steady state: Wilder smoothing of gains/losses, then RSI.
            price_change = tick['close'] - past[-2]['close']
            gain = price_change
            gain = gain if gain > 0 else 0
            loss = price_change
            loss = abs(loss) if loss < 0 else 0
            avg_gain = ((self.avg_gains[-1] * (self.period - 1)) + gain) / self.period
            self.avg_gains.append(avg_gain)
            avg_loss = ((self.avg_losses[-1] * (self.period - 1)) + loss) / self.period
            self.avg_losses.append(avg_loss)
            if avg_loss == 0:
                # HACK: avoids division by zero; canonical RSI would be 100
                # here, this substitution yields a different value.
                avg_loss = 1
            rsi = 100 - (100 / (1 + (avg_gain / avg_loss)))
            self.values.append(rsi)
            self.x_axis.append(self.days)
            # Maintain an SMA of the RSI series itself (seed, then increment).
            if len(self.values) == self.period:
                self.sma = sum(self.values) / len(self.values)
            elif len(self.values) > self.period:
                # NOTE(review): this incremental update uses the difference of
                # the last two RSI values, not a drop-the-oldest window update.
                self.sma = self.sma + ((self.values[-1] - self.values[-2]) / self.period)
                self.ema_x_axis.append(self.days)
                # for the first ema calculation the ema is equal to the sma
                if len(self.ema) == 0:
                    self.ema.append(self.sma)
                else:
                    multiplier = 2 / (float(self.period) + 1)
                    self.ema.append(((self.values[-1] - self.ema[-1]) * multiplier) + self.ema[-1])
                # Once enough EMA points exist, derive the adaptive bands
                # from the mean and stddev of all prior EMA values.
                if len(self.ema) > 2:
                    rsi_avg = sum(self.ema[:-1]) / len(self.ema[:-1])
                    variance = 0
                    i = 0
                    for rsi in self.ema[:-1]:
                        variance += math.pow(rsi - rsi_avg, 2)
                        i += 1
                    # Sample standard deviation (divides by i - 1).
                    stddev = math.sqrt(variance / (i - 1))
                    self.conditions_x.append(self.days)
                    overbought_level = rsi_avg + 2 * stddev
                    oversold_level = rsi_avg - 2 * stddev
                    self.is_overbought = self.values[-1] > overbought_level
                    self.is_oversold = self.values[-1] < oversold_level
                    self.overbought.append(overbought_level)
                    self.oversold.append(oversold_level)
class Volume(Indicator):
    """Per-bar volume series with up/down bar coloring and a 20-period EMA."""

    def __init__(self):
        self.values = []       # raw volume per bar
        self.x_axis = []
        self.colors = []       # 'g' for up bars, 'r' for down bars
        self.days = 0
        self.ema_period = 20
        self.ema = 0           # NOTE(review): unused; the EMA series lives in ema_values
        self.ema_values = []
        self.ema_x_axis = []

    def process(self, tick, past):
        self.x_axis.append(self.days)
        self.values.append(tick['volume'])
        # Color by bar direction (close vs open).
        if tick['close'] > tick['open']:
            self.colors.append('g')
        else:
            self.colors.append('r')
        if self.days + 1 == self.ema_period:
            # Seed both the SMA and the EMA with the plain average of the
            # first ema_period bars (self.sma is first created here).
            self.sma = sum(self.values) / len(self.values)
            self.ema_values.append(self.sma)
            self.ema_x_axis.append(self.days)
        elif self.days + 1 > self.ema_period:
            # Incremental SMA update, then a standard EMA step.
            self.sma = self.sma + ((self.values[-1] - self.values[-2]) / self.ema_period)
            self.ema_x_axis.append(self.days)
            multiplier = 2 / (float(self.ema_period) + 1)
            self.ema_values.append(((self.values[-1] - self.ema_values[-1]) * multiplier) + self.ema_values[-1])
class SupportResistance(Indicator):
    """Swing high/low tracker: alternates between extending the current
    extreme and confirming a new swing once price reverses by at least the
    fraction given by move_size."""

    def __init__(self, move_size):
        self.lows = []          # [day, price] swing lows
        self.highs = []         # [day, price] swing highs
        self.start_price = 0    # NOTE(review): never used after init
        self.move_size = move_size
        self.days = 0
        self.hit_low = False    # True while the latest confirmed swing is a low
        self.hit_high = False   # True while the latest confirmed swing is a high
        self.new_high = False   # True only on the bar a new swing high is confirmed
        self.new_low = False    # True only on the bar a new swing low is confirmed

    def process(self, tick, past):
        if self.days == 0:
            # Seed both lists with the first close.
            self.highs.append([self.days, tick['close']])
            self.lows.append([self.days, tick['close']])
        else:
            self.new_high = self.new_low = False
            # Order matters: first try to extend the current extreme, then
            # test whether the reversal is large enough to confirm a swing.
            if not self.hit_low and tick['high'] > self.highs[-1][1]:
                self.highs[-1] = ([self.days, tick['high']])
            elif not self.hit_high and tick['low'] < self.lows[-1][1]:
                self.lows[-1] = ([self.days, tick['low']])
            elif not self.hit_high and (tick['high'] * (1 - self.move_size)) > self.lows[-1][1]:
                self.highs.append([self.days, tick['high']])
                self.hit_high = True
                self.new_high = True
                self.hit_low = False
            elif not self.hit_low and tick['low'] < (self.highs[-1][1] * (1 - self.move_size)):
                self.lows.append([self.days, tick['low']])
                self.hit_high = False
                self.new_low = True
                self.hit_low = True

    def higher_high(self):
        """True when the two most recent swing highs are ascending."""
        if len(self.highs) < 2:
            return False
        return self.highs[-1][1] > self.highs[-2][1]

    def lower_high(self):
        """True when the two most recent swing highs are descending."""
        if len(self.highs) < 2:
            return False
        return self.highs[-1][1] < self.highs[-2][1]

    def higher_low(self):
        """True when the two most recent swing lows are ascending."""
        if len(self.lows) < 2:
            return False
        return self.lows[-1][1] > self.lows[-2][1]

    def lower_low(self):
        """True when the two most recent swing lows are descending."""
        if len(self.lows) < 2:
            return False
        return self.lows[-1][1] < self.lows[-2][1]

    def get(self):
        """Return both swing lists keyed by 'highs' and 'lows'."""
        return {
            'highs': self.highs,
            'lows': self.lows
        }
class Agent(object):
    """Feeds bars to a set of indicators and executes trading instructions
    against a shared portfolio."""

    def __init__(self, indicators, instructions, portfolio, ticker):
        self.indicators = indicators
        self.instructions = instructions
        self.ticker = ticker
        self.portfolio = portfolio
        self.ticks = []
        self.num_ticks = 0

    def tick(self, data):
        """Record one bar, update every indicator, then evaluate the rules."""
        self.num_ticks += 1
        self.ticks.append(data)
        for entry in self.indicators:
            entry['instance'].tick(data, self.ticks)
        self.process_instructions(data)

    def process_instructions(self, data):
        """Apply each instruction whose conditions all hold (short-circuits)."""
        for rule in self.instructions:
            satisfied = all(cond.check(self.indicators) for cond in rule['conditions'])
            if not satisfied:
                continue
            price, when = data['close'], self.num_ticks
            if self.portfolio.in_long and rule['in_long'] == 'cover':
                self.portfolio.close_long(self.ticker, price, when)
            if self.portfolio.in_short and rule['in_short'] == 'cover':
                self.portfolio.close_short(self.ticker, price, when)
            if not self.portfolio.in_long and rule['no_position'] == 'long':
                self.portfolio.go_long(self.ticker, price, when)
            if not self.portfolio.in_short and rule['no_position'] == 'short':
                self.portfolio.go_short(self.ticker, price, when)
class Portfolio(object):
    """Cash balance plus at most one open long OR short position at a time.

    position_size is reset to the full balance after every open/close, so
    each trade commits the whole account.
    """

    def __init__(self, balance, position_size):
        self.balance = balance
        self.position_size = position_size
        self.in_long = False
        self.in_short = False
        self.positions = []
        self.leverage = 1

    def _record_open(self, ticker, price, shares, kind, tick):
        """Append a freshly opened position record."""
        self.positions.append({
            'ticker': ticker, 'entry_price': price, 'shares': shares,
            'type': kind, 'open': True, 'tick_open': tick
        })

    def go_long(self, ticker, price, tick):
        """Buy whole shares with the current position size; False if busy."""
        if self.in_long or self.in_short:
            return False
        shares = int(self.position_size / price)
        self.balance -= price * shares * self.leverage
        self._record_open(ticker, price, shares, 'long', tick)
        self.in_long = True
        self.position_size = self.balance

    def go_short(self, ticker, price, tick):
        """Sell short whole shares (credits the balance); False if busy."""
        if self.in_short or self.in_long:
            return False
        shares = int(self.position_size / price)
        self.balance += price * shares * self.leverage
        self._record_open(ticker, price, shares, 'short', tick)
        self.in_short = True
        self.position_size = self.balance

    def close_long(self, ticker, price, tick):
        """Close the open long for *ticker*; True when one was closed."""
        if not self.in_long:
            return False
        for pos in self.positions:
            if pos['ticker'] != ticker or pos['type'] != 'long' or not pos['open']:
                continue
            self.balance += pos['shares'] * price * self.leverage
            pos['open'] = False
            pos['exit_price'] = price
            pos['tick_close'] = tick
            pos['net'] = pos['shares'] * (price - pos['entry_price'])
            self.in_long = False
            self.position_size = self.balance
            return True
        return False

    def close_short(self, ticker, price, tick):
        """Buy back the open short for *ticker*; True when one was closed."""
        if not self.in_short:
            return False
        for pos in self.positions:
            if pos['ticker'] != ticker or pos['type'] != 'short' or not pos['open']:
                continue
            self.balance -= pos['shares'] * price * self.leverage
            pos['open'] = False
            pos['exit_price'] = price
            pos['tick_close'] = tick
            pos['net'] = pos['shares'] * (pos['entry_price'] - price)
            self.in_short = False
            self.position_size = self.balance
            return True
        return False

    def close_all(self, ticker, price, tick):
        """Close whichever position (if any) is open for *ticker*."""
        return self.close_long(ticker, price, tick) or self.close_short(ticker, price, tick)
class Comparator(object):
    """Evaluates '<indicator>.<attr-or-method()> <op> <value>' against a
    list of {'name': ..., 'instance': ...} indicator entries."""

    def __init__(self, indicator, comparison, rightside, english):
        # e.g. 'rsi.is_oversold' or 'supportresistance.lower_high()'
        self.indicator = indicator.split('.')
        self.comparison = comparison
        self.rightside = rightside
        self.english = english  # human-readable description of the rule

    def check(self, indicators):
        """Return the comparison result, or None when the indicator is unknown."""
        name, accessor = self.indicator[0], self.indicator[1]
        instance = None
        for entry in indicators:
            if entry['name'] == name:
                instance = entry['instance']
                break
        if instance is None:
            return None
        if accessor.endswith("()"):
            result = getattr(instance, accessor[:-2])()
        else:
            result = getattr(instance, accessor)
        # HACK: repr-based eval of the comparison — fine for bools/numbers,
        # but never route untrusted strings through here.
        expression = "%s %s %s" % (result, self.comparison, self.rightside)
        return eval(expression)
class DataReader(object):
def __init__(self, ticker):
raise NotImplementedError("Must be implemented for each data reader")
def __iter__(self):
return self
def next(self):
try:
self.counter += 1
return self.format(self.data[self.counter - 1])
except IndexError:
raise StopIteration
def format(self):
raise NotImplementedError("Must be implemented for each data reader")
def store(self):
raise NotImplementedError("Must be implemented for each data reader")
def retrieve(self, key):
raise NotImplementedError("Must be implemented for each data reader")
def exists(self, key):
return False
def getstart(self):
return self.starttime
class GoogleReader(DataReader):
    """Reads intraday bars from the legacy Google Finance CSV endpoint,
    caching the raw response under data/ on first use.

    NOTE(review): Python 2 only (urllib2, print statement), and the
    endpoint itself has since been retired by Google.
    """

    def __init__(self, ticker, interval, period):
        self.counter = 0
        self.key = "%s,%s,%s" % (ticker, interval, period)
        path = 'data/%s-%s.txt' % (ticker, self.key)
        # Probe the on-disk cache; download only on a miss.
        # NOTE(review): this probe handle is never closed.
        try:
            open(path)
        except IOError:
            try:
                url = "https://www.google.com/finance/getprices?q=%s&i=%s&p=%sd&f=d,o,h,l,c,v" % (ticker, interval, period)
                u = urllib2.urlopen(url)
                local = open(path, 'w')
                local.write(u.read())
                local.close()
            except Exception:
                print "%s not found" % ticker
                sys.exit()
        self.data = open(path, 'r')
        self.data = map(str.strip, self.data.readlines())
        # Column layout of one Google CSV data row.
        self.columns = {
            'close': 1,
            'high': 2,
            'low': 3,
            'open': 4,
            'volume': 5
        }
        # Fixed header layout of the payload.
        self.rows = {
            'date': 7,   # row carrying the epoch start time (leading char stripped)
            'ticks': 8   # first data row
        }
        # Scan the header; `counter` is left pointing into the data rows and
        # is shared with next()/format() from the DataReader base class.
        for line in self.data:
            if self.counter == self.rows['date']:
                self.starttime = line.split(',')[0][1:]
            if self.counter == self.rows['ticks']:
                self.formatted_data = self.format(line)
                break;
            self.counter += 1

    def format(self, data):
        """Split one CSV row into a dict of floats (also advances counter)."""
        data = data.split(',')
        self.counter += 1
        return {
            'row_number': self.counter,
            'close': float(data[self.columns['close']]),
            'high': float(data[self.columns['high']]),
            'low': float(data[self.columns['low']]),
            'open': float(data[self.columns['open']]),
            'volume': float(data[self.columns['volume']])
        }
# Trading rules evaluated by Agent.process_instructions on every bar.
# Each entry states what to do while long ('in_long'), while short
# ('in_short') and while flat ('no_position') when ALL of its
# `conditions` hold.
instructions = [
    {
        # Exit a long on oversold RSI during a downtrend (lower high + lower low).
        'in_long': 'cover',
        'in_short': 'hold',
        'no_position': 'hold',
        'conditions': [
            Comparator('rsi.is_oversold', '==', True, 'RSI signals oversold conditions'),
            Comparator('supportresistance.lower_high()', '==', True, 'Lower high'),
            Comparator('supportresistance.lower_low()', '==', True, 'Lower low')
        ]
    },
    {
        # Cover a short once an uptrend is confirmed (higher high + higher low).
        'in_long': 'hold',
        'in_short': 'cover',
        'no_position': 'hold',
        'conditions': [
            Comparator('supportresistance.higher_high()', '==', True, 'Higher high'),
            Comparator('supportresistance.higher_low()', '==', True, 'Higher low')
        ]
    },
    {
        # Open a short in a downtrend.
        # NOTE(review): the condition tests rsi.is_oversold but its
        # description says "overbought" — one of the two looks wrong.
        'in_long': 'hold',
        'in_short': 'hold',
        'no_position': 'short',
        'conditions': [
            Comparator('rsi.is_oversold', '==', True, 'RSI signals overbought conditions'),
            Comparator('supportresistance.lower_high()', '==', True, 'Lower high'),
            Comparator('supportresistance.lower_low()', '==', True, 'Lower low')
        ]
    },
    {
        # Open a long on oversold RSI in an uptrend.
        'in_long': 'hold',
        'in_short': 'hold',
        'no_position': 'long',
        'conditions': [
            Comparator('rsi.is_oversold', '==', True, 'RSI signals oversold conditions'),
            Comparator('supportresistance.higher_high()', '==', True, 'Higher high'),
            Comparator('supportresistance.higher_low()', '==', True, 'Higher low')
        ]
    }
]
# Indicator set deep-copied per holding by analyze(); `name` is the key
# Comparator uses to look an indicator up, `label` drives the plotting.
indicators = [
    {
        'label': '20 Period SMA',
        'instance': SMA(20),
        'name': 'sma'
    },
    {
        'label': '20 Period EMA',
        'instance': EMA(20),
        'name': 'ema'
    },
    {
        'label': '14 Period RSI',
        'instance': RSI(14),
        'name': 'rsi'
    },
    {
        'label': 'Volume',
        'instance': Volume(),
        'name': 'volume'
    },
    {
        'label': 'Support and Resistance',
        'instance': SupportResistance(.01),  # 1% reversal confirms a swing
        'name': 'supportresistance'
    }
]
# Pick 3 random tickers from the FIRST comma-separated line of tickers.txt.
# NOTE(review): runs at import time, uses the Python 2 print statement, and
# the file handle is never closed.
tickers = open('tickers.txt', 'r')
tickers = random.sample([line.split(',') for line in tickers.readlines()][0], 3)
print tickers
def analyze(tickers, portfolio):
    """Replay cached intraday data for each ticker through an Agent, close
    everything at the end, print gain stats and optionally plot each chart.

    Returns a dict with 'baseline' and 'algo' percentage gains plus
    'num_ticks'. Python 2 only (print statements, matplotlib.finance).
    """
    holdings = []
    initial_balance = portfolio.balance
    for ticker in tickers:
        # Each holding gets its own deep-copied indicator/instruction set,
        # but all holdings share the one portfolio.
        holdings.append({
            'agent': Agent(copy.deepcopy(indicators), copy.deepcopy(instructions), portfolio, ticker),
            'reader': GoogleReader(ticker, interval=5, period=1),
            'ticker': ticker
        })
    i = 0
    # NOTE(review): `i` is never incremented, so `i == 0` holds on every
    # bar (startprice keeps being overwritten with the latest open) and
    # every closed position gets tick index 0 — confirm intent.
    do_continue = True
    base_gains = []
    while do_continue:
        # Round-robin one bar per holding; the first exhausted reader ends
        # the whole replay.
        for holding in holdings:
            try:
                tick = holding['reader'].next()
                if i == 0:
                    startprice = float(tick['open'])
                holding['agent'].tick(tick)
                holding['price'] = tick['close']
            except StopIteration:
                endprice = float(tick['close'])
                base_gains.append((endprice - startprice) / startprice * 100)
                do_continue = False
                break
    # Flatten any remaining positions at the last seen price.
    for holding in holdings:
        portfolio.close_all(holding['ticker'], holding['price'], i)
    print "Base Gain: %.02f%%" % (sum(base_gains) / len(base_gains))
    print "Algo Gain: %.02f%%" % ((portfolio.balance - initial_balance) / initial_balance * 100)
    print "Beginning Balance: $%.02f" % initial_balance
    print "Ending Balance: $%.02f" % portfolio.balance
    # print json.dumps(portfolio.positions, indent=4)
    plot = True
    plot_extra = True
    plot_positions = True
    if plot:
        # NOTE(review): matplotlib.finance was removed from matplotlib;
        # modern installs need the separate mplfinance package.
        import matplotlib.pyplot as plt
        from matplotlib.finance import candlestick, candlestick2
        for holding in holdings:
            candledata = []
            time = 0
            for tick in holding['agent'].ticks:
                candledata.append([float(time), tick['open'], tick['close'], tick['low'], tick['high']])
                time += 1
            fig, ax = plt.subplots()
            # Price panel occupies the top 3/4 of a 4x4 grid.
            ax = plt.subplot2grid((4,4), (0, 0), rowspan=3, colspan=4)
            ax.spines['right'].set_color('none')
            ax.spines['top'].set_color('none')
            ax.yaxis.set_ticks_position('left')
            ax.spines['left'].set_linewidth(2)
            ax.spines['bottom'].set_linewidth(2)
            ax.set_xlim(-1, time)
            plt.title(holding['ticker'])
            plt.setp(ax.get_xticklabels(), visible=False)
            fig.subplots_adjust(bottom=0.2)
            candlestick(ax, candledata, width=0.6, colorup='g', colordown='r')
            # TODO: make plotting indicators less procedural
            if plot_extra:
                for indicator in holding['agent'].indicators:
                    name = indicator['label']
                    indicator = indicator['instance']
                    if name in ['20 Period SMA', '20 Period EMA']:
                        ax.plot(indicator.x_axis, indicator.values, label=name)
                    elif False and name == '14 Period RSI':
                        # Deliberately disabled RSI sub-panel (dead branch).
                        ax2 = ax.twinx()
                        ax2.set_xlim(-1, time)
                        ax2.plot(indicator.x_axis, indicator.values)
                        ax2.plot(indicator.conditions_x, indicator.overbought, ls='--')
                        ax2.plot(indicator.conditions_x, indicator.oversold, ls='--')
                    elif name == 'Volume':
                        # Volume bars go in the bottom row of the grid.
                        bottom = plt.subplot2grid((4,4), (3,0), rowspan=2, colspan=4)
                        bottom.set_xlim(-.5, time)
                        bottom.bar(indicator.x_axis, indicator.values, width=0.6, color=indicator.colors)
                        bottom.plot(indicator.ema_x_axis, indicator.ema_values)
                    elif name == 'Support and Resistance':
                        ax.plot(*np.transpose(indicator.highs), marker='', color='y', ls='--')
                        ax.plot(*np.transpose(indicator.lows), marker='', color='y', ls='--')
            if plot_positions:
                for position in portfolio.positions:
                    if position['ticker'] == holding['ticker']:
                        # plot blue dots for long positions
                        if position['type'] == 'long':
                            marker = 'ob'
                        # plot red dots for short positions
                        elif position['type'] == 'short':
                            marker = 'or'
                        ax.plot(position['tick_open'], position['entry_price'], marker)
                        '''
                        ax.annotate('cover', xy=(position['tick_close'], position['exit_price']), xycoords='data',
                            xytext=(-30, -30), textcoords='offset points',
                            arrowprops=dict(arrowstyle="->",
                            connectionstyle="arc3,rad=.2")
                            )
                        '''
                        # plot yellow dots when the position is closed (short or long)
                        ax.plot(position['tick_close'], position['exit_price'], 'oy')
            plt.legend()
            filename = './graphs/%s.png' % holding['ticker']
            plt.savefig(filename,dpi=300)
    return {'baseline': float((endprice - startprice) / endprice) * 100,
        'algo': float(((float(portfolio.balance) - initial_balance) / initial_balance)) * 100,
        'num_ticks': i}
    # NOTE(review): unreachable — the dict return above always fires first.
    return "Finished Process #%d" % (os.getpid())
def log_result(result):
    """multiprocessing callback: fold one worker's gains into `results`.

    NOTE(review): no module-level `results` dict is defined in this file;
    one must exist before the multiprocessing path is used.
    """
    if result == 0:
        return  # worker produced nothing usable
    results['algo'].append(result['algo'])
    results['baseline'].append(result['baseline'])
def go():
    """Entry point: build a portfolio, run the analysis, print elapsed time."""
    start = timer.time()
    initial_balance = 10000
    position_size = initial_balance / len(tickers)
    multithreaded = False
    if multithreaded:
        pool = mp.Pool(4)
        for ticker in tickers:
            # BUG FIX: Portfolio.__init__ requires (balance, position_size);
            # the original passed only the balance and raised TypeError.
            portfolio = Portfolio(initial_balance, position_size)
            # BUG FIX: analyze() iterates over a list of tickers; a bare
            # string would be iterated character by character.
            pool.apply_async(analyze, args=([ticker], portfolio,), callback=log_result)
        pool.close()
        pool.join()
    else:
        portfolio = Portfolio(initial_balance, position_size)
        result = analyze(tickers, portfolio)
    # Parenthesized print is valid in both Python 2 and 3.
    print("Elapsed Time: %f" % (timer.time() - start))

if __name__ == '__main__':
    go()
|
swprojects/Buildertron
|
buildertron/forms/uiaboutdialog.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'aboutdialog.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AboutDialog(object):
    """Widget layout for the About dialog, generated by pyuic5 from
    aboutdialog.ui.

    NOTE(review): per the file header this is generated code — any edits
    (including these comments) are lost when the .ui file is recompiled.
    """

    def setupUi(self, AboutDialog):
        """Build the label grid and the Close button box on *AboutDialog*."""
        AboutDialog.setObjectName("AboutDialog")
        AboutDialog.resize(640, 250)
        self.gridLayout = QtWidgets.QGridLayout(AboutDialog)
        self.gridLayout.setObjectName("gridLayout")
        # Homepage link (row 3, col 1) — opens in the external browser.
        self.label_5 = QtWidgets.QLabel(AboutDialog)
        self.label_5.setOpenExternalLinks(True)
        self.label_5.setObjectName("label_5")
        self.gridLayout.addWidget(self.label_5, 3, 1, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 4, 1, 1, 1)
        self.label = QtWidgets.QLabel(AboutDialog)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        self.label_2 = QtWidgets.QLabel(AboutDialog)
        self.label_2.setObjectName("label_2")
        self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
        self.label_4 = QtWidgets.QLabel(AboutDialog)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
        # Close-only button box at the bottom right.
        self.buttonBox = QtWidgets.QDialogButtonBox(AboutDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 5, 1, 1, 1)
        self.label_3 = QtWidgets.QLabel(AboutDialog)
        self.label_3.setObjectName("label_3")
        self.gridLayout.addWidget(self.label_3, 2, 1, 1, 1)
        self.label_6 = QtWidgets.QLabel(AboutDialog)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 1, 0, 1, 1)
        # License link (row 1, col 1).
        self.label_7 = QtWidgets.QLabel(AboutDialog)
        self.label_7.setOpenExternalLinks(True)
        self.label_7.setObjectName("label_7")
        self.gridLayout.addWidget(self.label_7, 1, 1, 1, 1)
        self.labelVersion = QtWidgets.QLabel(AboutDialog)
        self.labelVersion.setObjectName("labelVersion")
        self.gridLayout.addWidget(self.labelVersion, 0, 1, 1, 1)
        self.retranslateUi(AboutDialog)
        self.buttonBox.accepted.connect(AboutDialog.accept)
        self.buttonBox.rejected.connect(AboutDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(AboutDialog)

    def retranslateUi(self, AboutDialog):
        """Install the (translatable) display strings on every widget."""
        _translate = QtCore.QCoreApplication.translate
        AboutDialog.setWindowTitle(_translate("AboutDialog", "About Buildertron..."))
        self.label_5.setText(_translate("AboutDialog", "<a href=\'https://github.com/swprojects/Buildertron\'>https://github.com/swprojects/Buildertron</a>"))
        self.label.setText(_translate("AboutDialog", "Buildertron"))
        self.label_2.setText(_translate("AboutDialog", "Author"))
        self.label_4.setText(_translate("AboutDialog", "Homepage"))
        self.label_3.setText(_translate("AboutDialog", "<NAME>"))
        self.label_6.setText(_translate("AboutDialog", "License"))
        self.label_7.setText(_translate("AboutDialog", "<a href=\'https://opensource.org/licenses/MIT\'>The MIT License</a>\n"
            ""))
        self.labelVersion.setText(_translate("AboutDialog", "v0.0"))
|
swprojects/Buildertron
|
buildertron/spec/android_permissions.py
|
<reponame>swprojects/Buildertron
permissions = [
'ACCESS_ALL_DOWNLOADS',
'ACCESS_BLUETOOTH_SHARE',
'ACCESS_CACHE_FILESYSTEM',
'ACCESS_CHECKIN_PROPERTIES',
'ACCESS_CONTENT_PROVIDERS_EXTERNALLY',
'ACCESS_DOWNLOAD_MANAGER',
'ACCESS_DOWNLOAD_MANAGER_ADVANCED',
'ACCESS_DRM_CERTIFICATES',
'ACCESS_EPHEMERAL_APPS',
'ACCESS_FM_RADIO',
'ACCESS_INPUT_FLINGER',
'ACCESS_KEYGUARD_SECURE_STORAGE',
'ACCESS_LOCATION_EXTRA_COMMANDS',
'ACCESS_MOCK_LOCATION',
'ACCESS_MTP',
'ACCESS_NETWORK_CONDITIONS',
'ACCESS_NETWORK_STATE',
'ACCESS_NOTIFICATIONS',
'ACCESS_NOTIFICATION_POLICY',
'ACCESS_PDB_STATE',
'ACCESS_SURFACE_FLINGER',
'ACCESS_VOICE_INTERACTION_SERVICE',
'ACCESS_VR_MANAGER',
'ACCESS_WIFI_STATE',
'ACCESS_WIMAX_STATE',
'ACCOUNT_MANAGER',
'ALLOW_ANY_CODEC_FOR_PLAYBACK',
'ASEC_ACCESS',
'ASEC_CREATE',
'ASEC_DESTROY',
'ASEC_MOUNT_UNMOUNT',
'ASEC_RENAME',
'AUTHENTICATE_ACCOUNTS',
'BACKUP',
'BATTERY_STATS',
'BIND_ACCESSIBILITY_SERVICE',
'BIND_APPWIDGET',
'BIND_CARRIER_MESSAGING_SERVICE',
'BIND_CARRIER_SERVICES',
'BIND_CHOOSER_TARGET_SERVICE',
'BIND_CONDITION_PROVIDER_SERVICE',
'BIND_CONNECTION_SERVICE',
'BIND_DEVICE_ADMIN',
'BIND_DIRECTORY_SEARCH',
'BIND_DREAM_SERVICE',
'BIND_INCALL_SERVICE',
'BIND_INPUT_METHOD',
'BIND_INTENT_FILTER_VERIFIER',
'BIND_JOB_SERVICE',
'BIND_KEYGUARD_APPWIDGET',
'BIND_MIDI_DEVICE_SERVICE',
'BIND_NFC_SERVICE',
'BIND_NOTIFICATION_LISTENER_SERVICE',
'BIND_NOTIFICATION_RANKER_SERVICE',
'BIND_PACKAGE_VERIFIER',
'BIND_PRINT_RECOMMENDATION_SERVICE',
'BIND_PRINT_SERVICE',
'BIND_PRINT_SPOOLER_SERVICE',
'BIND_QUICK_SETTINGS_TILE',
'BIND_REMOTEVIEWS',
'BIND_REMOTE_DISPLAY',
'BIND_ROUTE_PROVIDER',
'BIND_RUNTIME_PERMISSION_PRESENTER_SERVICE',
'BIND_SCREENING_SERVICE',
'BIND_TELECOM_CONNECTION_SERVICE',
'BIND_TEXT_SERVICE',
'BIND_TRUST_AGENT',
'BIND_TV_INPUT',
'BIND_TV_REMOTE_SERVICE',
'BIND_VOICE_INTERACTION',
'BIND_VPN_SERVICE',
'BIND_VR_LISTENER_SERVICE',
'BIND_WALLPAPER',
'BLUETOOTH',
'BLUETOOTH_ADMIN',
'BLUETOOTH_MAP',
'BLUETOOTH_PRIVILEGED',
'BLUETOOTH_STACK',
'BROADCAST_CALLLOG_INFO',
'BROADCAST_NETWORK_PRIVILEGED',
'BROADCAST_PACKAGE_REMOVED',
'BROADCAST_PHONE_ACCOUNT_REGISTRATION',
'BROADCAST_SMS',
'BROADCAST_STICKY',
'BROADCAST_WAP_PUSH',
'CACHE_CONTENT',
'CALL_PRIVILEGED',
'CAMERA_DISABLE_TRANSMIT_LED',
'CAMERA_SEND_SYSTEM_EVENTS',
'CAPTURE_AUDIO_HOTWORD',
'CAPTURE_AUDIO_OUTPUT',
'CAPTURE_SECURE_VIDEO_OUTPUT',
'CAPTURE_TV_INPUT',
'CAPTURE_VIDEO_OUTPUT',
'CARRIER_FILTER_SMS',
'CHANGE_APP_IDLE_STATE',
'CHANGE_BACKGROUND_DATA_SETTING',
'CHANGE_COMPONENT_ENABLED_STATE',
'CHANGE_CONFIGURATION',
'CHANGE_DEVICE_IDLE_TEMP_WHITELIST',
'CHANGE_NETWORK_STATE',
'CHANGE_WIFI_MULTICAST_STATE',
'CHANGE_WIFI_STATE',
'CHANGE_WIMAX_STATE',
'CLEAR_APP_CACHE',
'CLEAR_APP_GRANTED_URI_PERMISSIONS',
'CLEAR_APP_USER_DATA',
'CONFIGURE_DISPLAY_COLOR_TRANSFORM',
'CONFIGURE_WIFI_DISPLAY',
'CONFIRM_FULL_BACKUP',
'CONNECTIVITY_INTERNAL',
'CONTROL_INCALL_EXPERIENCE',
'CONTROL_KEYGUARD',
'CONTROL_LOCATION_UPDATES',
'CONTROL_VPN',
'CONTROL_WIFI_DISPLAY',
'COPY_PROTECTED_DATA',
'CREATE_USERS',
'CRYPT_KEEPER',
'DELETE_CACHE_FILES',
'DELETE_PACKAGES',
'DEVICE_POWER',
'DIAGNOSTIC',
'DISABLE_KEYGUARD',
'DISPATCH_NFC_MESSAGE',
'DISPATCH_PROVISIONING_MESSAGE',
'DOWNLOAD_CACHE_NON_PURGEABLE',
'DUMP',
'DVB_DEVICE',
'EXPAND_STATUS_BAR',
'FACTORY_TEST',
'FILTER_EVENTS',
'FLASHLIGHT',
'FORCE_BACK',
'FORCE_STOP_PACKAGES',
'FRAME_STATS',
'FREEZE_SCREEN',
'GET_ACCOUNTS_PRIVILEGED',
'GET_APP_GRANTED_URI_PERMISSIONS',
'GET_APP_OPS_STATS',
'GET_DETAILED_TASKS',
'GET_INTENT_SENDER_INTENT',
'GET_PACKAGE_IMPORTANCE',
'GET_PACKAGE_SIZE',
'GET_PASSWORD',
'GET_PROCESS_STATE_AND_OOM_SCORE',
'GET_TASKS',
'GET_TOP_ACTIVITY_INFO',
'GLOBAL_SEARCH',
'GLOBAL_SEARCH_CONTROL',
'GRANT_RUNTIME_PERMISSIONS',
'HARDWARE_TEST',
'HDMI_CEC',
'INJECT_EVENTS',
'INSTALL_GRANT_RUNTIME_PERMISSIONS',
'INSTALL_LOCATION_PROVIDER',
'INSTALL_PACKAGES',
'INTENT_FILTER_VERIFICATION_AGENT',
'INTERACT_ACROSS_USERS',
'INTERACT_ACROSS_USERS_FULL',
'INTERNAL_SYSTEM_WINDOW',
'INTERNET',
'INVOKE_CARRIER_SETUP',
'KILL_BACKGROUND_PROCESSES',
'KILL_UID',
'LAUNCH_TRUST_AGENT_SETTINGS',
'LOCAL_MAC_ADDRESS',
'LOCATION_HARDWARE',
'LOOP_RADIO',
'MANAGE_ACCOUNTS',
'MANAGE_ACTIVITY_STACKS',
'MANAGE_APP_OPS_RESTRICTIONS',
'MANAGE_APP_TOKENS',
'MANAGE_CA_CERTIFICATES',
'MANAGE_DEVICE_ADMINS',
'MANAGE_DOCUMENTS',
'MANAGE_FINGERPRINT',
'MANAGE_MEDIA_PROJECTION',
'MANAGE_NETWORK_POLICY',
'MANAGE_NOTIFICATIONS',
'MANAGE_PROFILE_AND_DEVICE_OWNERS',
'MANAGE_SOUND_TRIGGER',
'MANAGE_USB',
'MANAGE_USERS',
'MANAGE_VOICE_KEYPHRASES',
'MASTER_CLEAR',
'MEDIA_CONTENT_CONTROL',
'MODIFY_APPWIDGET_BIND_PERMISSIONS',
'MODIFY_AUDIO_ROUTING',
'MODIFY_AUDIO_SETTINGS',
'MODIFY_CELL_BROADCASTS',
'MODIFY_DAY_NIGHT_MODE',
'MODIFY_NETWORK_ACCOUNTING',
'MODIFY_PARENTAL_CONTROLS',
'MODIFY_PHONE_STATE',
'MOUNT_FORMAT_FILESYSTEMS',
'MOUNT_UNMOUNT_FILESYSTEMS',
'MOVE_PACKAGE',
'NET_ADMIN',
'NET_TUNNELING',
'NFC',
'NFC_HANDOVER_STATUS',
'NOTIFY_PENDING_SYSTEM_UPDATE',
'OBSERVE_GRANT_REVOKE_PERMISSIONS',
'OEM_UNLOCK_STATE',
'OVERRIDE_WIFI_CONFIG',
'PACKAGE_USAGE_STATS',
'PACKAGE_VERIFICATION_AGENT',
'PACKET_KEEPALIVE_OFFLOAD',
'PEERS_MAC_ADDRESS',
'PERFORM_CDMA_PROVISIONING',
'PERFORM_SIM_ACTIVATION',
'PERSISTENT_ACTIVITY',
'PROCESS_CALLLOG_INFO',
'PROCESS_PHONE_ACCOUNT_REGISTRATION',
'PROVIDE_TRUST_AGENT',
'QUERY_DO_NOT_ASK_CREDENTIALS_ON_BOOT',
'READ_BLOCKED_NUMBERS',
'READ_DREAM_STATE',
'READ_EXTERNAL_STORAGE',
'READ_FRAME_BUFFER',
'READ_INPUT_STATE',
'READ_INSTALL_SESSIONS',
'READ_LOGS',
'READ_NETWORK_USAGE_HISTORY',
'READ_OEM_UNLOCK_STATE',
'READ_PRECISE_PHONE_STATE',
'READ_PRIVILEGED_PHONE_STATE',
'READ_PROFILE',
'READ_SEARCH_INDEXABLES',
'READ_SOCIAL_STREAM',
'READ_SYNC_SETTINGS',
'READ_SYNC_STATS',
'READ_USER_DICTIONARY',
'READ_WIFI_CREDENTIAL',
'REAL_GET_TASKS',
'REBOOT',
'RECEIVE_BLUETOOTH_MAP',
'RECEIVE_BOOT_COMPLETED',
'RECEIVE_DATA_ACTIVITY_CHANGE',
'RECEIVE_EMERGENCY_BROADCAST',
'RECEIVE_MEDIA_RESOURCE_USAGE',
'RECEIVE_STK_COMMANDS',
'RECEIVE_WIFI_CREDENTIAL_CHANGE',
'RECOVERY',
'REGISTER_CALL_PROVIDER',
'REGISTER_CONNECTION_MANAGER',
'REGISTER_SIM_SUBSCRIPTION',
'REGISTER_WINDOW_MANAGER_LISTENERS',
'REMOTE_AUDIO_PLAYBACK',
'REMOVE_DRM_CERTIFICATES',
'REMOVE_TASKS',
'REORDER_TASKS',
'REQUEST_IGNORE_BATTERY_OPTIMIZATIONS',
'REQUEST_INSTALL_PACKAGES',
'RESET_FINGERPRINT_LOCKOUT',
'RESET_SHORTCUT_MANAGER_THROTTLING',
'RESTART_PACKAGES',
'RETRIEVE_WINDOW_CONTENT',
'RETRIEVE_WINDOW_TOKEN',
'REVOKE_RUNTIME_PERMISSIONS',
'SCORE_NETWORKS',
'SEND_CALL_LOG_CHANGE',
'SEND_DOWNLOAD_COMPLETED_INTENTS',
'SEND_RESPOND_VIA_MESSAGE',
'SEND_SMS_NO_CONFIRMATION',
'SERIAL_PORT',
'SET_ACTIVITY_WATCHER',
'SET_ALWAYS_FINISH',
'SET_ANIMATION_SCALE',
'SET_DEBUG_APP',
'SET_INPUT_CALIBRATION',
'SET_KEYBOARD_LAYOUT',
'SET_ORIENTATION',
'SET_POINTER_SPEED',
'SET_PREFERRED_APPLICATIONS',
'SET_PROCESS_LIMIT',
'SET_SCREEN_COMPATIBILITY',
'SET_TIME',
'SET_TIME_ZONE',
'SET_WALLPAPER',
'SET_WALLPAPER_COMPONENT',
'SET_WALLPAPER_HINTS',
'SHUTDOWN',
'SIGNAL_PERSISTENT_PROCESSES',
'START_ANY_ACTIVITY',
'START_PRINT_SERVICE_CONFIG_ACTIVITY',
'START_TASKS_FROM_RECENTS',
'STATUS_BAR',
'STATUS_BAR_SERVICE',
'STOP_APP_SWITCHES',
'STORAGE_INTERNAL',
'SUBSCRIBED_FEEDS_READ',
'SUBSCRIBED_FEEDS_WRITE',
'SUBSTITUTE_NOTIFICATION_APP_NAME',
'SYSTEM_ALERT_WINDOW',
'TABLET_MODE',
'TEMPORARY_ENABLE_ACCESSIBILITY',
'TETHER_PRIVILEGED',
'TRANSMIT_IR',
'TRUST_LISTENER',
'TV_INPUT_HARDWARE',
'TV_VIRTUAL_REMOTE_CONTROLLER',
'UPDATE_APP_OPS_STATS',
'UPDATE_CONFIG',
'UPDATE_DEVICE_STATS',
'UPDATE_LOCK',
'UPDATE_LOCK_TASK_PACKAGES',
'USER_ACTIVITY',
'USE_CREDENTIALS',
'VIBRATE',
'WAKE_LOCK',
'WRITE_APN_SETTINGS',
'WRITE_BLOCKED_NUMBERS',
'WRITE_DREAM_STATE',
'WRITE_GSERVICES',
'WRITE_MEDIA_STORAGE',
'WRITE_PROFILE',
'WRITE_SECURE_SETTINGS',
'WRITE_SETTINGS',
'WRITE_SMS',
'WRITE_SOCIAL_STREAM',
'WRITE_SYNC_SETTINGS',
'WRITE_USER_DICTIONARY',
]
def get_android_permissions():
    """Return the known Android permission names.

    Returns a copy of the module-level `permissions` table so callers
    cannot accidentally mutate the shared list.
    """
    return list(permissions)
|
swprojects/Buildertron
|
buildertron/buildertron.py
|
<filename>buildertron/buildertron.py
#!/usr/bin/python3
# -*- coding: utf-8 -*
"""
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import json
import logging
import os
from os.path import basename, expanduser, exists, dirname, join, realpath
import sys
from ast import literal_eval as make_tuple
from configparser import SafeConfigParser
from functools import partial
from sys import platform
from subprocess import PIPE, call, check_output
from PyQt5.QtCore import (Qt, QTimer, QSize)
from PyQt5.QtGui import (QIcon, QPixmap, QColor)
from PyQt5.QtWidgets import (QApplication, QMainWindow, QSplashScreen, QAction, QLabel,
QComboBox, QFileDialog, QMessageBox, QColorDialog)
appPath = ''
if __name__ != '__main__':
# this allows us to import relatively
sys.path.append(dirname(realpath(__file__)))
appPath = dirname(realpath(__file__)) + '/'
from dialogs import (aboutdialog, settingsdialog, submitissuedialog, androidpermissionsdialog)
from forms.uimainwindow import Ui_MainWindow
from spec import translate_objects
from spec.buildozerdefault import get_default_template
from spec.android_presplash_colors import get_android_presplash_colors
from version import __version__
# Enable crisp rendering on high-DPI displays (attributes exist on Qt >= 5.6,
# hence the hasattr guards).
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
    QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
if hasattr(Qt, 'AA_UseHighDpiPixmaps'):
    QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
# Parsed command-line options; process_sys_args() merges user values over these.
SYS_ARGS = {
    '--verbose': 0,
}
# verbosity: maps the --verbose CLI value onto a stdlib logging level
LOG_LEVELS = {
    '1': 20,  # Info
    '2': 10,  # Debug
    '3': 30,  # Warning
    '4': 40,  # Error
    '5': 50,  # Critical
}
# Splash-screen display time in milliseconds.
SPLASH_TIMEOUT = 400
# Android presplash color names offered in the combo box
# (provided by spec.android_presplash_colors).
ANDROID_PRESPLASH_COLORS = get_android_presplash_colors()
class MainWindow(QMainWindow):
    """Main BuilderTron window: edits, previews and saves buildozer.spec files."""

    def __init__(self):
        """Build the UI, load persisted settings and restore the last spec."""
        super(MainWindow, self).__init__()
        self._specfile = None  # path of the loaded buildozer.spec (None = unsaved)
        self._config = {}
        self._config.update(self.appDefaults)
        self.checkBoxes = {}   # spec-key -> enable/disable QCheckBox
        self.specObjects = {}  # spec-key -> input widget holding the value
        self.envPaths = {}
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowTitle('BuilderTron v{0}'.format(__version__))
        self.setWindowIcon(QIcon(appPath + 'buildertron.ico'))
        # order matters: widgets must exist (createToolbar/collectUiObjects)
        # before the default spec and the saved config are applied
        self.createToolbar()
        self.collectUiObjects()
        self.loadDefaultSpec()
        self.appLoadConfig()
        self.updateUI()
        if self.config.get('show_splashscreen', True):
            self.initSplash()
        self.show()
        self.refreshPreview()
        self.loadRecentFile()
    @property
    def appDefaults(self):
        """Default application settings; appLoadConfig() overlays saved values."""
        return {
            'ask_save_on_close': True,
            'show_splashscreen': True,
            'keep_recent_files': True,
            'recent_files': [],
            # override_* flags enable the matching *_cmd template below
            'override_build': False,
            'override_build_cmd': 'buildozer {target} {buildmode}',
            'override_clean': False,
            'override_clean_cmd': 'buildozer {target} clean',
            'override_deploy': False,
            'override_deploy_cmd': 'buildozer {target} deploy',
            'override_distclean': False,
            'override_distclean_cmd': 'buildozer distclean',
            'override_run': False,
            'override_run_cmd': 'buildozer {target} run',
            'override_serve': False,
            'override_serve_cmd': 'buildozer {target} serve',
            'override_terminal': False,
            'override_terminal_cmd': 'xterm -hold -e',
            'custom_target': '',
            # blacklists/substitutes are comma-separated strings edited in
            # the settings dialog; parsed in getSpecOutput()
            'use_app_blacklist': False,
            'app_blacklist': '',
            'use_buildozer_blacklist': False,
            'buildozer_blacklist': '',
            'use_app_substitute': False,
            'app_substitute': '',
            'use_buildozer_substitute': False,
            'buildozer_substitute': '',
            'use_custom_spec': False,
            'custom_spec': get_default_template(),
        }
def appLoadConfig(self):
logging.info('MainWindow->appLoadConfig')
try:
with open(self.settingsPath, 'r') as file:
data = json.load(file)
file.close()
self.config.update(data)
except Exception:
self.appSaveConfig()
self.appRestoreStore()
    def appRestoreStore(self):
        """Re-apply persisted state to the UI (currently the recent-files menu)."""
        logging.info('MainWindow->appRestoreStore')
        self.updateRecentFilesMenu()

    def appSaveConfig(self):
        """Write the current settings dict to settingsPath as pretty JSON."""
        logging.info('MainWindow->appSaveConfig')
        try:
            with open(self.settingsPath, 'w') as file:
                json.dump(self.config, file, sort_keys=True, indent=2)
        except PermissionError:
            logging.info('PermissionError: you do not permission to save config')
        # always log what was (attempted to be) saved, for debugging
        logging.info(self.config)
    @property
    def buildmode(self):
        """Selected build mode from the toolbar combo ('debug' or 'release')."""
        buildmode = self.comboBuild.currentText()
        return buildmode

    @property
    def buildozerSpecDefault(self):
        """Absolute path of the bundled default buildozer spec template."""
        return join(appPath, 'spec', 'buildozer-default.spec')

    @property
    def cmdBuild(self):
        """Build command template; the user's override is used when enabled."""
        default = self.appDefaults['override_build_cmd']
        if self.config.get('override_build', False):
            return self.config.get('override_build_cmd', default)
        return default
@property
def cmdClean(self):
default = self.appDefaults['override_clean_cmd']
if self.config.get('override_distclean', False):
return self.config.get('override_clean_cmd', default)
return default
    @property
    def cmdDeploy(self):
        """Deploy command template; the user's override is used when enabled."""
        default = self.appDefaults['override_deploy_cmd']
        if self.config.get('override_deploy', False):
            return self.config.get('override_deploy_cmd', default)
        return default

    @property
    def cmdDistclean(self):
        """Distclean command template; the user's override is used when enabled."""
        default = self.appDefaults['override_distclean_cmd']
        if self.config.get('override_distclean', False):
            return self.config.get('override_distclean_cmd', default)
        return default

    @property
    def cmdRun(self):
        """Run command template; the user's override is used when enabled."""
        default = self.appDefaults['override_run_cmd']
        if self.config.get('override_run', False):
            return self.config.get('override_run_cmd', default)
        return default

    @property
    def cmdServe(self):
        """Serve command template; the user's override is used when enabled."""
        default = self.appDefaults['override_serve_cmd']
        if self.config.get('override_serve', False):
            return self.config.get('override_serve_cmd', default)
        return default

    @property
    def config(self):
        """The live settings dict (defaults overlaid with saved values)."""
        return self._config

    @property
    def settingsPath(self):
        """Path of the settings JSON file; per-user on Linux, CWD elsewhere."""
        if platform == 'linux':
            home = expanduser('~')
            base = '{0}/.local/share/buildertron/'.format(home)
            # NOTE(review): os.makedirs(base, exist_ok=True) would avoid
            # shelling out here - confirm before changing.
            os.system('mkdir -p {0}'.format(base))
            path = join(base, 'settings.json')
        else:
            path = 'settings.json'
        return path

    @property
    def specfileDir(self):
        """Directory containing the currently loaded spec file."""
        return dirname(self._specfile)

    @property
    def logcat(self):
        """Current logcat toggle from the toolbar combo ('off' or 'on')."""
        return self.comboLogcat.currentText()

    @property
    def targetname(self):
        """Currently selected buildozer target (e.g. 'android')."""
        return self.comboTarget.currentText()
def checkCommandOutput(self, cmd):
out = ''
try:
out = check_output(cmd).decode('utf8')
except Exception:
pass
return out
    def clearRecentFiles(self):
        """Empty the recent-files list and persist the change."""
        self.config['recent_files'] = []
        self.updateRecentFilesMenu()
        self.appSaveConfig()

    def closeEvent(self, event):
        """Qt close hook: optionally prompt to save, then persist settings."""
        # close the splash if it is still up (it may not exist yet)
        try:
            self.splash.close()
        except Exception:
            pass
        if not self.config.get('ask_save_on_close', True):
            event.accept()
            return
        confirm = QMessageBox(self)
        confirm.setWindowTitle('Save changes?')
        confirm.setText('Save changes before closing')
        confirm.setStandardButtons(QMessageBox.No | QMessageBox.Cancel | QMessageBox.Save)
        ret = confirm.exec()
        if ret == QMessageBox.Save:
            save = self.onSave()
            # onSave() returns False when the user cancels the file dialog
            if save is False:
                event.ignore()
                return
        elif ret == QMessageBox.Cancel:
            event.ignore()
            return
        self.appSaveConfig()
        event.accept()

    def closeFile(self):
        """Placeholder - currently only logs the call."""
        logging.info('MainWindow->closeFile')
def collectUiObjects(self):
logging.debug(self.ui.__dict__.keys())
for n, obj in self.ui.__dict__.items():
if n.startswith('checkbox_'):
self.checkBoxes[n[9:]] = obj
elif n.startswith('lineedit_'):
self.specObjects[n[9:]] = obj
elif n.startswith('lineedit2_'):
self.specObjects[n[10:]] = obj
elif n.startswith('checkbox2_'):
self.specObjects[n[10:]] = obj
elif n.startswith('combobox_'):
self.specObjects[n[9:]] = obj
elif n.startswith('spinbox_'):
self.specObjects[n[8:]] = obj
elif n.startswith('colorpick_'):
obj.clicked.connect(self.openPresplashPicker)
elif n.startswith('permissions_'):
obj.clicked.connect(self.openAndroidPermissions)
elif n.startswith('toolbutton_'):
obj.clicked.connect(partial(self.onToolButton, name=n[11:]))
    def createToolbar(self):
        """Build the main toolbar: file actions, buildozer commands, combos."""
        self.toolbar = self.addToolBar('Toolbar')
        self.toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
        self.toolbar.setIconSize(QSize(32, 32))
        self.toolbar.setMovable(False)
        # file actions
        act = QAction(QIcon(self.iconPath('new.png')), 'New', self)
        self.toolbar.addAction(act)
        act.triggered.connect(self.onNew)
        act = QAction(QIcon(self.iconPath('open.png')), 'Open', self)
        self.toolbar.addAction(act)
        act.triggered.connect(self.onLoad)
        act = QAction(QIcon(self.iconPath('save.png')), 'Save', self)
        self.toolbar.addAction(act)
        act.triggered.connect(self.onSave)
        act = QAction(QIcon(self.iconPath('saveas.png')), 'Save As...', self)
        self.toolbar.addAction(act)
        act.triggered.connect(self.onSaveAs)
        self.toolbar.addSeparator()
        # buildozer commands; hover updates the tooltip with the real command
        act = QAction(QIcon(self.iconPath('distclean.png')), 'Dist Clean', self)
        self.toolbar.addAction(act)
        act.triggered.connect(partial(self.onCmd, act))
        act.setToolTip('buildozer distclean')
        act = QAction(QIcon(self.iconPath('clean.png')), 'Clean', self)
        self.toolbar.addAction(act)
        act.triggered.connect(partial(self.onCmd, act))
        act.hovered.connect(partial(self.onToolbarHover, act))
        label = QLabel(self)
        label.setText(' Build: ')
        self.toolbar.addWidget(label)
        self.comboBuild = QComboBox(self)
        self.toolbar.addWidget(self.comboBuild)
        self.comboBuild.addItems(['debug', 'release'])
        act = QAction(QIcon(self.iconPath('build.png')), 'Build', self)
        self.toolbar.addAction(act)
        act.triggered.connect(partial(self.onCmd, act))
        act.hovered.connect(partial(self.onToolbarHover, act))
        self.toolbar.addSeparator()
        act = QAction(QIcon(self.iconPath('deploy.png')), 'Deploy', self)
        self.toolbar.addAction(act)
        act.triggered.connect(partial(self.onCmd, act))
        act.hovered.connect(partial(self.onToolbarHover, act))
        act = QAction(QIcon(self.iconPath('run.png')), 'Run', self)
        self.toolbar.addAction(act)
        act.triggered.connect(partial(self.onCmd, act))
        act.hovered.connect(partial(self.onToolbarHover, act))
        act = QAction(QIcon(self.iconPath('serve.png')), 'Serve', self)
        self.toolbar.addAction(act)
        act.triggered.connect(partial(self.onCmd, act))
        act.hovered.connect(partial(self.onToolbarHover, act))
        # selection combos: target, logcat, log level
        label = QLabel(self)
        label.setText(' Target: ')
        self.toolbar.addWidget(label)
        self.comboTarget = QComboBox(self)
        self.toolbar.addWidget(self.comboTarget)
        label = QLabel(self)
        label.setText(' Logcat: ')
        self.toolbar.addWidget(label)
        self.comboLogcat = QComboBox(self)
        self.toolbar.addWidget(self.comboLogcat)
        self.comboLogcat.addItems(['off', 'on'])
        label = QLabel(self)
        label.setText(' Log Level: ')
        self.toolbar.addWidget(label)
        self.comboLogLevel = QComboBox(self)
        self.toolbar.addWidget(self.comboLogLevel)
        self.comboLogLevel.addItems(['2', '1', '0'])
        # log level is written into the spec like any other value widget
        self.specObjects['log_level'] = self.comboLogLevel
def getObjectValue(self, object):
# checkBox?
value = None
try:
value = object.checkState()
if value == 0:
value = False
else:
value = True
return value
except Exception:
pass
# lineEdit?
try:
value = object.text()
return value
except Exception:
pass
# comboBox?
try:
value = object.currentText()
return value
except Exception:
pass
# spinBox?
try:
value = object.value()
return value
except Exception:
pass
return value
def getSpec(self):
"""Returns specification"""
spec = {}
for k, v in self.specObjects.items():
if not v.isEnabled():
spec[k] = None
continue
spec[k] = self.getObjectValue(v)
return spec
def getSpecOutput(self):
# TODO - needs refactoring
target = self.getObjectValue(self.comboTarget)
logcat = self.getObjectValue(self.comboLogcat)
build = self.getObjectValue(self.comboBuild)
app_blacklist = None
use_app_blacklist = self.config.get('use_app_blacklist')
if use_app_blacklist:
app_blacklist = self.config.get('app_blacklist')
app_blacklist = app_blacklist.replace('\n', '')
app_blacklist = app_blacklist.split(',')
buildozer_blacklist = None
use_buildozer_blacklist = self.config.get('use_buildozer_blacklist')
if use_buildozer_blacklist:
buildozer_blacklist = self.config.get('buildozer_blacklist')
buildozer_blacklist = buildozer_blacklist.replace('\n', '')
buildozer_blacklist = buildozer_blacklist.split(',')
app_substitute = {}
appsub = {}
use_app_substitute = self.config.get('use_app_substitute')
if use_app_substitute:
appsub = self.config.get('app_substitute')
appsub = appsub.replace('\n', '')
appsub = appsub.split(',')
appsub = [k for k in appsub if '=' in k]
for k in appsub:
try:
key, sub = k.split('=')
app_substitute[key] = sub
except Exception:
continue
buildozer_substitute = {}
buildozersub = {}
use_buildozer_substitute = self.config.get('use_buildozer_substitute')
if use_buildozer_substitute:
buildozersub = self.config.get('buildozer_substitute')
buildozersub = buildozersub.replace('\n', '')
buildozersub = buildozersub.split(',')
buildozersub = [k for k in buildozersub if '=' in k]
for k in buildozersub:
try:
key, sub = k.split('=')
buildozer_substitute[key] = sub
except Exception:
continue
spec = self.getSpec()
trans = translate_objects.translate
app_text = ''
buildozer_text = ''
if self.config.get('use_custom_spec', False):
custom_spec = self.config.get('custom_spec')
else:
custom_spec = self.appDefaults['custom_spec']
custom_spec += '[{0}]\n'.format('buildertron')
custom_spec += '{0} = {1}\n'.format('targetname', target)
custom_spec += '{0} = {1}\n'.format('logcat', logcat)
custom_spec += '{0} = {1}\n'.format('build', build)
if app_substitute:
custom_spec += 'app_substitute = {0}\n'.format(list(app_substitute.items()))
if buildozer_substitute:
custom_spec += 'buildozer_substitute = {0}\n'.format(list(buildozer_substitute.items()))
for k, v in trans['app'].items():
if v not in spec:
continue
value = spec[v]
if value is None:
continue
if app_blacklist and k in app_blacklist:
continue
j = None
if app_substitute:
j = app_substitute.get(k, None)
if j:
k = j
app_text += ('{0} = {1}\n'.format(k, str(value)))
for k, v in trans['buildozer'].items():
if v not in spec:
continue
value = spec[v]
if value is None:
continue
if buildozer_blacklist and k in buildozer_blacklist:
continue
if buildozer_substitute:
j = buildozer_substitute.get(k, None)
j = None
if j:
k = j
buildozer_text += ('{0} = {1}\n'.format(k, str(value)))
result = custom_spec.format(app=app_text, buildozer=buildozer_text)
return result
def loadDefaultSpec(self):
logging.info('MainWindow->loadDefaultSpec')
self.loadSpec('spec/buildozer-default.spec')
for chk in self.checkBoxes.values():
chk.setCheckState(0)
    def loadRecentFile(self):
        """Open the most recent spec file at startup, if it still exists."""
        recent_files = self.config.get('recent_files', [])
        try:
            # recent_files[0] raises IndexError when the list is empty
            file = recent_files[0]
            if exists(file):
                self.loadSpec(file)
                self._specfile = file
                self.updateTitle()
        except Exception as e:
            logging.info('Failed to open recent file. {0}'.format(e))
        self.refreshPreview()
def loadSpec(self, specfile):
"""Loads buildozer spec
"""
logging.info('MainWindow->loadSpec')
logging.info(specfile)
specfile = realpath(specfile)
if not exists(specfile):
return
config = SafeConfigParser()
trans = translate_objects.translate
config.read([specfile])
config_dict = {}
for s in config.sections():
config_dict[s] = {}
for k, v in config.items(s):
try:
config_dict[s][k] = v
except Exception as e:
continue
app_substitute = {}
buildozer_substitute = {}
buildertron_conf = {}
try:
buildertron_conf = config_dict['buildertron']
except Exception as e:
logging.info(e)
if buildertron_conf:
target = buildertron_conf.get('targetname', None)
if target:
index = self.comboTarget.findText(target)
self.comboTarget.setCurrentIndex(index)
logcat = buildertron_conf.get('logcat', None)
if logcat:
index = self.comboLogcat.findText(logcat)
self.comboLogcat.setCurrentIndex(index)
build = buildertron_conf.get('build', None)
if build:
index = self.comboBuild.findText(str(build))
self.comboBuild.setCurrentIndex(index)
# unsubstitute values
app_substitute = buildertron_conf.get('app_substitute', {})
try:
app_substitute = {v: k for k, v in make_tuple(str((app_substitute)))}
except Exception as e:
app_substitute = {}
logging.info(e)
try:
buildozer_substitute = {v: k for k, v in make_tuple(str(buildozer_substitute))}
except Exception as e:
buildozer_substitute = {}
logging.info(e)
for section in config_dict:
if section == 'buildertron':
continue
for key, value in config_dict[section].items():
if section == 'app':
key = app_substitute.get(key, key)
elif section == 'buildozer':
key = buildozer_substitute.get(key, key)
t = trans[section].get(key)
obj = self.specObjects.get(t)
if not obj:
continue
try:
self.setObjectValue(obj, value)
except Exception as e:
logging.info(e)
# also try to check box if it exists
try:
self.checkBoxes[t].setCheckState(2)
except Exception as e:
logging.info(e)
return specfile
    def iconPath(self, icon):
        """Return the path of a toolbar icon file under the app directory."""
        return join(appPath, 'icons', icon)

    def initSplash(self):
        """Show the splash screen and auto-close it after SPLASH_TIMEOUT ms."""
        self.splash = QSplashScreen(QPixmap(appPath + "splash.png"), Qt.WindowStaysOnTopHint)
        self.splash.show()
        QTimer.singleShot(SPLASH_TIMEOUT, lambda: self.splash.close())

    def onButton(self):
        # NOTE(review): debug leftover - prints a constant and appears
        # unconnected in this file; candidate for removal.
        print(1)
def onCmd(self, act):
label = act.text()
if not self._specfile and label != 'Dist Clean':
return
if label == 'Dist Clean':
label = 'Distclean'
elif not self._specfile:
return
try:
cmd = getattr(self, 'cmd' + label)
except Exception as e:
return
logcat = self.logcat
buildmode = self.buildmode
targetname = self.targetname
if logcat == 'on' and label not in ['Distclean', 'Clean']:
logcat = ' logcat'
else:
logcat = ''
cmd = '{c}{logcat}'.format(c=cmd.format(target=targetname, buildmode=buildmode), logcat=logcat)
if label == 'Distclean':
self.runCmd(cmd=cmd)
return
self.runCmd(cwd=self.specfileDir, cmd=cmd)
def onLoad(self, **kwargs):
logging.info('MainWindow->onLoad')
ask_save = kwargs.get('ask_save', True)
if ask_save:
confirm = QMessageBox(self)
confirm.setWindowTitle("Save changes?")
confirm.setText("Save changes before loading another file?")
confirm.setStandardButtons(QMessageBox.No | QMessageBox.Cancel | QMessageBox.Save)
ret = confirm.exec()
if ret == QMessageBox.Save:
self.onSave()
elif ret == QMessageBox.Cancel:
return
dlg = QFileDialog(self)
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
options |= QFileDialog.ExistingFiles
while True:
specfile, _ = dlg.getOpenFileName(self, 'Load buildozer.spec', 'buildozer.spec', options=options)
if not specfile:
return
if not basename(specfile) == 'buildozer.spec':
continue
break
if not basename(specfile) == 'buildozer.spec':
return
self.loadSpec(specfile)
self._specfile = specfile
self.updateTitle()
self.updateRecentFiles()
def onMenubar(self, action):
logging.info('MainWindow->Menubar->{}'.format(action.text()))
text = action.text()
if text == 'About...':
self.openAbout()
elif text == 'Build':
self.onCmd(text)
elif text == 'Clean':
self.onCmd(text)
elif text == 'Clear Recent Files':
self.clearRecentFiles()
elif text == 'Dist Clean':
self.onCmd(text)
elif text == 'Deploy':
self.onCmd(text)
elif text == 'Serve':
self.onCmd(text)
elif text == 'Exit':
self.close()
elif text == 'Load':
self.onLoad()
elif text == 'New':
self.onNew()
elif text == 'Settings':
self.openSettings()
elif text == 'Save':
self.onSave()
elif text == 'Save As...':
self.onSaveAs()
elif text == 'Submit Issue':
self.openSubmitIssue()
    def onNew(self):
        """Reset to the default template after offering to save changes."""
        logging.info('MainWindow->onNew')
        confirm = QMessageBox(self)
        confirm.setWindowTitle("Save changes?")
        confirm.setText("Save changes before creating new file?")
        confirm.setStandardButtons(QMessageBox.No | QMessageBox.Cancel | QMessageBox.Save)
        ret = confirm.exec()
        if ret == QMessageBox.Save:
            self.onSave()
        elif ret == QMessageBox.Cancel:
            return
        self.loadDefaultSpec()
        self._specfile = None
        self.updateTitle()
    def onRecentFile(self, file):
        """Load *file* from the recent-files menu after user confirmation."""
        logging.info('MainWindow->onRecentFile')
        confirm = QMessageBox(self)
        confirm.setWindowTitle('Load Recent File?')
        confirm.setText('Current changes will be lost. Continue?')
        confirm.setStandardButtons(QMessageBox.Cancel | QMessageBox.Yes)
        ret = confirm.exec()
        if ret != QMessageBox.Yes:
            return
        # start from a clean default state before loading the file
        self.loadDefaultSpec()
        self._specfile = None
        file = self.loadSpec(file)
        if file:
            self._specfile = file
            self.updateTitle()
    def onSave(self, **kwargs):
        """Write the generated spec to the current file, prompting if unset.

        Returns False when the user cancels the save dialog; None otherwise
        (closeEvent relies on the `is False` distinction).
        """
        logging.info('MainWindow->onSave')
        msg = kwargs.get('msg', 'Save buildozer.spec')
        if not self._specfile:
            dlg = QFileDialog(self)
            options = QFileDialog.Options()
            options |= QFileDialog.DontUseNativeDialog
            # insist on a file literally named buildozer.spec
            while True:
                specfile, _ = dlg.getSaveFileName(self, msg, 'buildozer.spec', options=options)
                if not specfile:
                    return False
                if not basename(specfile) == 'buildozer.spec':
                    continue
                break
            self._specfile = specfile
        specfile = self._specfile
        result = self.getSpecOutput()
        with open(specfile, 'w') as file:
            file.write(result)
        self.updateRecentFiles()
        self.appSaveConfig()
        self.updateTitle()
def onSaveAs(self):
dlg = QFileDialog(self)
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
while True:
specfile, _ = dlg.getSaveFileName(self, 'Save to buildozer.spec', 'buildozer.spec', options=options)
if not specfile:
return
if not basename(specfile) == 'buildozer.spec':
continue
break
self._specfile = specfile
self.onSave()
    def onToolbarHover(self, act):
        """Update a toolbar action's tooltip with the command it would run."""
        label = act.text()
        try:
            cmd = getattr(self, 'cmd' + label)
        except Exception as e:
            # NOTE(review): labels containing a space ('Dist Clean') can
            # never resolve a cmdXxx attribute, so their tooltip is not
            # refreshed here - confirm this is intended.
            return
        logcat = self.logcat
        buildmode = self.buildmode
        targetname = self.targetname
        if logcat == 'on' and label not in ['Dist Clean', 'Clean']:
            logcat = ' logcat'
        else:
            logcat = ''
        cmd = '{c}{logcat}'.format(c=cmd.format(target=targetname, buildmode=buildmode), logcat=logcat)
        act.setToolTip('cmd: {0}'.format(cmd))
def onToolButton(self, name):
logging.info('MainWindow->onToolButton->{0}'.format(name))
object = self.specObjects[name]
dlg = QFileDialog(self)
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
options |= QFileDialog.DirectoryOnly
folder = dlg.getExistingDirectory(self, 'Choose directory', '.', options=options)
if not folder:
return
object.setText(folder)
    def openAbout(self):
        """Show the About dialog."""
        aboutdialog.AboutDialog(self)

    def openAndroidPermissions(self):
        """Open the permissions picker, writing the result back to the field."""
        perm = self.specObjects['android_permissions']
        ret = androidpermissionsdialog.AndroidPermissionsDialog(self)
        ret.setValue(perm.text())
        ret.accepted.connect(lambda: perm.setText(ret.value))
        ret.open()

    def openPresplashPicker(self):
        """Open a color dialog pre-set to the current presplash color."""
        object = self.specObjects['android_presplash_color']
        dlg = QColorDialog(self)
        c = QColor(object.currentText())
        dlg.setCurrentColor(c)
        color = dlg.getColor()
        if not color.isValid():
            return
        object.setEditText(color.name())

    def openSettings(self):
        """Open the settings dialog; applies the result via updateConfig()."""
        dlg = settingsdialog.SettingsDialog(self)
        dlg.setDefaultConfig(self.appDefaults)
        dlg.setConfig(self.config)
        dlg.accepted.connect(lambda: self.updateConfig(dlg.value()))
        dlg.exec()

    def openSubmitIssue(self):
        """Show the Submit Issue dialog."""
        submitissuedialog.SubmitIssueDialog(self)
def refreshPreview(self):
logging.info('Refreshing preview text')
preview = self.ui.previewText
preview.clear()
result = self.getSpecOutput()
preview.insertPlainText(result)
    def refreshSystemDetails(self):
        """Fill the System tab with buildozer / pip dependency information."""
        logging.info('refreshSystemDetails')
        textEdit = self.ui.textEditSystem
        textEdit.clear()
        textEdit.insertPlainText('cmd: buildozer version\n')
        out = self.checkCommandOutput('buildozer version')
        # drop buildozer's noisy banner line from the output
        out = out.replace('# Check configuration tokens', '')
        if out:
            textEdit.insertPlainText('{0}\n'.format(out))
        else:
            # .format(out) is a no-op here (the string has no placeholder)
            textEdit.insertPlainText('No output. Is buildozer installed?\n'.format(out))
        textEdit.insertPlainText('Pip checking dependency versions.\n')
        textEdit.insertPlainText('Note: This check may be using incorrect pip to check dependencies.')
        textEdit.insertPlainText('Future releases should improve this method.\n\n')
        out = self.checkCommandOutput('pip --version')
        textEdit.insertPlainText('{0}'.format(out))
        # report each buildozer dependency that pip knows about
        for d in ['cython', 'jinja2', 'pexpect', 'virtualenv']:
            cmd = 'pip show {0}'.format(d)
            textEdit.insertPlainText('cmd: {0}\n'.format(cmd))
            out = self.checkCommandOutput('{0}'.format(cmd))
            textEdit.insertPlainText('{0}\n'.format(out))
            if not out:
                textEdit.insertPlainText('No output. Is {} installed?\n\n'.format(d))
def runCmd(self, cwd='.', cmd=''):
logging.info('runCmd')
print('runCmd')
override_terminal = self.config.get('override_terminal', self.appDefaults['override_terminal'])
override_terminal_cmd = self.config.get('override_terminal_cmd', self.appDefaults['override_terminal_cmd'])
if override_terminal:
override_terminal_cmd = self.config.get('override_terminal', self.appDefaults['override_terminal'])
if platform == 'win32':
return
elif platform == 'linux':
c = '{term} "'.format(term=override_terminal_cmd)
c += 'cd {cwd} '.format(cwd=cwd)
c += '&& {cmd}"'.format(cmd=cmd)
logging.info(c)
call(c, stdin=PIPE, shell=True)
    def setObjectValue(self, object, value):
        """Duck-typed write of *value* into a widget.

        Tries checkbox, line edit, combo box, spin box in turn; the first
        widget API that does not raise wins.
        """
        # checkBox?
        try:
            v = value
            # NOTE(review): only the literal string 'True' checks the box;
            # values like '1' or 'true' come out unchecked - confirm intent.
            if v == 'True':
                v = 2
            else:
                v = 0
            object.setCheckState(v)
            return
        except Exception:
            pass
        # lineEdit?
        try:
            object.setText(value)
            return
        except Exception:
            pass
        # comboBox?
        try:
            index = object.findText(str(value))
            object.setCurrentIndex(index)
            return
        except Exception:
            pass
        # spinBox?
        try:
            object.setValue(int(value))
            return
        except Exception:
            pass
def updateConfig(self, config):
self.config.update(config)
if self.config['keep_recent_files'] is False:
self.config['recent_files'] = []
self.updateRecentFilesMenu()
self.appSaveConfig()
self.updateTargetNamesMenu()
def updateRecentFiles(self):
logging.info('MainWindow->updateRecentFiles')
keep_recent_files = self.config.get('keep_recent_files', True)
if keep_recent_files is False:
self.config['recent_files'] = []
else:
recent_files = self.config.get('recent_files', [])
if self._specfile in recent_files:
idx = recent_files.index(self._specfile)
del recent_files[idx]
new = set()
for f in recent_files:
new.add(f)
new = list(new)
new.insert(0, self._specfile)
self.config['recent_files'] = new[:10]
self.updateRecentFilesMenu()
self.appSaveConfig()
    def updateRecentFilesMenu(self):
        """Rebuild the Recent Files menu from config['recent_files']."""
        recent_files = self.config.get('recent_files', [])
        self.ui.menuRecentFiles.clear()
        for file in recent_files:
            act = QAction(self)
            act.setText(file)
            self.ui.menuRecentFiles.addAction(act)
            act.triggered.connect(partial(self.onRecentFile, file))
        self.ui.menuRecentFiles.addSeparator()
        # trailing "Clear Recent Files" entry, disabled when the list is empty
        act = QAction(self)
        act.setText('Clear Recent Files')
        self.ui.menuRecentFiles.addAction(act)
        if not recent_files:
            act.setEnabled(False)
def updateTargetNamesMenu(self):
custom_targets = self.config.get('custom_target', [])
custom_targets = custom_targets.replace('\n', '')
self.comboTarget.clear()
self.comboTarget.addItems(['android', 'android_new', 'android_old', 'ios', 'osx'])
self.comboTarget.addItems(custom_targets.split(','))
    def updateTitle(self):
        """Set the window title to app/version plus the loaded spec path."""
        logging.info('MainWindow->updateTitle')
        title = 'BuilderTron v{0}'.format(__version__)
        if self._specfile:
            title += ' - {0}'.format(self._specfile)
        self.setWindowTitle(title)

    def updateUI(self):
        """One-time population of dynamic widgets after setup."""
        self.specObjects['android_presplash_color'].addItems(sorted(ANDROID_PRESPLASH_COLORS))
        self.updateTargetNamesMenu()
def check_root():
    """
    Print a warning and exit if running with root/administrator privileges.

    Bug fix: previously 'euid' was assigned only on platforms providing
    os.geteuid() or on win32; on any other platform the final check raised
    NameError. It now defaults to False (not elevated).
    """
    euid = False
    try:
        euid = os.geteuid() == 0
    except AttributeError:
        if sys.platform == 'win32':
            import ctypes
            euid = ctypes.windll.shell32.IsUserAnAdmin() != 0
    if euid:
        print('Running as root! Quitting')
        sys.exit()
def process_sys_args():
    """Parse 'key=value' command-line arguments into a lowercased dict.

    Arguments without '=' are ignored. Bug fix: split on the first '='
    only, so values that themselves contain '=' are kept intact instead of
    being truncated at the second '='.
    """
    res = {}
    for arg in sys.argv[1:]:
        if "=" not in arg:
            continue
        key, value = arg.split("=", 1)
        res[key.lower()] = value.lower()
    return res
def set_logging_level():
    """Configure root logging from the --verbose command-line option.

    Does nothing when the option is absent or not one of the known level
    strings ('1'..'5').
    """
    level = LOG_LEVELS.get(SYS_ARGS.get("--verbose"))
    if level is not None:
        logging.basicConfig(level=level)
def main():
    """Application entry point: check privileges, parse args, run the Qt loop."""
    check_root()
    SYS_ARGS.update(process_sys_args())
    set_logging_level()
    app = QApplication(sys.argv)
    # NOTE(review): the window is not bound to a name - this relies on Qt
    # holding a reference for the lifetime of the event loop; confirm.
    MainWindow()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
|
swprojects/Buildertron
|
buildertron/forms/uisettingsdialog.py
|
<filename>buildertron/forms/uisettingsdialog.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'settingsdialog.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SettingsDialog(object):
    """Auto-generated (pyuic5) UI scaffold for the Settings dialog.

    NOTE(review): this module is regenerated from settingsdialog.ui, so the
    fixes applied here (two user-visible typo corrections and one duplicate
    signal connection removed) should also be made in the .ui source file,
    or they will be lost on the next regeneration.
    """

    def setupUi(self, SettingsDialog):
        """Build the widget tree, lay out the tab pages and wire signals."""
        SettingsDialog.setObjectName("SettingsDialog")
        SettingsDialog.setEnabled(True)
        SettingsDialog.resize(700, 500)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(SettingsDialog)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setContentsMargins(5, 5, 5, 5)
        self.verticalLayout.setObjectName("verticalLayout")
        self.tabWidget = QtWidgets.QTabWidget(SettingsDialog)
        self.tabWidget.setEnabled(True)
        self.tabWidget.setObjectName("tabWidget")
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.tab_3)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.checkBox_AskSaveOnClose = QtWidgets.QCheckBox(self.tab_3)
        self.checkBox_AskSaveOnClose.setEnabled(True)
        self.checkBox_AskSaveOnClose.setChecked(True)
        self.checkBox_AskSaveOnClose.setObjectName("checkBox_AskSaveOnClose")
        self.gridLayout_4.addWidget(self.checkBox_AskSaveOnClose, 2, 0, 1, 1)
        self.checkBox_RecentFiles = QtWidgets.QCheckBox(self.tab_3)
        self.checkBox_RecentFiles.setChecked(True)
        self.checkBox_RecentFiles.setObjectName("checkBox_RecentFiles")
        self.gridLayout_4.addWidget(self.checkBox_RecentFiles, 1, 0, 1, 1)
        self.checkBox_SplashScreen = QtWidgets.QCheckBox(self.tab_3)
        self.checkBox_SplashScreen.setEnabled(True)
        self.checkBox_SplashScreen.setChecked(True)
        self.checkBox_SplashScreen.setObjectName("checkBox_SplashScreen")
        self.gridLayout_4.addWidget(self.checkBox_SplashScreen, 0, 0, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_4.addItem(spacerItem, 4, 0, 1, 1)
        self.tabWidget.addTab(self.tab_3, "")
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.tab)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.checkBox_Distclean = QtWidgets.QCheckBox(self.tab)
        self.checkBox_Distclean.setEnabled(True)
        self.checkBox_Distclean.setObjectName("checkBox_Distclean")
        self.gridLayout_3.addWidget(self.checkBox_Distclean, 1, 0, 1, 1)
        self.checkBox_Terminal = QtWidgets.QCheckBox(self.tab)
        self.checkBox_Terminal.setEnabled(True)
        self.checkBox_Terminal.setObjectName("checkBox_Terminal")
        self.gridLayout_3.addWidget(self.checkBox_Terminal, 9, 0, 1, 1)
        self.lineEdit_Clean = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_Clean.setEnabled(False)
        self.lineEdit_Clean.setObjectName("lineEdit_Clean")
        self.gridLayout_3.addWidget(self.lineEdit_Clean, 2, 1, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_3.addItem(spacerItem1, 10, 1, 1, 1)
        self.lineEdit_Deploy = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_Deploy.setEnabled(False)
        self.lineEdit_Deploy.setObjectName("lineEdit_Deploy")
        self.gridLayout_3.addWidget(self.lineEdit_Deploy, 4, 1, 1, 1)
        self.checkBox_Run = QtWidgets.QCheckBox(self.tab)
        self.checkBox_Run.setEnabled(True)
        self.checkBox_Run.setObjectName("checkBox_Run")
        self.gridLayout_3.addWidget(self.checkBox_Run, 5, 0, 1, 1)
        self.lineEdit_Run = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_Run.setEnabled(False)
        self.lineEdit_Run.setObjectName("lineEdit_Run")
        self.gridLayout_3.addWidget(self.lineEdit_Run, 5, 1, 1, 1)
        self.lineEdit_Serve = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_Serve.setEnabled(False)
        self.lineEdit_Serve.setObjectName("lineEdit_Serve")
        self.gridLayout_3.addWidget(self.lineEdit_Serve, 6, 1, 1, 1)
        self.checkBox_Deploy = QtWidgets.QCheckBox(self.tab)
        self.checkBox_Deploy.setEnabled(True)
        self.checkBox_Deploy.setObjectName("checkBox_Deploy")
        self.gridLayout_3.addWidget(self.checkBox_Deploy, 4, 0, 1, 1)
        self.checkBox_Serve = QtWidgets.QCheckBox(self.tab)
        self.checkBox_Serve.setEnabled(True)
        self.checkBox_Serve.setObjectName("checkBox_Serve")
        self.gridLayout_3.addWidget(self.checkBox_Serve, 6, 0, 1, 1)
        self.checkBox_Build = QtWidgets.QCheckBox(self.tab)
        self.checkBox_Build.setEnabled(True)
        self.checkBox_Build.setObjectName("checkBox_Build")
        self.gridLayout_3.addWidget(self.checkBox_Build, 3, 0, 1, 1)
        self.lineEdit_Distclean = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_Distclean.setEnabled(False)
        self.lineEdit_Distclean.setObjectName("lineEdit_Distclean")
        self.gridLayout_3.addWidget(self.lineEdit_Distclean, 1, 1, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.tab)
        self.label_10.setObjectName("label_10")
        self.gridLayout_3.addWidget(self.label_10, 8, 1, 1, 1)
        self.lineEdit_Terminal = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_Terminal.setEnabled(False)
        self.lineEdit_Terminal.setObjectName("lineEdit_Terminal")
        self.gridLayout_3.addWidget(self.lineEdit_Terminal, 9, 1, 1, 1)
        self.label_11 = QtWidgets.QLabel(self.tab)
        self.label_11.setText("")
        self.label_11.setObjectName("label_11")
        self.gridLayout_3.addWidget(self.label_11, 7, 1, 1, 1)
        self.checkBox_Clean = QtWidgets.QCheckBox(self.tab)
        self.checkBox_Clean.setEnabled(True)
        self.checkBox_Clean.setObjectName("checkBox_Clean")
        self.gridLayout_3.addWidget(self.checkBox_Clean, 2, 0, 1, 1)
        self.lineEdit_Build = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_Build.setEnabled(False)
        self.lineEdit_Build.setObjectName("lineEdit_Build")
        self.gridLayout_3.addWidget(self.lineEdit_Build, 3, 1, 1, 1)
        self.label_6 = QtWidgets.QLabel(self.tab)
        self.label_6.setObjectName("label_6")
        self.gridLayout_3.addWidget(self.label_6, 0, 1, 1, 1)
        self.default_Clean = QtWidgets.QPushButton(self.tab)
        self.default_Clean.setObjectName("default_Clean")
        self.gridLayout_3.addWidget(self.default_Clean, 2, 2, 1, 1)
        self.default_Build = QtWidgets.QPushButton(self.tab)
        self.default_Build.setObjectName("default_Build")
        self.gridLayout_3.addWidget(self.default_Build, 3, 2, 1, 1)
        self.default_Deploy = QtWidgets.QPushButton(self.tab)
        self.default_Deploy.setObjectName("default_Deploy")
        self.gridLayout_3.addWidget(self.default_Deploy, 4, 2, 1, 1)
        self.default_Run = QtWidgets.QPushButton(self.tab)
        self.default_Run.setObjectName("default_Run")
        self.gridLayout_3.addWidget(self.default_Run, 5, 2, 1, 1)
        self.default_Serve = QtWidgets.QPushButton(self.tab)
        self.default_Serve.setObjectName("default_Serve")
        self.gridLayout_3.addWidget(self.default_Serve, 6, 2, 1, 1)
        self.default_Terminal = QtWidgets.QPushButton(self.tab)
        self.default_Terminal.setObjectName("default_Terminal")
        self.gridLayout_3.addWidget(self.default_Terminal, 9, 2, 1, 1)
        self.default_Distclean = QtWidgets.QPushButton(self.tab)
        self.default_Distclean.setObjectName("default_Distclean")
        self.gridLayout_3.addWidget(self.default_Distclean, 1, 2, 1, 1)
        self.tabWidget.addTab(self.tab, "")
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_2)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.textEdit_CustomTarget = QtWidgets.QPlainTextEdit(self.tab_2)
        self.textEdit_CustomTarget.setObjectName("textEdit_CustomTarget")
        self.gridLayout_2.addWidget(self.textEdit_CustomTarget, 5, 1, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.tab_2)
        self.label_3.setObjectName("label_3")
        self.gridLayout_2.addWidget(self.label_3, 1, 1, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.tab_2)
        self.label_4.setObjectName("label_4")
        self.gridLayout_2.addWidget(self.label_4, 3, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.tab_2)
        self.label_5.setObjectName("label_5")
        self.gridLayout_2.addWidget(self.label_5, 0, 1, 1, 1)
        self.tabWidget.addTab(self.tab_2, "")
        self.tabBlacklist = QtWidgets.QWidget()
        self.tabBlacklist.setObjectName("tabBlacklist")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.tabBlacklist)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.checkBox_BlacklistApp = QtWidgets.QCheckBox(self.tabBlacklist)
        self.checkBox_BlacklistApp.setObjectName("checkBox_BlacklistApp")
        self.gridLayout_5.addWidget(self.checkBox_BlacklistApp, 1, 0, 1, 1)
        self.checkBox_BlacklistBuildozer = QtWidgets.QCheckBox(self.tabBlacklist)
        self.checkBox_BlacklistBuildozer.setObjectName("checkBox_BlacklistBuildozer")
        self.gridLayout_5.addWidget(self.checkBox_BlacklistBuildozer, 4, 0, 1, 1)
        self.textEdit_BlacklistApp = QtWidgets.QPlainTextEdit(self.tabBlacklist)
        self.textEdit_BlacklistApp.setEnabled(False)
        self.textEdit_BlacklistApp.setObjectName("textEdit_BlacklistApp")
        self.gridLayout_5.addWidget(self.textEdit_BlacklistApp, 3, 0, 1, 1)
        self.textEdit_BlacklistBuildozer = QtWidgets.QPlainTextEdit(self.tabBlacklist)
        self.textEdit_BlacklistBuildozer.setEnabled(False)
        self.textEdit_BlacklistBuildozer.setObjectName("textEdit_BlacklistBuildozer")
        self.gridLayout_5.addWidget(self.textEdit_BlacklistBuildozer, 6, 0, 1, 1)
        self.label_7 = QtWidgets.QLabel(self.tabBlacklist)
        self.label_7.setObjectName("label_7")
        self.gridLayout_5.addWidget(self.label_7, 2, 0, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.tabBlacklist)
        self.label_8.setObjectName("label_8")
        self.gridLayout_5.addWidget(self.label_8, 5, 0, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.tabBlacklist)
        self.label_9.setObjectName("label_9")
        self.gridLayout_5.addWidget(self.label_9, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tabBlacklist, "")
        self.tab_4 = QtWidgets.QWidget()
        self.tab_4.setObjectName("tab_4")
        self.gridLayout_7 = QtWidgets.QGridLayout(self.tab_4)
        self.gridLayout_7.setObjectName("gridLayout_7")
        self.checkBox_SubBuildozer = QtWidgets.QCheckBox(self.tab_4)
        self.checkBox_SubBuildozer.setObjectName("checkBox_SubBuildozer")
        self.gridLayout_7.addWidget(self.checkBox_SubBuildozer, 1, 0, 1, 1)
        self.gridLayout_6 = QtWidgets.QGridLayout()
        self.gridLayout_6.setObjectName("gridLayout_6")
        self.label_12 = QtWidgets.QLabel(self.tab_4)
        self.label_12.setObjectName("label_12")
        self.gridLayout_6.addWidget(self.label_12, 0, 1, 1, 1)
        self.textEdit_SubApp = QtWidgets.QPlainTextEdit(self.tab_4)
        self.textEdit_SubApp.setEnabled(False)
        self.textEdit_SubApp.setObjectName("textEdit_SubApp")
        self.gridLayout_6.addWidget(self.textEdit_SubApp, 2, 1, 1, 1)
        self.checkBox_SubApp = QtWidgets.QCheckBox(self.tab_4)
        self.checkBox_SubApp.setObjectName("checkBox_SubApp")
        self.gridLayout_6.addWidget(self.checkBox_SubApp, 1, 1, 1, 1)
        self.gridLayout_7.addLayout(self.gridLayout_6, 0, 0, 1, 1)
        self.textEdit_SubBuildozer = QtWidgets.QPlainTextEdit(self.tab_4)
        self.textEdit_SubBuildozer.setEnabled(False)
        self.textEdit_SubBuildozer.setObjectName("textEdit_SubBuildozer")
        self.gridLayout_7.addWidget(self.textEdit_SubBuildozer, 2, 0, 1, 1)
        self.tabWidget.addTab(self.tab_4, "")
        self.tabTemplate = QtWidgets.QWidget()
        self.tabTemplate.setObjectName("tabTemplate")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.tabTemplate)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.scrollArea = QtWidgets.QScrollArea(self.tabTemplate)
        self.scrollArea.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 684, 426))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
        self.verticalLayout_4.setContentsMargins(5, 5, 5, 5)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.label = QtWidgets.QLabel(self.scrollAreaWidgetContents)
        self.label.setObjectName("label")
        self.verticalLayout_4.addWidget(self.label)
        self.label_2 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
        self.label_2.setObjectName("label_2")
        self.verticalLayout_4.addWidget(self.label_2)
        self.checkBox_CustomSpec = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
        self.checkBox_CustomSpec.setChecked(False)
        self.checkBox_CustomSpec.setObjectName("checkBox_CustomSpec")
        self.verticalLayout_4.addWidget(self.checkBox_CustomSpec)
        self.widget = QtWidgets.QWidget(self.scrollAreaWidgetContents)
        self.widget.setObjectName("widget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.textEdit_CustomSpec = QtWidgets.QTextEdit(self.widget)
        self.textEdit_CustomSpec.setEnabled(False)
        self.textEdit_CustomSpec.setObjectName("textEdit_CustomSpec")
        self.gridLayout.addWidget(self.textEdit_CustomSpec, 6, 2, 1, 1)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.gridLayout.addLayout(self.horizontalLayout_2, 1, 2, 1, 1)
        self.horizontalLayout.addLayout(self.gridLayout)
        self.verticalLayout_4.addWidget(self.widget)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem2)
        self.default_CustomSpec = QtWidgets.QPushButton(self.scrollAreaWidgetContents)
        self.default_CustomSpec.setObjectName("default_CustomSpec")
        self.horizontalLayout_3.addWidget(self.default_CustomSpec)
        self.verticalLayout_4.addLayout(self.horizontalLayout_3)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.verticalLayout_3.addWidget(self.scrollArea)
        self.tabWidget.addTab(self.tabTemplate, "")
        self.verticalLayout.addWidget(self.tabWidget)
        self.buttonBox = QtWidgets.QDialogButtonBox(SettingsDialog)
        self.buttonBox.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Apply|QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Save)
        self.buttonBox.setCenterButtons(True)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        self.retranslateUi(SettingsDialog)
        self.tabWidget.setCurrentIndex(0)
        self.buttonBox.accepted.connect(SettingsDialog.accept)
        self.buttonBox.rejected.connect(SettingsDialog.reject)
        self.checkBox_CustomSpec.toggled['bool'].connect(self.textEdit_CustomSpec.setEnabled)
        self.checkBox_Distclean.toggled['bool'].connect(self.lineEdit_Distclean.setEnabled)
        self.checkBox_Clean.toggled['bool'].connect(self.lineEdit_Clean.setEnabled)
        self.checkBox_Build.toggled['bool'].connect(self.lineEdit_Build.setEnabled)
        self.checkBox_Deploy.toggled['bool'].connect(self.lineEdit_Deploy.setEnabled)
        self.checkBox_Run.toggled['bool'].connect(self.lineEdit_Run.setEnabled)
        self.checkBox_Serve.toggled['bool'].connect(self.lineEdit_Serve.setEnabled)
        self.checkBox_Terminal.toggled['bool'].connect(self.lineEdit_Terminal.setEnabled)
        self.checkBox_BlacklistApp.toggled['bool'].connect(self.textEdit_BlacklistApp.setEnabled)
        self.checkBox_BlacklistBuildozer.toggled['bool'].connect(self.textEdit_BlacklistBuildozer.setEnabled)
        # NOTE: a second, duplicate connection of checkBox_CustomSpec.toggled
        # to textEdit_CustomSpec.setEnabled was removed here — it was already
        # connected above, and Qt would otherwise invoke the slot twice per toggle.
        self.checkBox_SubApp.toggled['bool'].connect(self.textEdit_SubApp.setEnabled)
        self.checkBox_SubBuildozer.toggled['bool'].connect(self.textEdit_SubBuildozer.setEnabled)
        self.buttonBox.clicked['QAbstractButton*'].connect(SettingsDialog.onApply)
        QtCore.QMetaObject.connectSlotsByName(SettingsDialog)

    def retranslateUi(self, SettingsDialog):
        """Install the translated user-visible strings (called from setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        SettingsDialog.setWindowTitle(_translate("SettingsDialog", "Settings"))
        # Typo fixed: "Builderton" -> "Buildertron" (application name).
        self.checkBox_AskSaveOnClose.setText(_translate("SettingsDialog", "Ask To Save On Closing Buildertron"))
        self.checkBox_RecentFiles.setText(_translate("SettingsDialog", "Recent Files History"))
        self.checkBox_SplashScreen.setText(_translate("SettingsDialog", "Show Splashscreen"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("SettingsDialog", "General"))
        self.checkBox_Distclean.setText(_translate("SettingsDialog", "Distclean"))
        self.checkBox_Terminal.setText(_translate("SettingsDialog", "Term"))
        self.lineEdit_Clean.setText(_translate("SettingsDialog", "buildozer {target} clean"))
        self.lineEdit_Deploy.setText(_translate("SettingsDialog", "buildozer {target} deploy"))
        self.checkBox_Run.setText(_translate("SettingsDialog", "Run"))
        self.lineEdit_Run.setText(_translate("SettingsDialog", "buildozer {target} run"))
        self.lineEdit_Serve.setText(_translate("SettingsDialog", "buildozer {target} serve"))
        self.checkBox_Deploy.setText(_translate("SettingsDialog", "Deploy"))
        self.checkBox_Serve.setText(_translate("SettingsDialog", "Serve"))
        self.checkBox_Build.setText(_translate("SettingsDialog", "Build"))
        self.lineEdit_Distclean.setText(_translate("SettingsDialog", "buildozer distclean"))
        self.label_10.setText(_translate("SettingsDialog", "Override terminal. This command is prepended to the buildozer commands"))
        self.lineEdit_Terminal.setText(_translate("SettingsDialog", "xterm -hold -e "))
        self.checkBox_Clean.setText(_translate("SettingsDialog", "Clean"))
        self.lineEdit_Build.setText(_translate("SettingsDialog", "buildozer {target} {buildmode}"))
        self.label_6.setText(_translate("SettingsDialog", "Override buildozer commands"))
        self.default_Clean.setText(_translate("SettingsDialog", "Default"))
        self.default_Build.setText(_translate("SettingsDialog", "Default"))
        self.default_Deploy.setText(_translate("SettingsDialog", "Default"))
        self.default_Run.setText(_translate("SettingsDialog", "Default"))
        self.default_Serve.setText(_translate("SettingsDialog", "Default"))
        self.default_Terminal.setText(_translate("SettingsDialog", "Default"))
        self.default_Distclean.setText(_translate("SettingsDialog", "Default"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("SettingsDialog", "Commands"))
        self.label_3.setText(_translate("SettingsDialog", "Add your custom own target names."))
        self.label_4.setText(_translate("SettingsDialog", "Target names are split by a newline or a comma. For example: android_small, ios_test"))
        self.label_5.setText(_translate("SettingsDialog", "This feature is intended for buildozer developers. Target names will appear in the target drop-down menu."))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("SettingsDialog", "Targets"))
        self.checkBox_BlacklistApp.setText(_translate("SettingsDialog", "Use App blacklist"))
        self.checkBox_BlacklistBuildozer.setText(_translate("SettingsDialog", "Use Buildozer blacklist"))
        self.label_7.setText(_translate("SettingsDialog", "Ignore keys under the [app] section e.g. p4a.dir, p4a.branch"))
        self.label_8.setText(_translate("SettingsDialog", "Ignore keys under the [buildozer] section e.g. log_level, bin_dir"))
        self.label_9.setText(_translate("SettingsDialog", "Blacklist config keys by skipping writing them to spec file when saving"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabBlacklist), _translate("SettingsDialog", "Blacklist"))
        # Typo fixed: "[buiildozer]" -> "[buildozer]" (section name shown to the user).
        self.checkBox_SubBuildozer.setText(_translate("SettingsDialog", "Substitute key names under [buildozer] section"))
        self.label_12.setText(_translate("SettingsDialog", "You substitute config keys with your own. e.g. p4a.dir=p4a.dir2 will change p4a.dir to p4a.dir2"))
        self.checkBox_SubApp.setText(_translate("SettingsDialog", "Substitute key names under [app] section"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("SettingsDialog", "Substitute"))
        self.label.setText(_translate("SettingsDialog", "You can define a custom spec layout"))
        self.label_2.setText(_translate("SettingsDialog", "Note: Commented lines must begin with #"))
        self.checkBox_CustomSpec.setText(_translate("SettingsDialog", "Use custom spec layout"))
        self.textEdit_CustomSpec.setHtml(_translate("SettingsDialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">[app]</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">{app}</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">[buildozer]</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">{buildozer}</p></body></html>"))
        self.default_CustomSpec.setText(_translate("SettingsDialog", "Restore Default Layout"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabTemplate), _translate("SettingsDialog", "Buildozer Spec Layout"))
|
swprojects/Buildertron
|
buildertron/forms/generate_pyui.pyw
|
<reponame>swprojects/Buildertron
# Generate python modules from UI forms
"""Regenerate the ui*.py modules from the Qt Designer .ui files alongside this script."""
import os
import os.path
import subprocess

# Work relative to this script's directory so it can be launched from anywhere.
try:
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
except OSError:
    pass
for f in os.listdir('.'):
    if not (os.path.isfile(f) and f.endswith('.ui')):
        continue
    # Argument list, no shell: filenames with spaces or shell
    # metacharacters are passed through safely (os.system was not).
    subprocess.run(['pyuic5', '-o', 'ui{0}.py'.format(f[:-3]), f])
print('Done!')
|
swprojects/Buildertron
|
buildertron/forms/mainwindow.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(1271, 924)
MainWindow.setDockNestingEnabled(True)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.centralWidget)
self.verticalLayout_5.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_5.setSpacing(6)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.splitterMain = QtWidgets.QSplitter(self.centralWidget)
self.splitterMain.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(50)
sizePolicy.setHeightForWidth(self.splitterMain.sizePolicy().hasHeightForWidth())
self.splitterMain.setSizePolicy(sizePolicy)
self.splitterMain.setMinimumSize(QtCore.QSize(0, 400))
self.splitterMain.setSizeIncrement(QtCore.QSize(0, 0))
self.splitterMain.setBaseSize(QtCore.QSize(0, 0))
self.splitterMain.setFrameShape(QtWidgets.QFrame.NoFrame)
self.splitterMain.setFrameShadow(QtWidgets.QFrame.Plain)
self.splitterMain.setOrientation(QtCore.Qt.Vertical)
self.splitterMain.setOpaqueResize(True)
self.splitterMain.setHandleWidth(0)
self.splitterMain.setChildrenCollapsible(False)
self.splitterMain.setObjectName("splitterMain")
self.splitterTop = QtWidgets.QSplitter(self.splitterMain)
self.splitterTop.setBaseSize(QtCore.QSize(700, 600))
self.splitterTop.setOrientation(QtCore.Qt.Horizontal)
self.splitterTop.setHandleWidth(2)
self.splitterTop.setChildrenCollapsible(False)
self.splitterTop.setObjectName("splitterTop")
self.tabWidget = QtWidgets.QTabWidget(self.splitterTop)
self.tabWidget.setTabPosition(QtWidgets.QTabWidget.North)
self.tabWidget.setTabShape(QtWidgets.QTabWidget.Rounded)
self.tabWidget.setObjectName("tabWidget")
self.Project = QtWidgets.QWidget()
self.Project.setObjectName("Project")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.Project)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.scrollArea = QtWidgets.QScrollArea(self.Project)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea.sizePolicy().hasHeightForWidth())
self.scrollArea.setSizePolicy(sizePolicy)
self.scrollArea.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 623, 578))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout.setContentsMargins(11, 11, 11, 11)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.lineEdit_requirements = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_requirements.setEnabled(False)
self.lineEdit_requirements.setObjectName("lineEdit_requirements")
self.gridLayout.addWidget(self.lineEdit_requirements, 14, 2, 1, 1)
self.checkBox_requirements = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_requirements.setText("")
self.checkBox_requirements.setObjectName("checkBox_requirements")
self.gridLayout.addWidget(self.checkBox_requirements, 14, 1, 1, 1)
self.label_78 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_78.setObjectName("label_78")
self.gridLayout.addWidget(self.label_78, 14, 0, 1, 1)
self.lineEdit_icon_filename = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_icon_filename.setEnabled(False)
self.lineEdit_icon_filename.setObjectName("lineEdit_icon_filename")
self.gridLayout.addWidget(self.lineEdit_icon_filename, 17, 2, 1, 1)
self.label_75 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_75.setObjectName("label_75")
self.gridLayout.addWidget(self.label_75, 17, 0, 1, 1)
self.checkBox_icon_filename = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_icon_filename.setText("")
self.checkBox_icon_filename.setObjectName("checkBox_icon_filename")
self.gridLayout.addWidget(self.checkBox_icon_filename, 17, 1, 1, 1)
self.label_77 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_77.setObjectName("label_77")
self.gridLayout.addWidget(self.label_77, 16, 0, 1, 1)
self.lineEdit_presplash = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_presplash.setEnabled(False)
self.lineEdit_presplash.setObjectName("lineEdit_presplash")
self.gridLayout.addWidget(self.lineEdit_presplash, 16, 2, 1, 1)
self.checkBox_presplash = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_presplash.setText("")
self.checkBox_presplash.setObjectName("checkBox_presplash")
self.gridLayout.addWidget(self.checkBox_presplash, 16, 1, 1, 1)
self.label_76 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_76.setObjectName("label_76")
self.gridLayout.addWidget(self.label_76, 15, 0, 1, 1)
self.checkBox_garden_requirements = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_garden_requirements.setText("")
self.checkBox_garden_requirements.setObjectName("checkBox_garden_requirements")
self.gridLayout.addWidget(self.checkBox_garden_requirements, 15, 1, 1, 1)
self.label_79 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setUnderline(True)
font.setWeight(75)
self.label_79.setFont(font)
self.label_79.setObjectName("label_79")
self.gridLayout.addWidget(self.label_79, 0, 0, 1, 1)
self.lineEdit_garden_requirements = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_garden_requirements.setEnabled(False)
self.lineEdit_garden_requirements.setObjectName("lineEdit_garden_requirements")
self.gridLayout.addWidget(self.lineEdit_garden_requirements, 15, 2, 1, 1)
self.label_29 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_29.setText("")
self.label_29.setObjectName("label_29")
self.gridLayout.addWidget(self.label_29, 5, 0, 1, 1)
self.label_5 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 7, 0, 1, 1)
self.checkBox_source_exclude_dirs = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_source_exclude_dirs.setText("")
self.checkBox_source_exclude_dirs.setObjectName("checkBox_source_exclude_dirs")
self.gridLayout.addWidget(self.checkBox_source_exclude_dirs, 8, 1, 1, 1)
self.toolButton_source_dir = QtWidgets.QToolButton(self.scrollAreaWidgetContents)
self.toolButton_source_dir.setObjectName("toolButton_source_dir")
self.gridLayout.addWidget(self.toolButton_source_dir, 7, 3, 1, 1)
self.label_4 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 4, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 8, 0, 1, 1)
self.checkBox_source_include_exts = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_source_include_exts.setText("")
self.checkBox_source_include_exts.setObjectName("checkBox_source_include_exts")
self.gridLayout.addWidget(self.checkBox_source_include_exts, 9, 1, 1, 1)
self.lineEdit_source_include_exts = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_source_include_exts.setEnabled(False)
self.lineEdit_source_include_exts.setObjectName("lineEdit_source_include_exts")
self.gridLayout.addWidget(self.lineEdit_source_include_exts, 9, 2, 1, 1)
self.lineEdit_title = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_title.setClearButtonEnabled(False)
self.lineEdit_title.setObjectName("lineEdit_title")
self.gridLayout.addWidget(self.lineEdit_title, 1, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.lineEdit_name = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_name.setObjectName("lineEdit_name")
self.gridLayout.addWidget(self.lineEdit_name, 2, 2, 1, 1)
self.lineEdit_source_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_source_dir.setEnabled(True)
self.lineEdit_source_dir.setObjectName("lineEdit_source_dir")
self.gridLayout.addWidget(self.lineEdit_source_dir, 7, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 10, 0, 1, 1)
self.lineEdit_source_exclude_dirs = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_source_exclude_dirs.setEnabled(False)
self.lineEdit_source_exclude_dirs.setObjectName("lineEdit_source_exclude_dirs")
self.gridLayout.addWidget(self.lineEdit_source_exclude_dirs, 8, 2, 1, 1)
self.label_6 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 9, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.checkBox_source_exclude_exts = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_source_exclude_exts.setText("")
self.checkBox_source_exclude_exts.setObjectName("checkBox_source_exclude_exts")
self.gridLayout.addWidget(self.checkBox_source_exclude_exts, 10, 1, 1, 1)
self.lineEdit_domain = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_domain.setObjectName("lineEdit_domain")
self.gridLayout.addWidget(self.lineEdit_domain, 4, 2, 1, 1)
self.lineEdit_version = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_version.setObjectName("lineEdit_version")
self.gridLayout.addWidget(self.lineEdit_version, 3, 2, 1, 1)
self.label = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.checkBox_source_exclude_patterns = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_source_exclude_patterns.setText("")
self.checkBox_source_exclude_patterns.setObjectName("checkBox_source_exclude_patterns")
self.gridLayout.addWidget(self.checkBox_source_exclude_patterns, 12, 1, 1, 1)
self.lineEdit_services = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_services.setEnabled(False)
self.lineEdit_services.setObjectName("lineEdit_services")
self.gridLayout.addWidget(self.lineEdit_services, 19, 2, 1, 1)
self.label_11 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 18, 0, 1, 1)
self.label_10 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 12, 0, 1, 1)
self.lineEdit_source_exclude_patterns = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_source_exclude_patterns.setEnabled(False)
self.lineEdit_source_exclude_patterns.setObjectName("lineEdit_source_exclude_patterns")
self.gridLayout.addWidget(self.lineEdit_source_exclude_patterns, 12, 2, 1, 1)
self.label_30 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_30.setText("")
self.label_30.setObjectName("label_30")
self.gridLayout.addWidget(self.label_30, 13, 0, 1, 1)
self.checkBox_source_include_patterns = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_source_include_patterns.setText("")
self.checkBox_source_include_patterns.setObjectName("checkBox_source_include_patterns")
self.gridLayout.addWidget(self.checkBox_source_include_patterns, 11, 1, 1, 1)
self.checkBox_services = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkBox_services.setText("")
self.checkBox_services.setObjectName("checkBox_services")
self.gridLayout.addWidget(self.checkBox_services, 19, 1, 1, 1)
self.comboBox_orientation = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.comboBox_orientation.setObjectName("comboBox_orientation")
self.comboBox_orientation.addItem("")
self.comboBox_orientation.addItem("")
self.comboBox_orientation.addItem("")
self.gridLayout.addWidget(self.comboBox_orientation, 18, 2, 1, 1)
self.label_26 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_26.setObjectName("label_26")
self.gridLayout.addWidget(self.label_26, 19, 0, 1, 1)
self.lineEdit_source_exclude_exts = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_source_exclude_exts.setEnabled(False)
self.lineEdit_source_exclude_exts.setObjectName("lineEdit_source_exclude_exts")
self.gridLayout.addWidget(self.lineEdit_source_exclude_exts, 10, 2, 1, 1)
self.lineEdit_source_include_patterns = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineEdit_source_include_patterns.setEnabled(False)
self.lineEdit_source_include_patterns.setObjectName("lineEdit_source_include_patterns")
self.gridLayout.addWidget(self.lineEdit_source_include_patterns, 11, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 20, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 11, 0, 1, 1)
self.label_31 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setUnderline(True)
font.setWeight(75)
self.label_31.setFont(font)
self.label_31.setObjectName("label_31")
self.gridLayout.addWidget(self.label_31, 6, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout_3.addWidget(self.scrollArea)
self.tabWidget.addTab(self.Project, "")
self.tabProject = QtWidgets.QWidget()
self.tabProject.setEnabled(True)
self.tabProject.setObjectName("tabProject")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.tabProject)
self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_7.setSpacing(6)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.scrollArea_6 = QtWidgets.QScrollArea(self.tabProject)
self.scrollArea_6.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_6.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_6.setWidgetResizable(True)
self.scrollArea_6.setObjectName("scrollArea_6")
self.scrollAreaWidgetContents_7 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_7.setGeometry(QtCore.QRect(0, 0, 644, 506))
self.scrollAreaWidgetContents_7.setObjectName("scrollAreaWidgetContents_7")
self.gridLayout_4 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_7)
self.gridLayout_4.setContentsMargins(11, 11, 11, 11)
self.gridLayout_4.setSpacing(6)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_64 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_64.setObjectName("label_64")
self.gridLayout_4.addWidget(self.label_64, 0, 0, 1, 1)
self.lineEdit_bin_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_7)
self.lineEdit_bin_dir.setEnabled(False)
self.lineEdit_bin_dir.setObjectName("lineEdit_bin_dir")
self.gridLayout_4.addWidget(self.lineEdit_bin_dir, 1, 2, 1, 1)
self.checkBox_build_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.checkBox_build_dir.setText("")
self.checkBox_build_dir.setObjectName("checkBox_build_dir")
self.gridLayout_4.addWidget(self.checkBox_build_dir, 0, 1, 1, 1)
self.checkBox_bin_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.checkBox_bin_dir.setText("")
self.checkBox_bin_dir.setObjectName("checkBox_bin_dir")
self.gridLayout_4.addWidget(self.checkBox_bin_dir, 1, 1, 1, 1)
self.label_65 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_65.setObjectName("label_65")
self.gridLayout_4.addWidget(self.label_65, 1, 0, 1, 1)
self.lineEdit_build_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_7)
self.lineEdit_build_dir.setEnabled(False)
self.lineEdit_build_dir.setObjectName("lineEdit_build_dir")
self.gridLayout_4.addWidget(self.lineEdit_build_dir, 0, 2, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_4.addItem(spacerItem1, 2, 2, 1, 1)
self.toolButton_build_dir = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.toolButton_build_dir.setObjectName("toolButton_build_dir")
self.gridLayout_4.addWidget(self.toolButton_build_dir, 0, 3, 1, 1)
self.toolButton_bin_dir = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.toolButton_bin_dir.setObjectName("toolButton_bin_dir")
self.gridLayout_4.addWidget(self.toolButton_bin_dir, 1, 3, 1, 1)
self.scrollArea_6.setWidget(self.scrollAreaWidgetContents_7)
self.horizontalLayout_7.addWidget(self.scrollArea_6)
self.tabWidget.addTab(self.tabProject, "")
self.tabWidget_2 = QtWidgets.QTabWidget(self.splitterTop)
self.tabWidget_2.setEnabled(True)
self.tabWidget_2.setObjectName("tabWidget_2")
self.tabAndroid = QtWidgets.QWidget()
self.tabAndroid.setObjectName("tabAndroid")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.tabAndroid)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.scrollArea_2 = QtWidgets.QScrollArea(self.tabAndroid)
self.scrollArea_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_2.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_2.setLineWidth(0)
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName("scrollArea_2")
self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 591, 506))
self.scrollAreaWidgetContents_2.setAutoFillBackground(True)
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_2)
self.gridLayout_2.setContentsMargins(11, 11, 11, 11)
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_21 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_21.setObjectName("label_21")
self.gridLayout_2.addWidget(self.label_21, 3, 0, 1, 1)
self.lineEdit2_android_minapi = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit2_android_minapi.setObjectName("lineEdit2_android_minapi")
self.gridLayout_2.addWidget(self.lineEdit2_android_minapi, 3, 2, 1, 1)
self.label_13 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_13.setObjectName("label_13")
self.gridLayout_2.addWidget(self.label_13, 9, 0, 1, 1)
self.checkBox_android_ant_path = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox_android_ant_path.setText("")
self.checkBox_android_ant_path.setObjectName("checkBox_android_ant_path")
self.gridLayout_2.addWidget(self.checkBox_android_ant_path, 9, 1, 1, 1)
self.label_20 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_20.setObjectName("label_20")
self.gridLayout_2.addWidget(self.label_20, 2, 0, 1, 1)
self.label_40 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_40.setText("")
self.label_40.setObjectName("label_40")
self.gridLayout_2.addWidget(self.label_40, 1, 0, 1, 1)
self.lineEdit_android_ant_path = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit_android_ant_path.setEnabled(False)
self.lineEdit_android_ant_path.setText("")
self.lineEdit_android_ant_path.setObjectName("lineEdit_android_ant_path")
self.gridLayout_2.addWidget(self.lineEdit_android_ant_path, 9, 2, 1, 1)
self.toolButton_android_ant_path = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolButton_android_ant_path.setObjectName("toolButton_android_ant_path")
self.gridLayout_2.addWidget(self.toolButton_android_ant_path, 9, 3, 1, 1)
self.label_25 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_25.setObjectName("label_25")
self.gridLayout_2.addWidget(self.label_25, 0, 0, 1, 1)
self.comboBox_android_arch = QtWidgets.QComboBox(self.scrollAreaWidgetContents_2)
self.comboBox_android_arch.setObjectName("comboBox_android_arch")
self.comboBox_android_arch.addItem("")
self.comboBox_android_arch.addItem("")
self.comboBox_android_arch.addItem("")
self.gridLayout_2.addWidget(self.comboBox_android_arch, 0, 2, 1, 1)
self.lineEdit2_android_api = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit2_android_api.setObjectName("lineEdit2_android_api")
self.gridLayout_2.addWidget(self.lineEdit2_android_api, 2, 2, 1, 1)
self.label_22 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_22.setObjectName("label_22")
self.gridLayout_2.addWidget(self.label_22, 4, 0, 1, 1)
self.label_16 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_16.setObjectName("label_16")
self.gridLayout_2.addWidget(self.label_16, 6, 0, 1, 1)
self.label_41 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_41.setText("")
self.label_41.setObjectName("label_41")
self.gridLayout_2.addWidget(self.label_41, 15, 0, 1, 1)
self.label_19 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_19.setObjectName("label_19")
self.gridLayout_2.addWidget(self.label_19, 7, 0, 1, 1)
self.label_23 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_23.setObjectName("label_23")
self.gridLayout_2.addWidget(self.label_23, 17, 0, 1, 1)
self.label_12 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_12.setObjectName("label_12")
self.gridLayout_2.addWidget(self.label_12, 16, 0, 1, 1)
self.toolButton_android_permissions = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolButton_android_permissions.setObjectName("toolButton_android_permissions")
self.gridLayout_2.addWidget(self.toolButton_android_permissions, 17, 3, 1, 1)
self.checkBox_android_presplash_color = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox_android_presplash_color.setText("")
self.checkBox_android_presplash_color.setObjectName("checkBox_android_presplash_color")
self.gridLayout_2.addWidget(self.checkBox_android_presplash_color, 18, 1, 1, 1)
self.label_14 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_14.setObjectName("label_14")
self.gridLayout_2.addWidget(self.label_14, 18, 0, 1, 1)
self.label_18 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_18.setObjectName("label_18")
self.gridLayout_2.addWidget(self.label_18, 5, 0, 1, 1)
self.checkBox2_fullscreen = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox2_fullscreen.setLayoutDirection(QtCore.Qt.LeftToRight)
self.checkBox2_fullscreen.setText("")
self.checkBox2_fullscreen.setObjectName("checkBox2_fullscreen")
self.gridLayout_2.addWidget(self.checkBox2_fullscreen, 16, 2, 1, 1)
self.comboBox_android_presplash_color = QtWidgets.QComboBox(self.scrollAreaWidgetContents_2)
self.comboBox_android_presplash_color.setEnabled(False)
self.comboBox_android_presplash_color.setObjectName("comboBox_android_presplash_color")
self.gridLayout_2.addWidget(self.comboBox_android_presplash_color, 18, 2, 1, 1)
self.toolButton_android_presplash_color = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolButton_android_presplash_color.setObjectName("toolButton_android_presplash_color")
self.gridLayout_2.addWidget(self.toolButton_android_presplash_color, 18, 3, 1, 1)
self.checkBox2_android_private_storage = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox2_android_private_storage.setLayoutDirection(QtCore.Qt.LeftToRight)
self.checkBox2_android_private_storage.setText("")
self.checkBox2_android_private_storage.setObjectName("checkBox2_android_private_storage")
self.gridLayout_2.addWidget(self.checkBox2_android_private_storage, 19, 2, 1, 1)
self.label_24 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_24.setObjectName("label_24")
self.gridLayout_2.addWidget(self.label_24, 19, 0, 1, 1)
self.label_58 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_58.setObjectName("label_58")
self.gridLayout_2.addWidget(self.label_58, 20, 0, 1, 1)
self.checkBox2_android_wakelock = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox2_android_wakelock.setText("")
self.checkBox2_android_wakelock.setObjectName("checkBox2_android_wakelock")
self.gridLayout_2.addWidget(self.checkBox2_android_wakelock, 20, 2, 1, 1)
self.lineEdit_android_permissions = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit_android_permissions.setObjectName("lineEdit_android_permissions")
self.gridLayout_2.addWidget(self.lineEdit_android_permissions, 17, 2, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(103, 53, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem2, 21, 0, 1, 1)
self.checkBox2_android_skip_update = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox2_android_skip_update.setText("")
self.checkBox2_android_skip_update.setObjectName("checkBox2_android_skip_update")
self.gridLayout_2.addWidget(self.checkBox2_android_skip_update, 6, 2, 1, 1)
self.toolButton_android_sdk_path = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolButton_android_sdk_path.setObjectName("toolButton_android_sdk_path")
self.gridLayout_2.addWidget(self.toolButton_android_sdk_path, 5, 3, 1, 1)
self.checkBox_android_sdk_path = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox_android_sdk_path.setText("")
self.checkBox_android_sdk_path.setObjectName("checkBox_android_sdk_path")
self.gridLayout_2.addWidget(self.checkBox_android_sdk_path, 5, 1, 1, 1)
self.lineEdit_android_sdk_path = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit_android_sdk_path.setEnabled(False)
self.lineEdit_android_sdk_path.setText("")
self.lineEdit_android_sdk_path.setObjectName("lineEdit_android_sdk_path")
self.gridLayout_2.addWidget(self.lineEdit_android_sdk_path, 5, 2, 1, 1)
self.lineEdit2_android_sdk = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit2_android_sdk.setObjectName("lineEdit2_android_sdk")
self.gridLayout_2.addWidget(self.lineEdit2_android_sdk, 4, 2, 1, 1)
self.label_17 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_17.setToolTip("")
self.label_17.setStatusTip("")
self.label_17.setAccessibleName("")
self.label_17.setObjectName("label_17")
self.gridLayout_2.addWidget(self.label_17, 8, 0, 1, 1)
self.lineEdit2_android_ndk = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit2_android_ndk.setObjectName("lineEdit2_android_ndk")
self.gridLayout_2.addWidget(self.lineEdit2_android_ndk, 7, 2, 1, 1)
self.checkBox_android_ndk_path = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkBox_android_ndk_path.setText("")
self.checkBox_android_ndk_path.setObjectName("checkBox_android_ndk_path")
self.gridLayout_2.addWidget(self.checkBox_android_ndk_path, 8, 1, 1, 1)
self.lineEdit_android_ndk_path = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineEdit_android_ndk_path.setEnabled(False)
self.lineEdit_android_ndk_path.setStatusTip("")
self.lineEdit_android_ndk_path.setText("")
self.lineEdit_android_ndk_path.setObjectName("lineEdit_android_ndk_path")
self.gridLayout_2.addWidget(self.lineEdit_android_ndk_path, 8, 2, 1, 1)
self.toolButton_android_ndk_path = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolButton_android_ndk_path.setObjectName("toolButton_android_ndk_path")
self.gridLayout_2.addWidget(self.toolButton_android_ndk_path, 8, 3, 1, 1)
self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_2)
self.horizontalLayout_4.addWidget(self.scrollArea_2)
self.tabWidget_2.addTab(self.tabAndroid, "")
self.tabAndroid2 = QtWidgets.QWidget()
self.tabAndroid2.setObjectName("tabAndroid2")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.tabAndroid2)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.scrollArea_3 = QtWidgets.QScrollArea(self.tabAndroid2)
self.scrollArea_3.setEnabled(True)
self.scrollArea_3.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_3.setWidgetResizable(True)
self.scrollArea_3.setObjectName("scrollArea_3")
self.scrollAreaWidgetContents_4 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_4.setGeometry(QtCore.QRect(0, 0, 570, 765))
self.scrollAreaWidgetContents_4.setObjectName("scrollAreaWidgetContents_4")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_4)
self.verticalLayout_2.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setSpacing(6)
self.gridLayout_3.setObjectName("gridLayout_3")
self.checkBox_android_jars = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_jars.setText("")
self.checkBox_android_jars.setObjectName("checkBox_android_jars")
self.gridLayout_3.addWidget(self.checkBox_android_jars, 1, 1, 1, 1)
self.label_28 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_28.setObjectName("label_28")
self.gridLayout_3.addWidget(self.label_28, 2, 0, 1, 1)
self.checkBox_android_add_aars = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_add_aars.setText("")
self.checkBox_android_add_aars.setObjectName("checkBox_android_add_aars")
self.gridLayout_3.addWidget(self.checkBox_android_add_aars, 2, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_15.setObjectName("label_15")
self.gridLayout_3.addWidget(self.label_15, 1, 0, 1, 1)
self.lineEdit_android_jars = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_jars.setEnabled(False)
self.lineEdit_android_jars.setObjectName("lineEdit_android_jars")
self.gridLayout_3.addWidget(self.lineEdit_android_jars, 1, 2, 1, 1)
self.lineEdit_android_add_aars = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_add_aars.setEnabled(False)
self.lineEdit_android_add_aars.setObjectName("lineEdit_android_add_aars")
self.gridLayout_3.addWidget(self.lineEdit_android_add_aars, 2, 2, 1, 1)
self.label_33 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_33.setObjectName("label_33")
self.gridLayout_3.addWidget(self.label_33, 0, 0, 1, 1)
self.checkBox_android_entrypoint = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_entrypoint.setText("")
self.checkBox_android_entrypoint.setObjectName("checkBox_android_entrypoint")
self.gridLayout_3.addWidget(self.checkBox_android_entrypoint, 0, 1, 1, 1)
self.lineEdit_android_entrypoint = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_entrypoint.setEnabled(False)
self.lineEdit_android_entrypoint.setObjectName("lineEdit_android_entrypoint")
self.gridLayout_3.addWidget(self.lineEdit_android_entrypoint, 0, 2, 1, 1)
self.checkBox_android_manifest_intent_filters = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_manifest_intent_filters.setText("")
self.checkBox_android_manifest_intent_filters.setObjectName("checkBox_android_manifest_intent_filters")
self.gridLayout_3.addWidget(self.checkBox_android_manifest_intent_filters, 7, 1, 1, 1)
self.label_57 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_57.setObjectName("label_57")
self.gridLayout_3.addWidget(self.label_57, 8, 0, 1, 1)
self.checkBox_android_gradle_dependencies = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_gradle_dependencies.setText("")
self.checkBox_android_gradle_dependencies.setObjectName("checkBox_android_gradle_dependencies")
self.gridLayout_3.addWidget(self.checkBox_android_gradle_dependencies, 4, 1, 1, 1)
self.label_39 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_39.setObjectName("label_39")
self.gridLayout_3.addWidget(self.label_39, 5, 0, 1, 1)
self.label_27 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_27.setObjectName("label_27")
self.gridLayout_3.addWidget(self.label_27, 3, 0, 1, 1)
self.checkBox_android_add_java_src = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_add_java_src.setText("")
self.checkBox_android_add_java_src.setObjectName("checkBox_android_add_java_src")
self.gridLayout_3.addWidget(self.checkBox_android_add_java_src, 3, 1, 1, 1)
self.label_56 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_56.setObjectName("label_56")
self.gridLayout_3.addWidget(self.label_56, 7, 0, 1, 1)
self.lineEdit_android_manifest_intent_filters = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_manifest_intent_filters.setEnabled(False)
self.lineEdit_android_manifest_intent_filters.setObjectName("lineEdit_android_manifest_intent_filters")
self.gridLayout_3.addWidget(self.lineEdit_android_manifest_intent_filters, 7, 2, 1, 1)
self.label_38 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_38.setObjectName("label_38")
self.gridLayout_3.addWidget(self.label_38, 4, 0, 1, 1)
self.lineEdit_android_add_java_src = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_add_java_src.setEnabled(False)
self.lineEdit_android_add_java_src.setObjectName("lineEdit_android_add_java_src")
self.gridLayout_3.addWidget(self.lineEdit_android_add_java_src, 3, 2, 1, 1)
self.lineEdit_android_gradle_dependencies = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_gradle_dependencies.setEnabled(False)
self.lineEdit_android_gradle_dependencies.setText("")
self.lineEdit_android_gradle_dependencies.setObjectName("lineEdit_android_gradle_dependencies")
self.gridLayout_3.addWidget(self.lineEdit_android_gradle_dependencies, 4, 2, 1, 1)
self.checkBox_android_add_activites = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_add_activites.setText("")
self.checkBox_android_add_activites.setObjectName("checkBox_android_add_activites")
self.gridLayout_3.addWidget(self.checkBox_android_add_activites, 5, 1, 1, 1)
self.lineEdit_android_add_activites = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_add_activites.setEnabled(False)
self.lineEdit_android_add_activites.setObjectName("lineEdit_android_add_activites")
self.gridLayout_3.addWidget(self.lineEdit_android_add_activites, 5, 2, 1, 1)
self.label_48 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_48.setText("")
self.label_48.setObjectName("label_48")
self.gridLayout_3.addWidget(self.label_48, 6, 0, 1, 1)
self.label_53 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_53.setObjectName("label_53")
self.gridLayout_3.addWidget(self.label_53, 22, 0, 1, 1)
self.checkBox_android_add_libs_mips = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_add_libs_mips.setText("")
self.checkBox_android_add_libs_mips.setObjectName("checkBox_android_add_libs_mips")
self.gridLayout_3.addWidget(self.checkBox_android_add_libs_mips, 22, 1, 1, 1)
self.label_51 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_51.setObjectName("label_51")
self.gridLayout_3.addWidget(self.label_51, 20, 0, 1, 1)
self.label_61 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_61.setObjectName("label_61")
self.gridLayout_3.addWidget(self.label_61, 11, 0, 1, 1)
self.lineEdit_android_add_libs_mips = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_add_libs_mips.setEnabled(False)
self.lineEdit_android_add_libs_mips.setObjectName("lineEdit_android_add_libs_mips")
self.gridLayout_3.addWidget(self.lineEdit_android_add_libs_mips, 22, 2, 1, 1)
self.label_49 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_49.setText("")
self.label_49.setObjectName("label_49")
self.gridLayout_3.addWidget(self.label_49, 23, 0, 1, 1)
self.checkBox_android_manifest_launch_mode = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_manifest_launch_mode.setText("")
self.checkBox_android_manifest_launch_mode.setObjectName("checkBox_android_manifest_launch_mode")
self.gridLayout_3.addWidget(self.checkBox_android_manifest_launch_mode, 8, 1, 1, 1)
self.checkBox_android_library_references = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_library_references.setText("")
self.checkBox_android_library_references.setObjectName("checkBox_android_library_references")
self.gridLayout_3.addWidget(self.checkBox_android_library_references, 11, 1, 1, 1)
self.lineEdit_android_meta_data = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_meta_data.setEnabled(False)
self.lineEdit_android_meta_data.setObjectName("lineEdit_android_meta_data")
self.gridLayout_3.addWidget(self.lineEdit_android_meta_data, 10, 2, 1, 1)
self.checkBox_android_meta_data = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_meta_data.setText("")
self.checkBox_android_meta_data.setObjectName("checkBox_android_meta_data")
self.gridLayout_3.addWidget(self.checkBox_android_meta_data, 10, 1, 1, 1)
self.checkBox_android_logcat_filters = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_logcat_filters.setText("")
self.checkBox_android_logcat_filters.setObjectName("checkBox_android_logcat_filters")
self.gridLayout_3.addWidget(self.checkBox_android_logcat_filters, 12, 1, 1, 1)
self.lineEdit_android_manifest_launch_mode = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_manifest_launch_mode.setEnabled(False)
self.lineEdit_android_manifest_launch_mode.setObjectName("lineEdit_android_manifest_launch_mode")
self.gridLayout_3.addWidget(self.lineEdit_android_manifest_launch_mode, 8, 2, 1, 1)
self.label_60 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_60.setObjectName("label_60")
self.gridLayout_3.addWidget(self.label_60, 12, 0, 1, 1)
self.checkBox_android_copy_libs = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_copy_libs.setText("")
self.checkBox_android_copy_libs.setObjectName("checkBox_android_copy_libs")
self.gridLayout_3.addWidget(self.checkBox_android_copy_libs, 13, 1, 1, 1)
self.label_37 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_37.setFont(font)
self.label_37.setObjectName("label_37")
self.gridLayout_3.addWidget(self.label_37, 18, 0, 1, 1)
self.label_63 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_63.setObjectName("label_63")
self.gridLayout_3.addWidget(self.label_63, 13, 0, 1, 1)
self.label_50 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_50.setObjectName("label_50")
self.gridLayout_3.addWidget(self.label_50, 19, 0, 1, 1)
self.lineEdit_android_copy_libs = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_copy_libs.setEnabled(False)
self.lineEdit_android_copy_libs.setObjectName("lineEdit_android_copy_libs")
self.gridLayout_3.addWidget(self.lineEdit_android_copy_libs, 13, 2, 1, 1)
self.label_55 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_55.setText("")
self.label_55.setObjectName("label_55")
self.gridLayout_3.addWidget(self.label_55, 9, 0, 1, 1)
self.label_62 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_62.setText("")
self.label_62.setObjectName("label_62")
self.gridLayout_3.addWidget(self.label_62, 14, 0, 1, 1)
self.checkBox_android_add_libs_armeabi = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_add_libs_armeabi.setText("")
self.checkBox_android_add_libs_armeabi.setObjectName("checkBox_android_add_libs_armeabi")
self.gridLayout_3.addWidget(self.checkBox_android_add_libs_armeabi, 19, 1, 1, 1)
self.label_59 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_59.setObjectName("label_59")
self.gridLayout_3.addWidget(self.label_59, 10, 0, 1, 1)
self.checkBox_android_add_libs_armeabi_v7a = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_add_libs_armeabi_v7a.setText("")
self.checkBox_android_add_libs_armeabi_v7a.setObjectName("checkBox_android_add_libs_armeabi_v7a")
self.gridLayout_3.addWidget(self.checkBox_android_add_libs_armeabi_v7a, 20, 1, 1, 1)
self.lineEdit_android_add_libs_armeabi_v7a = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_add_libs_armeabi_v7a.setEnabled(False)
self.lineEdit_android_add_libs_armeabi_v7a.setObjectName("lineEdit_android_add_libs_armeabi_v7a")
self.gridLayout_3.addWidget(self.lineEdit_android_add_libs_armeabi_v7a, 20, 2, 1, 1)
self.lineEdit_android_add_libs_armeabi = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_add_libs_armeabi.setEnabled(False)
self.lineEdit_android_add_libs_armeabi.setObjectName("lineEdit_android_add_libs_armeabi")
self.gridLayout_3.addWidget(self.lineEdit_android_add_libs_armeabi, 19, 2, 1, 1)
self.label_52 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_52.setObjectName("label_52")
self.gridLayout_3.addWidget(self.label_52, 21, 0, 1, 1)
self.checkBox_android_add_libs_x86 = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_add_libs_x86.setText("")
self.checkBox_android_add_libs_x86.setObjectName("checkBox_android_add_libs_x86")
self.gridLayout_3.addWidget(self.checkBox_android_add_libs_x86, 21, 1, 1, 1)
self.lineEdit_android_logcat_filters = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_logcat_filters.setEnabled(False)
self.lineEdit_android_logcat_filters.setObjectName("lineEdit_android_logcat_filters")
self.gridLayout_3.addWidget(self.lineEdit_android_logcat_filters, 12, 2, 1, 1)
self.lineEdit_android_add_libs_x86 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_add_libs_x86.setEnabled(False)
self.lineEdit_android_add_libs_x86.setObjectName("lineEdit_android_add_libs_x86")
self.gridLayout_3.addWidget(self.lineEdit_android_add_libs_x86, 21, 2, 1, 1)
self.lineEdit_android_library_references = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_library_references.setEnabled(False)
self.lineEdit_android_library_references.setObjectName("lineEdit_android_library_references")
self.gridLayout_3.addWidget(self.lineEdit_android_library_references, 11, 2, 1, 1)
self.comboBox_android_ouya_category = QtWidgets.QComboBox(self.scrollAreaWidgetContents_4)
self.comboBox_android_ouya_category.setObjectName("comboBox_android_ouya_category")
self.comboBox_android_ouya_category.addItem("")
self.comboBox_android_ouya_category.addItem("")
self.comboBox_android_ouya_category.addItem("")
self.gridLayout_3.addWidget(self.comboBox_android_ouya_category, 25, 2, 1, 1)
self.label_36 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_36.setObjectName("label_36")
self.gridLayout_3.addWidget(self.label_36, 26, 0, 1, 1)
self.label_54 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_54.setFont(font)
self.label_54.setObjectName("label_54")
self.gridLayout_3.addWidget(self.label_54, 24, 0, 1, 1)
self.label_35 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_35.setObjectName("label_35")
self.gridLayout_3.addWidget(self.label_35, 25, 0, 1, 1)
self.checkBox_android_ouya_icon_filename = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_ouya_icon_filename.setText("")
self.checkBox_android_ouya_icon_filename.setObjectName("checkBox_android_ouya_icon_filename")
self.gridLayout_3.addWidget(self.checkBox_android_ouya_icon_filename, 26, 1, 1, 1)
self.lineEdit_android_ouya_icon_filename = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_ouya_icon_filename.setEnabled(False)
self.lineEdit_android_ouya_icon_filename.setObjectName("lineEdit_android_ouya_icon_filename")
self.gridLayout_3.addWidget(self.lineEdit_android_ouya_icon_filename, 26, 2, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 27, 2, 1, 1)
self.label_68 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_68.setObjectName("label_68")
self.gridLayout_3.addWidget(self.label_68, 16, 0, 1, 1)
self.label_69 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_69.setObjectName("label_69")
self.gridLayout_3.addWidget(self.label_69, 15, 0, 1, 1)
self.checkBox_android_whitelist = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_whitelist.setText("")
self.checkBox_android_whitelist.setObjectName("checkBox_android_whitelist")
self.gridLayout_3.addWidget(self.checkBox_android_whitelist, 15, 1, 1, 1)
self.checkBox_android_blacklist = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkBox_android_blacklist.setText("")
self.checkBox_android_blacklist.setObjectName("checkBox_android_blacklist")
self.gridLayout_3.addWidget(self.checkBox_android_blacklist, 16, 1, 1, 1)
self.label_70 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_70.setText("")
self.label_70.setObjectName("label_70")
self.gridLayout_3.addWidget(self.label_70, 17, 0, 1, 1)
self.lineEdit_android_blacklist = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_blacklist.setEnabled(False)
self.lineEdit_android_blacklist.setObjectName("lineEdit_android_blacklist")
self.gridLayout_3.addWidget(self.lineEdit_android_blacklist, 16, 2, 1, 1)
self.lineEdit_android_whitelist = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineEdit_android_whitelist.setEnabled(False)
self.lineEdit_android_whitelist.setObjectName("lineEdit_android_whitelist")
self.gridLayout_3.addWidget(self.lineEdit_android_whitelist, 15, 2, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_3)
self.scrollArea_3.setWidget(self.scrollAreaWidgetContents_4)
self.horizontalLayout_5.addWidget(self.scrollArea_3)
self.tabWidget_2.addTab(self.tabAndroid2, "")
self.tabP4A = QtWidgets.QWidget()
self.tabP4A.setObjectName("tabP4A")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.tabP4A)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(6)
self.horizontalLayout.setObjectName("horizontalLayout")
self.scrollArea_4 = QtWidgets.QScrollArea(self.tabP4A)
self.scrollArea_4.setEnabled(True)
self.scrollArea_4.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_4.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_4.setWidgetResizable(True)
self.scrollArea_4.setObjectName("scrollArea_4")
self.scrollAreaWidgetContents_5 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_5.setGeometry(QtCore.QRect(0, 0, 591, 506))
self.scrollAreaWidgetContents_5.setObjectName("scrollAreaWidgetContents_5")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_5)
self.verticalLayout_3.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_3.setSpacing(6)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout_6 = QtWidgets.QGridLayout()
self.gridLayout_6.setSpacing(6)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_34 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_34.setObjectName("label_34")
self.gridLayout_6.addWidget(self.label_34, 0, 0, 1, 1)
self.checkBox_p4a_branch = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkBox_p4a_branch.setText("")
self.checkBox_p4a_branch.setObjectName("checkBox_p4a_branch")
self.gridLayout_6.addWidget(self.checkBox_p4a_branch, 0, 1, 1, 1)
self.lineEdit_p4a_branch = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_p4a_branch.setEnabled(False)
self.lineEdit_p4a_branch.setObjectName("lineEdit_p4a_branch")
self.gridLayout_6.addWidget(self.lineEdit_p4a_branch, 0, 2, 1, 1)
self.label_42 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_42.setObjectName("label_42")
self.gridLayout_6.addWidget(self.label_42, 1, 0, 1, 1)
self.label_43 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_43.setObjectName("label_43")
self.gridLayout_6.addWidget(self.label_43, 2, 0, 1, 1)
self.label_44 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_44.setObjectName("label_44")
self.gridLayout_6.addWidget(self.label_44, 3, 0, 1, 1)
self.label_45 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_45.setObjectName("label_45")
self.gridLayout_6.addWidget(self.label_45, 4, 0, 1, 1)
self.label_46 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_46.setObjectName("label_46")
self.gridLayout_6.addWidget(self.label_46, 5, 0, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(20, 13, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_6.addItem(spacerItem4, 6, 2, 1, 1)
self.checkBox_p4a_port = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkBox_p4a_port.setText("")
self.checkBox_p4a_port.setObjectName("checkBox_p4a_port")
self.gridLayout_6.addWidget(self.checkBox_p4a_port, 5, 1, 1, 1)
self.checkBox_p4a_bootstrap = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkBox_p4a_bootstrap.setText("")
self.checkBox_p4a_bootstrap.setObjectName("checkBox_p4a_bootstrap")
self.gridLayout_6.addWidget(self.checkBox_p4a_bootstrap, 4, 1, 1, 1)
self.checkBox_p4a_hook = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkBox_p4a_hook.setText("")
self.checkBox_p4a_hook.setObjectName("checkBox_p4a_hook")
self.gridLayout_6.addWidget(self.checkBox_p4a_hook, 3, 1, 1, 1)
self.checkBox_p4a_local_recipes = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkBox_p4a_local_recipes.setText("")
self.checkBox_p4a_local_recipes.setObjectName("checkBox_p4a_local_recipes")
self.gridLayout_6.addWidget(self.checkBox_p4a_local_recipes, 2, 1, 1, 1)
self.checkBox_p4a_source_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkBox_p4a_source_dir.setText("")
self.checkBox_p4a_source_dir.setObjectName("checkBox_p4a_source_dir")
self.gridLayout_6.addWidget(self.checkBox_p4a_source_dir, 1, 1, 1, 1)
self.lineEdit_p4a_port = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_p4a_port.setEnabled(False)
self.lineEdit_p4a_port.setObjectName("lineEdit_p4a_port")
self.gridLayout_6.addWidget(self.lineEdit_p4a_port, 5, 2, 1, 1)
self.lineEdit_p4a_bootstrap = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_p4a_bootstrap.setEnabled(False)
self.lineEdit_p4a_bootstrap.setObjectName("lineEdit_p4a_bootstrap")
self.gridLayout_6.addWidget(self.lineEdit_p4a_bootstrap, 4, 2, 1, 1)
self.lineEdit_p4a_hook = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_p4a_hook.setEnabled(False)
self.lineEdit_p4a_hook.setObjectName("lineEdit_p4a_hook")
self.gridLayout_6.addWidget(self.lineEdit_p4a_hook, 3, 2, 1, 1)
self.lineEdit_p4a_local_recipes = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_p4a_local_recipes.setEnabled(False)
self.lineEdit_p4a_local_recipes.setObjectName("lineEdit_p4a_local_recipes")
self.gridLayout_6.addWidget(self.lineEdit_p4a_local_recipes, 2, 2, 1, 1)
self.lineEdit_p4a_source_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineEdit_p4a_source_dir.setEnabled(False)
self.lineEdit_p4a_source_dir.setObjectName("lineEdit_p4a_source_dir")
self.gridLayout_6.addWidget(self.lineEdit_p4a_source_dir, 1, 2, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_6)
self.scrollArea_4.setWidget(self.scrollAreaWidgetContents_5)
self.horizontalLayout.addWidget(self.scrollArea_4)
self.tabWidget_2.addTab(self.tabP4A, "")
self.tabIOS = QtWidgets.QWidget()
self.tabIOS.setObjectName("tabIOS")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.tabIOS)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(6)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.scrollArea_5 = QtWidgets.QScrollArea(self.tabIOS)
self.scrollArea_5.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_5.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_5.setWidgetResizable(True)
self.scrollArea_5.setObjectName("scrollArea_5")
self.scrollAreaWidgetContents_6 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_6.setGeometry(QtCore.QRect(0, 0, 591, 506))
self.scrollAreaWidgetContents_6.setObjectName("scrollAreaWidgetContents_6")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_6)
self.verticalLayout_4.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_4.setSpacing(6)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setSpacing(6)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_66 = QtWidgets.QLabel(self.scrollAreaWidgetContents_6)
self.label_66.setObjectName("label_66")
self.gridLayout_5.addWidget(self.label_66, 1, 0, 1, 1)
self.checkBox_ios_codesign_debug = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_6)
self.checkBox_ios_codesign_debug.setText("")
self.checkBox_ios_codesign_debug.setObjectName("checkBox_ios_codesign_debug")
self.gridLayout_5.addWidget(self.checkBox_ios_codesign_debug, 1, 1, 1, 1)
self.lineEdit_ios_codesign_debug = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_6)
self.lineEdit_ios_codesign_debug.setEnabled(False)
self.lineEdit_ios_codesign_debug.setObjectName("lineEdit_ios_codesign_debug")
self.gridLayout_5.addWidget(self.lineEdit_ios_codesign_debug, 1, 2, 1, 1)
self.checkBox_ios_kivy_ios_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_6)
self.checkBox_ios_kivy_ios_dir.setText("")
self.checkBox_ios_kivy_ios_dir.setObjectName("checkBox_ios_kivy_ios_dir")
self.gridLayout_5.addWidget(self.checkBox_ios_kivy_ios_dir, 0, 1, 1, 1)
self.label_47 = QtWidgets.QLabel(self.scrollAreaWidgetContents_6)
self.label_47.setObjectName("label_47")
self.gridLayout_5.addWidget(self.label_47, 0, 0, 1, 1)
self.lineEdit_ios_kivy_ios_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_6)
self.lineEdit_ios_kivy_ios_dir.setEnabled(False)
self.lineEdit_ios_kivy_ios_dir.setObjectName("lineEdit_ios_kivy_ios_dir")
self.gridLayout_5.addWidget(self.lineEdit_ios_kivy_ios_dir, 0, 2, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem5, 3, 2, 1, 1)
self.label_67 = QtWidgets.QLabel(self.scrollAreaWidgetContents_6)
self.label_67.setObjectName("label_67")
self.gridLayout_5.addWidget(self.label_67, 2, 0, 1, 1)
self.checkBox_ios_codesign_release = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_6)
self.checkBox_ios_codesign_release.setText("")
self.checkBox_ios_codesign_release.setObjectName("checkBox_ios_codesign_release")
self.gridLayout_5.addWidget(self.checkBox_ios_codesign_release, 2, 1, 1, 1)
self.lineEdit_ios_codesign_release = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_6)
self.lineEdit_ios_codesign_release.setEnabled(False)
self.lineEdit_ios_codesign_release.setObjectName("lineEdit_ios_codesign_release")
self.gridLayout_5.addWidget(self.lineEdit_ios_codesign_release, 2, 2, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout_5)
self.scrollArea_5.setWidget(self.scrollAreaWidgetContents_6)
self.horizontalLayout_6.addWidget(self.scrollArea_5)
self.tabWidget_2.addTab(self.tabIOS, "")
self.tabOSX = QtWidgets.QWidget()
self.tabOSX.setObjectName("tabOSX")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.tabOSX)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setSpacing(6)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.scrollArea_7 = QtWidgets.QScrollArea(self.tabOSX)
self.scrollArea_7.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_7.setLineWidth(0)
self.scrollArea_7.setWidgetResizable(True)
self.scrollArea_7.setObjectName("scrollArea_7")
self.scrollAreaWidgetContents_8 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_8.setGeometry(QtCore.QRect(0, 0, 591, 506))
self.scrollAreaWidgetContents_8.setObjectName("scrollAreaWidgetContents_8")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_8)
self.verticalLayout_6.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_6.setSpacing(6)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.gridLayout_7 = QtWidgets.QGridLayout()
self.gridLayout_7.setSpacing(6)
self.gridLayout_7.setObjectName("gridLayout_7")
self.label_71 = QtWidgets.QLabel(self.scrollAreaWidgetContents_8)
self.label_71.setObjectName("label_71")
self.gridLayout_7.addWidget(self.label_71, 1, 0, 1, 1)
self.label_72 = QtWidgets.QLabel(self.scrollAreaWidgetContents_8)
self.label_72.setObjectName("label_72")
self.gridLayout_7.addWidget(self.label_72, 2, 0, 1, 1)
self.spinBox_osx_python_version = QtWidgets.QSpinBox(self.scrollAreaWidgetContents_8)
self.spinBox_osx_python_version.setMinimum(2)
self.spinBox_osx_python_version.setProperty("value", 3)
self.spinBox_osx_python_version.setObjectName("spinBox_osx_python_version")
self.gridLayout_7.addWidget(self.spinBox_osx_python_version, 1, 2, 1, 1)
self.lineEdit_osx_kivy_version = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_8)
self.lineEdit_osx_kivy_version.setEnabled(False)
self.lineEdit_osx_kivy_version.setObjectName("lineEdit_osx_kivy_version")
self.gridLayout_7.addWidget(self.lineEdit_osx_kivy_version, 2, 2, 1, 1)
self.checkBox_osx_kivy_version = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_8)
self.checkBox_osx_kivy_version.setText("")
self.checkBox_osx_kivy_version.setObjectName("checkBox_osx_kivy_version")
self.gridLayout_7.addWidget(self.checkBox_osx_kivy_version, 2, 1, 1, 1)
self.label_73 = QtWidgets.QLabel(self.scrollAreaWidgetContents_8)
self.label_73.setObjectName("label_73")
self.gridLayout_7.addWidget(self.label_73, 0, 0, 1, 1)
self.lineEdit_author = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_8)
self.lineEdit_author.setObjectName("lineEdit_author")
self.gridLayout_7.addWidget(self.lineEdit_author, 0, 2, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout_7)
spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem6)
self.scrollArea_7.setWidget(self.scrollAreaWidgetContents_8)
self.horizontalLayout_8.addWidget(self.scrollArea_7)
self.tabWidget_2.addTab(self.tabOSX, "")
self.widget = QtWidgets.QWidget(self.splitterMain)
self.widget.setObjectName("widget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_2.setContentsMargins(11, 11, 11, 11)
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.tabWidget_3 = QtWidgets.QTabWidget(self.widget)
self.tabWidget_3.setObjectName("tabWidget_3")
self.tab_3 = QtWidgets.QWidget()
self.tab_3.setObjectName("tab_3")
self.verticalLayout = QtWidgets.QVBoxLayout(self.tab_3)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.textEdit = QtWidgets.QTextEdit(self.tab_3)
self.textEdit.setAutoFillBackground(False)
self.textEdit.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textEdit.setFrameShadow(QtWidgets.QFrame.Raised)
self.textEdit.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.textEdit.setReadOnly(True)
self.textEdit.setObjectName("textEdit")
self.verticalLayout.addWidget(self.textEdit)
self.tabWidget_3.addTab(self.tab_3, "")
self.horizontalLayout_2.addWidget(self.tabWidget_3)
self.verticalLayout_5.addWidget(self.splitterMain)
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1271, 26))
self.menuBar.setObjectName("menuBar")
self.menuFile = QtWidgets.QMenu(self.menuBar)
self.menuFile.setObjectName("menuFile")
self.menuSettings = QtWidgets.QMenu(self.menuBar)
self.menuSettings.setObjectName("menuSettings")
self.menuHelp = QtWidgets.QMenu(self.menuBar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menuBar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.actionSettings = QtWidgets.QAction(MainWindow)
self.actionSettings.setObjectName("actionSettings")
self.menuSettings.addAction(self.actionSettings)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menuSettings.menuAction())
self.menuBar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
self.tabWidget_2.setCurrentIndex(0)
self.tabWidget_3.setCurrentIndex(0)
self.checkBox_source_exclude_dirs.toggled['bool'].connect(self.lineEdit_source_exclude_dirs.setEnabled)
self.checkBox_source_exclude_patterns.toggled['bool'].connect(self.lineEdit_source_exclude_patterns.setEnabled)
self.checkBox_source_exclude_exts.toggled['bool'].connect(self.lineEdit_source_exclude_exts.setEnabled)
self.checkBox_source_include_patterns.toggled['bool'].connect(self.lineEdit_source_include_patterns.setEnabled)
self.checkBox_source_include_exts.toggled['bool'].connect(self.lineEdit_source_include_exts.setEnabled)
self.checkBox_services.toggled['bool'].connect(self.lineEdit_services.setEnabled)
self.checkBox_android_ant_path.toggled['bool'].connect(self.lineEdit_android_ant_path.setEnabled)
self.checkBox_android_ndk_path.toggled['bool'].connect(self.lineEdit_android_ndk_path.setEnabled)
self.checkBox_android_presplash_color.toggled['bool'].connect(self.comboBox_android_presplash_color.setEnabled)
self.checkBox_android_sdk_path.toggled['bool'].connect(self.lineEdit_android_sdk_path.setEnabled)
self.checkBox_requirements.toggled['bool'].connect(self.lineEdit_requirements.setEnabled)
self.checkBox_garden_requirements.toggled['bool'].connect(self.lineEdit_garden_requirements.setEnabled)
self.checkBox_presplash.toggled['bool'].connect(self.lineEdit_presplash.setEnabled)
self.checkBox_icon_filename.toggled['bool'].connect(self.lineEdit_icon_filename.setEnabled)
self.checkBox_android_ant_path.toggled['bool'].connect(self.lineEdit_android_ant_path.setEnabled)
self.checkBox_android_entrypoint.toggled['bool'].connect(self.lineEdit_android_entrypoint.setEnabled)
self.checkBox_android_add_aars.toggled['bool'].connect(self.lineEdit_android_add_aars.setEnabled)
self.checkBox_android_add_java_src.toggled['bool'].connect(self.lineEdit_android_add_java_src.setEnabled)
self.checkBox_android_gradle_dependencies.toggled['bool'].connect(self.lineEdit_android_gradle_dependencies.setEnabled)
self.checkBox_android_add_activites.toggled['bool'].connect(self.lineEdit_android_add_activites.setEnabled)
self.checkBox_android_manifest_intent_filters.toggled['bool'].connect(self.lineEdit_android_manifest_intent_filters.setEnabled)
self.checkBox_android_manifest_launch_mode.toggled['bool'].connect(self.lineEdit_android_manifest_launch_mode.setEnabled)
self.checkBox_android_meta_data.toggled['bool'].connect(self.lineEdit_android_meta_data.setEnabled)
self.checkBox_android_library_references.toggled['bool'].connect(self.lineEdit_android_library_references.setEnabled)
self.checkBox_android_logcat_filters.toggled['bool'].connect(self.lineEdit_android_logcat_filters.setEnabled)
self.checkBox_android_copy_libs.toggled['bool'].connect(self.lineEdit_android_copy_libs.setEnabled)
self.checkBox_android_whitelist.toggled['bool'].connect(self.lineEdit_android_whitelist.setEnabled)
self.checkBox_android_blacklist.toggled['bool'].connect(self.lineEdit_android_blacklist.setEnabled)
self.checkBox_android_add_libs_armeabi.toggled['bool'].connect(self.lineEdit_android_add_libs_armeabi.setEnabled)
self.checkBox_android_add_libs_armeabi_v7a.toggled['bool'].connect(self.lineEdit_android_add_libs_armeabi_v7a.setEnabled)
self.checkBox_android_add_libs_x86.toggled['bool'].connect(self.lineEdit_android_add_libs_x86.setEnabled)
self.checkBox_android_add_libs_mips.toggled['bool'].connect(self.lineEdit_android_add_libs_mips.setEnabled)
self.checkBox_android_ouya_icon_filename.toggled['bool'].connect(self.lineEdit_android_ouya_icon_filename.setEnabled)
self.checkBox_p4a_branch.toggled['bool'].connect(self.lineEdit_p4a_branch.setEnabled)
self.checkBox_p4a_source_dir.toggled['bool'].connect(self.lineEdit_p4a_source_dir.setEnabled)
self.checkBox_p4a_local_recipes.toggled['bool'].connect(self.lineEdit_p4a_local_recipes.setEnabled)
self.checkBox_p4a_hook.toggled['bool'].connect(self.lineEdit_p4a_hook.setEnabled)
self.checkBox_p4a_bootstrap.toggled['bool'].connect(self.lineEdit_p4a_bootstrap.setEnabled)
self.checkBox_p4a_port.toggled['bool'].connect(self.lineEdit_p4a_port.setEnabled)
self.checkBox_ios_kivy_ios_dir.toggled['bool'].connect(self.lineEdit_ios_kivy_ios_dir.setEnabled)
self.checkBox_ios_codesign_debug.toggled['bool'].connect(self.lineEdit_ios_codesign_debug.setEnabled)
self.checkBox_ios_codesign_release.toggled['bool'].connect(self.lineEdit_ios_codesign_release.setEnabled)
self.checkBox_osx_kivy_version.toggled['bool'].connect(self.lineEdit_osx_kivy_version.setEnabled)
self.checkBox_build_dir.toggled['bool'].connect(self.lineEdit_build_dir.setEnabled)
self.checkBox_bin_dir.toggled['bool'].connect(self.lineEdit_bin_dir.setEnabled)
self.menuBar.triggered['QAction*'].connect(MainWindow.onMenubar)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.lineEdit_requirements.setText(_translate("MainWindow", "sqlite3,kivy"))
self.label_78.setText(_translate("MainWindow", "Application Requirements:"))
self.lineEdit_icon_filename.setText(_translate("MainWindow", "%(source.dir)s/data/icon.png"))
self.label_75.setText(_translate("MainWindow", "Application Icon:"))
self.label_77.setText(_translate("MainWindow", "Presplash Image:"))
self.lineEdit_presplash.setToolTip(_translate("MainWindow", "Presplash of the application"))
self.lineEdit_presplash.setText(_translate("MainWindow", "%(source.dir)s/data/presplash.png"))
self.label_76.setText(_translate("MainWindow", "Garden Requirements:"))
self.label_79.setText(_translate("MainWindow", "Application"))
self.label_5.setText(_translate("MainWindow", "Source Dir:"))
self.toolButton_source_dir.setText(_translate("MainWindow", "..."))
self.label_4.setText(_translate("MainWindow", "Domain:"))
self.label_9.setText(_translate("MainWindow", "Source Exclude Dirs:"))
self.lineEdit_source_include_exts.setToolTip(_translate("MainWindow", "# (list) Source files to include (let empty to include all the files)"))
self.lineEdit_source_include_exts.setText(_translate("MainWindow", "py,png,jpg,kv,atlas"))
self.lineEdit_title.setText(_translate("MainWindow", "AppTitle"))
self.label_2.setText(_translate("MainWindow", "Package Name:"))
self.lineEdit_name.setText(_translate("MainWindow", "myapp"))
self.lineEdit_source_dir.setToolTip(_translate("MainWindow", "Source code where the main.py"))
self.lineEdit_source_dir.setText(_translate("MainWindow", "."))
self.label_8.setText(_translate("MainWindow", "Source Exclude Exts:"))
self.lineEdit_source_exclude_dirs.setToolTip(_translate("MainWindow", "List of directory to exclude (let empty to not exclude anything)"))
self.lineEdit_source_exclude_dirs.setText(_translate("MainWindow", "tests,bin"))
self.label_6.setText(_translate("MainWindow", "Source Include Exts:"))
self.label_3.setText(_translate("MainWindow", "Version:"))
self.lineEdit_domain.setToolTip(_translate("MainWindow", "# (str) Package domain (needed for android/ios packaging)"))
self.lineEdit_domain.setText(_translate("MainWindow", "org.test"))
self.lineEdit_version.setText(_translate("MainWindow", "0.1"))
self.label.setText(_translate("MainWindow", "Title:"))
self.lineEdit_services.setToolTip(_translate("MainWindow", "List of service to declare"))
self.lineEdit_services.setText(_translate("MainWindow", "NAME:ENTRYPOINT_TO_PY,NAME2:ENTRYPOINT2_TO_PY"))
self.label_11.setText(_translate("MainWindow", "Orientation:"))
self.label_10.setText(_translate("MainWindow", "Source Exclude Patterns:"))
self.lineEdit_source_exclude_patterns.setToolTip(_translate("MainWindow", "# (list) List of exclusions using pattern matching"))
self.lineEdit_source_exclude_patterns.setText(_translate("MainWindow", "license,images/*/*.jpg"))
self.comboBox_orientation.setToolTip(_translate("MainWindow", "# (str) Supported orientation (one of landscape, portrait or all)"))
self.comboBox_orientation.setItemText(0, _translate("MainWindow", "Landscape"))
self.comboBox_orientation.setItemText(1, _translate("MainWindow", "Portrait"))
self.comboBox_orientation.setItemText(2, _translate("MainWindow", "All"))
self.label_26.setText(_translate("MainWindow", "Services:"))
self.lineEdit_source_exclude_exts.setToolTip(_translate("MainWindow", "# (list) Source files to exclude (let empty to not exclude anything)"))
self.lineEdit_source_exclude_exts.setText(_translate("MainWindow", "spec"))
self.lineEdit_source_include_patterns.setToolTip(_translate("MainWindow", "# (list) List of inclusions using pattern matching"))
self.lineEdit_source_include_patterns.setText(_translate("MainWindow", "assets/*,images/*.png"))
self.label_7.setText(_translate("MainWindow", "Source Include Patterns:"))
self.label_31.setText(_translate("MainWindow", "Source"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Project), _translate("MainWindow", "Project"))
self.label_64.setText(_translate("MainWindow", "Build Dir:"))
self.lineEdit_bin_dir.setToolTip(_translate("MainWindow", "(str) Path to build output (i.e. .apk, .ipa) storage"))
self.lineEdit_bin_dir.setText(_translate("MainWindow", "./bin"))
self.label_65.setText(_translate("MainWindow", "Bin Dir:"))
self.lineEdit_build_dir.setToolTip(_translate("MainWindow", "(str) Path to build artifact storage, absolute or relative to spec file"))
self.lineEdit_build_dir.setText(_translate("MainWindow", "./.buildozer"))
self.toolButton_build_dir.setText(_translate("MainWindow", "..."))
self.toolButton_bin_dir.setText(_translate("MainWindow", "..."))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabProject), _translate("MainWindow", "Build"))
self.label_21.setText(_translate("MainWindow", "Android Min API:"))
self.lineEdit2_android_minapi.setToolTip(_translate("MainWindow", "Minimum API required"))
self.lineEdit2_android_minapi.setText(_translate("MainWindow", "19"))
self.label_13.setText(_translate("MainWindow", "Android ANT Path:"))
self.label_20.setText(_translate("MainWindow", "Android API:"))
self.lineEdit_android_ant_path.setToolTip(_translate("MainWindow", "Android ANT directory (if empty, it will be automatically downloaded.)"))
self.toolButton_android_ant_path.setText(_translate("MainWindow", "..."))
self.label_25.setText(_translate("MainWindow", "Android Arch:"))
self.comboBox_android_arch.setToolTip(_translate("MainWindow", "The Android arch to build for"))
self.comboBox_android_arch.setItemText(0, _translate("MainWindow", "armeabi-v7a"))
self.comboBox_android_arch.setItemText(1, _translate("MainWindow", "arm64-v8a"))
self.comboBox_android_arch.setItemText(2, _translate("MainWindow", "x86"))
self.lineEdit2_android_api.setToolTip(_translate("MainWindow", " Android API to use"))
self.lineEdit2_android_api.setText(_translate("MainWindow", "19"))
self.label_22.setText(_translate("MainWindow", "Android SDK:"))
self.label_16.setText(_translate("MainWindow", "Skip SDK Update:"))
self.label_19.setText(_translate("MainWindow", "Android NDK:"))
self.label_23.setText(_translate("MainWindow", "Permissions:"))
self.label_12.setText(_translate("MainWindow", "Fullscreen:"))
self.toolButton_android_permissions.setText(_translate("MainWindow", "..."))
self.label_14.setText(_translate("MainWindow", "Presplash Color:"))
self.label_18.setText(_translate("MainWindow", "Android SDK Path:"))
self.toolButton_android_presplash_color.setText(_translate("MainWindow", "..."))
self.checkBox2_android_private_storage.setToolTip(_translate("MainWindow", "(bool) Use --private data storage (True) or --dir public storage (False)"))
self.label_24.setText(_translate("MainWindow", "Private Storage:"))
self.label_58.setText(_translate("MainWindow", "Wakelock:"))
self.checkBox2_android_wakelock.setToolTip(_translate("MainWindow", "# (bool) Indicate whether the screen should stay on\n"
"# Don\'t forget to add the WAKE_LOCK permission if you set this to True"))
self.lineEdit_android_permissions.setText(_translate("MainWindow", "INTERNET"))
self.checkBox2_android_skip_update.setToolTip(_translate("MainWindow", "If checked, then skip trying to update the Android sdk"))
self.toolButton_android_sdk_path.setText(_translate("MainWindow", "..."))
self.lineEdit_android_sdk_path.setToolTip(_translate("MainWindow", "Android SDK directory (if empty, it will be automatically downloaded.)"))
self.lineEdit2_android_sdk.setText(_translate("MainWindow", "20"))
self.label_17.setText(_translate("MainWindow", "Android NDK Path:"))
self.lineEdit2_android_ndk.setToolTip(_translate("MainWindow", "Android NDK version to use"))
self.lineEdit2_android_ndk.setText(_translate("MainWindow", "9c"))
self.lineEdit_android_ndk_path.setToolTip(_translate("MainWindow", "Android NDK directory (if empty, it will be automatically downloaded.)"))
self.toolButton_android_ndk_path.setText(_translate("MainWindow", "..."))
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabAndroid), _translate("MainWindow", "Android"))
self.label_28.setToolTip(_translate("MainWindow", " # (list) Android AAR archives to add (currently works only with sdl2_gradle\n"
" # bootstrap)"))
self.label_28.setText(_translate("MainWindow", "Android AAR Archives:"))
self.label_15.setText(_translate("MainWindow", "Android Jars:"))
self.lineEdit_android_jars.setToolTip(_translate("MainWindow", "# (list) List of Java .jar files to add to the libs so that pyjnius can access\n"
" # their classes. Don\'t add jars that you do not need, since extra jars can slow\n"
" # down the build process. Allows wildcards matching, for example:\n"
" # OUYA-ODK/libs/*.jar\n"
" #android_add_jars: foo.jar,bar.jar,path/to/more/*.jar"))
self.lineEdit_android_jars.setText(_translate("MainWindow", "foo.jar,bar.jar,path/to/more/*.jar"))
self.lineEdit_android_add_aars.setToolTip(_translate("MainWindow", "# (list) Android AAR archives to add (currently works only with sdl2_gradle\n"
"# bootstrap"))
self.label_33.setText(_translate("MainWindow", "Android Entry Point:"))
self.lineEdit_android_entrypoint.setToolTip(_translate("MainWindow", "(str) Android entry point, default is ok for Kivy-based app"))
self.lineEdit_android_entrypoint.setText(_translate("MainWindow", "org.renpy.android.PythonActivity"))
self.label_57.setText(_translate("MainWindow", "Manifest Launch Mode:"))
self.label_39.setText(_translate("MainWindow", "Activities (Java classes):"))
self.label_27.setToolTip(_translate("MainWindow", "# (list) List of Java files to add to the android project (can be java or a\n"
" # directory containing the files)"))
self.label_27.setText(_translate("MainWindow", "Java Source:"))
self.label_56.setText(_translate("MainWindow", "Manifest Intent Filters:"))
self.lineEdit_android_manifest_intent_filters.setToolTip(_translate("MainWindow", "# (str) XML file to include as an intent filters in <activity> tag"))
self.label_38.setText(_translate("MainWindow", "Gradle Dependencies:"))
self.lineEdit_android_add_java_src.setToolTip(_translate("MainWindow", "# (list) List of Java files to add to the android project (can be java or a\n"
"# directory containing the files)"))
self.lineEdit_android_gradle_dependencies.setToolTip(_translate("MainWindow", "(list) Gradle dependencies to add (currently works only with sdl2_gradle # bootstrap)"))
self.lineEdit_android_add_activites.setToolTip(_translate("MainWindow", "# (list) Java classes to add as activities to the manifest."))
self.lineEdit_android_add_activites.setText(_translate("MainWindow", "com.example.ExampleActivity"))
self.label_53.setText(_translate("MainWindow", "MIPS Libs"))
self.label_51.setText(_translate("MainWindow", "Armeabi v7a Libs"))
self.label_61.setText(_translate("MainWindow", "Library References:"))
self.lineEdit_android_add_libs_mips.setText(_translate("MainWindow", "libs/android-mips/*.so"))
self.lineEdit_android_meta_data.setToolTip(_translate("MainWindow", "# (list) Android application meta-data to set (key=value format)"))
self.lineEdit_android_manifest_launch_mode.setToolTip(_translate("MainWindow", "# (str) launchMode to set for the main activity"))
self.lineEdit_android_manifest_launch_mode.setText(_translate("MainWindow", "standard"))
self.label_60.setText(_translate("MainWindow", "Logcat Filters:"))
self.label_37.setToolTip(_translate("MainWindow", "# (list) Android additionnal libraries to copy into libs/armeabi"))
self.label_37.setText(_translate("MainWindow", "Additional Libraries"))
self.label_63.setText(_translate("MainWindow", "Copy Libraries:"))
self.label_50.setText(_translate("MainWindow", "Armeabi Libs"))
self.lineEdit_android_copy_libs.setToolTip(_translate("MainWindow", "(bool) Copy library instead of making a libpymodules.so"))
self.label_59.setText(_translate("MainWindow", "Android Meta-data:"))
self.lineEdit_android_add_libs_armeabi_v7a.setText(_translate("MainWindow", "libs/android-v7/*.so"))
self.lineEdit_android_add_libs_armeabi.setText(_translate("MainWindow", "libs/android/*.so"))
self.label_52.setText(_translate("MainWindow", "x86 Libs"))
self.lineEdit_android_logcat_filters.setToolTip(_translate("MainWindow", "# (str) Android logcat filters to use"))
self.lineEdit_android_logcat_filters.setText(_translate("MainWindow", "*:S python:D"))
self.lineEdit_android_add_libs_x86.setText(_translate("MainWindow", "libs/android-x86/*.so"))
self.lineEdit_android_library_references.setToolTip(_translate("MainWindow", "# (list) Android library project to add (will be added in the\n"
"# project.properties automatically.)"))
self.comboBox_android_ouya_category.setToolTip(_translate("MainWindow", "# (str) OUYA Console category. Should be one of GAME or APP\n"
"# If you leave this blank, OUYA support will not be enabled"))
self.comboBox_android_ouya_category.setItemText(0, _translate("MainWindow", "Disabled"))
self.comboBox_android_ouya_category.setItemText(1, _translate("MainWindow", "APP"))
self.comboBox_android_ouya_category.setItemText(2, _translate("MainWindow", "GAME"))
self.label_36.setText(_translate("MainWindow", "Ouya Icon: (732x412 PNG)"))
self.label_54.setText(_translate("MainWindow", "Ouya"))
self.label_35.setText(_translate("MainWindow", "Ouya Category:"))
self.lineEdit_android_ouya_icon_filename.setToolTip(_translate("MainWindow", "# (str) Filename of OUYA Console icon. It must be a 732x412 png image."))
self.lineEdit_android_ouya_icon_filename.setText(_translate("MainWindow", "%(source.dir)s/data/ouya_icon.png"))
self.label_68.setText(_translate("MainWindow", "Android Blacklist:"))
self.label_69.setText(_translate("MainWindow", "Android Whitelist:"))
self.lineEdit_android_blacklist.setToolTip(_translate("MainWindow", "blacklist file"))
self.lineEdit_android_whitelist.setToolTip(_translate("MainWindow", "(list) Pattern to whitelist for the whole project"))
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabAndroid2), _translate("MainWindow", "Android (Advanced)"))
self.tabP4A.setToolTip(_translate("MainWindow", "Python for android (p4a) specific"))
self.label_34.setText(_translate("MainWindow", "P4A Branch:"))
self.lineEdit_p4a_branch.setToolTip(_translate("MainWindow", "(str) python-for-android branch to use, defaults to stable"))
self.lineEdit_p4a_branch.setText(_translate("MainWindow", "stable"))
self.label_42.setText(_translate("MainWindow", "P4A Source Dir:"))
self.label_43.setText(_translate("MainWindow", "P4A Recipes Dir:"))
self.label_44.setText(_translate("MainWindow", "P4A Hook:"))
self.label_45.setText(_translate("MainWindow", "P4A Bootstrap:"))
self.label_46.setText(_translate("MainWindow", "P4A Port:"))
self.lineEdit_p4a_port.setToolTip(_translate("MainWindow", "(int) port number to specify an explicit --port= p4a argument (eg for bootstrap flask)"))
self.lineEdit_p4a_bootstrap.setToolTip(_translate("MainWindow", "(str) Bootstrap to use for android builds"))
self.lineEdit_p4a_bootstrap.setText(_translate("MainWindow", "sdl2"))
self.lineEdit_p4a_hook.setToolTip(_translate("MainWindow", "(str) Filename to the hook for p4a"))
self.lineEdit_p4a_local_recipes.setToolTip(_translate("MainWindow", "(str) The directory in which python-for-android should look for your own build recipes (if any)"))
self.lineEdit_p4a_source_dir.setToolTip(_translate("MainWindow", "(str) python-for-android git clone directory (if empty, it will be automatically cloned from github)"))
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabP4A), _translate("MainWindow", "Python-For-Android"))
self.label_66.setText(_translate("MainWindow", "IOS Codesign Debug:"))
self.lineEdit_ios_codesign_debug.setToolTip(_translate("MainWindow", "# (str) Name of the certificate to use for signing the debug version\n"
"# Get a list of available identities: buildozer ios list_identities"))
self.lineEdit_ios_codesign_debug.setText(_translate("MainWindow", "\"iPhone Developer: <lastname> <firstname> (<hexstring>)\""))
self.label_47.setText(_translate("MainWindow", "IOS Kivy Dir:"))
self.lineEdit_ios_kivy_ios_dir.setToolTip(_translate("MainWindow", "(str) Path to a custom kivy-ios folder"))
self.lineEdit_ios_kivy_ios_dir.setText(_translate("MainWindow", "../kivy-ios"))
self.label_67.setText(_translate("MainWindow", "IOS Codesign Release:"))
self.lineEdit_ios_codesign_release.setToolTip(_translate("MainWindow", "(str) Name of the certificate to use for signing the release version"))
self.lineEdit_ios_codesign_release.setText(_translate("MainWindow", "%(ios.codesign.debug)s"))
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabIOS), _translate("MainWindow", "iOS"))
self.label_71.setText(_translate("MainWindow", "OSX Python Version:"))
self.label_72.setText(_translate("MainWindow", "OSX Kivy Version:"))
self.spinBox_osx_python_version.setToolTip(_translate("MainWindow", "# change the major version of python used by the app"))
self.lineEdit_osx_kivy_version.setToolTip(_translate("MainWindow", "# Kivy version to use"))
self.lineEdit_osx_kivy_version.setText(_translate("MainWindow", "1.9.1"))
self.label_73.setText(_translate("MainWindow", "Author"))
self.lineEdit_author.setText(_translate("MainWindow", "© Copyright Info"))
self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabOSX), _translate("MainWindow", "OSX"))
self.textEdit.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Hello</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.tabWidget_3.setTabText(self.tabWidget_3.indexOf(self.tab_3), _translate("MainWindow", "Output"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuSettings.setTitle(_translate("MainWindow", "Tools"))
self.menuHelp.setTitle(_translate("MainWindow", "Help"))
self.actionSettings.setText(_translate("MainWindow", "Settings"))
self.actionSettings.setShortcut(_translate("MainWindow", "Ctrl+P"))
|
swprojects/Buildertron
|
buildertron/forms/uiandroidpermissionsdialog.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'androidpermissionsdialog.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_AndroidPermissionsDialog(object):
    """Auto-generated (PyQt5 pyuic) layout class for the Android permissions dialog.

    Generated from ``androidpermissionsdialog.ui`` — per the file header, hand
    edits will be lost on regeneration; keep the statement order as emitted.
    ``AndroidPermissionsDialog`` (the logic class) fills ``gridPermissions``
    with one checkbox per permission and mirrors the selection into
    ``permissionsList``.
    """

    def setupUi(self, AndroidPermissionsDialog):
        """Build the dialog's widget tree and wire the standard button box."""
        AndroidPermissionsDialog.setObjectName("AndroidPermissionsDialog")
        AndroidPermissionsDialog.resize(640, 566)
        self.gridLayout = QtWidgets.QGridLayout(AndroidPermissionsDialog)
        self.gridLayout.setObjectName("gridLayout")
        # Scroll area hosting the (potentially long) list of permission checkboxes.
        self.scrollArea = QtWidgets.QScrollArea(AndroidPermissionsDialog)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 616, 455))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
        self.verticalLayout.setObjectName("verticalLayout")
        # Left empty here: AndroidPermissionsDialog.__init__ adds one QCheckBox
        # per permission to this grid at runtime.
        self.gridPermissions = QtWidgets.QGridLayout()
        self.gridPermissions.setObjectName("gridPermissions")
        self.verticalLayout.addLayout(self.gridPermissions)
        # Spacer keeps the checkboxes packed at the top of the scroll area.
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.gridLayout.addWidget(self.scrollArea, 2, 1, 1, 1)
        self.label_6 = QtWidgets.QLabel(AndroidPermissionsDialog)
        self.label_6.setObjectName("label_6")
        self.gridLayout.addWidget(self.label_6, 1, 1, 1, 1)
        self.buttonBox = QtWidgets.QDialogButtonBox(AndroidPermissionsDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 4, 1, 1, 1)
        # Read-only summary of the selected permissions (comma-separated).
        self.permissionsList = QtWidgets.QLineEdit(AndroidPermissionsDialog)
        self.permissionsList.setReadOnly(True)
        self.permissionsList.setObjectName("permissionsList")
        self.gridLayout.addWidget(self.permissionsList, 3, 1, 1, 1)
        self.retranslateUi(AndroidPermissionsDialog)
        self.buttonBox.accepted.connect(AndroidPermissionsDialog.accept)
        self.buttonBox.rejected.connect(AndroidPermissionsDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(AndroidPermissionsDialog)

    def retranslateUi(self, AndroidPermissionsDialog):
        """Set all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        # NOTE(review): "About Buildertron..." looks like a copy-paste from the
        # about dialog's .ui file; a permissions title would fit better — fix in
        # the .ui source and regenerate, not here.
        AndroidPermissionsDialog.setWindowTitle(_translate("AndroidPermissionsDialog", "About Buildertron..."))
        self.label_6.setText(_translate("AndroidPermissionsDialog", "Note: Ensure you understand the usage of permissions before adding to the spec."))
|
swprojects/Buildertron
|
buildertron/spec/default.py
|
<filename>buildertron/spec/default.py
# Default buildozer.spec values used to seed a new project.
# Keys use underscores; buildozer's own spec file uses dotted names
# (e.g. android.entrypoint), presumably translated on write — TODO confirm.
spec = {
    # PROJECT SETTINGS
    'targetname': 'android',
    'build_dir': './.buildozer',
    'bin_dir': './bin',
    # APPLICATION
    'title': 'My Application',
    'package_name': 'myapp',
    'version': '0.1',
    'version_regex': '__version__ = [\'"](.*)[\'"]',
    'version_filename': '%(source.dir)s/main.py',
    'package_domain': 'org.test',
    # SOURCE
    'source_dir': '.',
    'source_exclude_dirs': 'tests, bin',
    'source_include_exts': 'py,png,jpg,kv,atlas',
    'source_exclude_exts': 'spec',
    'source_include_patterns': 'assets/*,images/*.png',
    'source_exclude_patterns': 'license,images/*/*.jpg',
    'requirements': 'kivy',
    'requirements_source_kivy': '../../kivy',
    'garden_requirements': '',
    # NOTE(review): these two use %(source_dir)s while version_filename above
    # uses %(source.dir)s — confirm which interpolation form the writer expects.
    'presplash_filename': '%(source_dir)s/data/presplash.png',
    'icon_filename': '%(source_dir)s/data/icon.png',
    'orientation': 'portrait',  # landscape, portrait or all
    'services': 'NAME:ENTRYPOINT_TO_PY,NAME2:ENTRYPOINT2_TO_PY',  # (list) List of service to declare
    # ANDROID SPECIFIC
    'android_arch': 'armeabi-v7a',
    'android_api': 19,
    'android_minapi': 9,
    'android_sdk': 20,
    'android_sdk_path': '',
    'android_skip_update': False,
    'android_ndk': '9c',
    'android_ndk_path': '',
    'android_ant_path': '',
    'fullscreen': 'False',
    'android_permissions': 'INTERNET',
    'android_presplash_color': '#FFFFFF',
    'android_private_storage': True,
    'android_wakelock': False,
    # ANDROID SPECIFIC (Advanced)
    # BUG FIX: was 'org.renpy.android_PythonActivity' (underscore typo); the
    # default entry point is the dotted class path, as set in the UI defaults.
    'android_entrypoint': 'org.renpy.android.PythonActivity',
    'android_add_jars': 'foo.jar,bar.jar,path/to/more/*.jar',
    'android_add_java_src': '',
    'android_add_aars': '',
    'android_gradle_dependencies': '',
    'android_add_activites': 'com.example.ExampleActivity',
    # MANIFEST
    'android_manifest_intent_filters': '',
    'android_manifest_launch_mode': 'standard',
    'android_meta_data': '',
    'android_library_references': '',
    'android_logcat_filters': '*:S python:D',
    'android_copy_libs': '1',
    'android_whitelist': '',
    'android_blacklist': '',
    # ADDITIONAL LIBRARIES
    'android_add_libs_armeabi': 'libs/android/*.so',
    'android_add_libs_armeabi_v7a': 'libs/android-v7/*.so',
    'android_add_libs_x86': 'libs/android-x86/*.so',
    'android_add_libs_mips': 'libs/android-mips/*.so',
    # OUYA
    'android_ouya_category': 'GAME',
    'android_ouya_icon_filename': '%(source_dir)s/data/ouya_icon.png',
    # Python for android (p4a) specific
    'p4a_branch': 'stable',
    'p4a_source_dir': '',
    'p4a_local_recipes': '',
    'p4a_hook': '',
    'p4a_bootstrap': 'sdl2',
    'p4a_port': '',
    # iOS specific
    'ios_kivy_ios_dir': '../kivy-ios',
    'ios_codesign_debug': 'iPhone Developer: <lastname> <firstname> (<hexstring>)',
    # BUG FIX: was '%(ios_codesign.debug)s', mixing the underscore key with the
    # dotted interpolation; the UI default (and buildozer) use ios.codesign.debug.
    'ios_codesign_release': '%(ios.codesign.debug)s',
    # OSX Specific
    'author': '© Copyright Info',
    'osx_python_version': 3,
    'osx_kivy_version': '1.9.1',
}
|
swprojects/Buildertron
|
buildertron/dialogs/androidpermissionsdialog.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*
"""
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from forms.uiandroidpermissionsdialog import Ui_AndroidPermissionsDialog
from PyQt5.QtWidgets import QDialog, QCheckBox
from functools import partial
from spec.android_permissions import get_android_permissions
# All known Android permission names, loaded once at import time and shared
# by every AndroidPermissionsDialog instance.
permissions = get_android_permissions()
class AndroidPermissionsDialog(QDialog, Ui_AndroidPermissionsDialog):
    """Dialog for choosing Android permissions via one checkbox per permission.

    The current selection is mirrored into the read-only ``permissionsList``
    line edit as a sorted, comma-separated string, exposed via :attr:`value`.
    """

    def __init__(self, parent):
        """Create the dialog and populate the grid with permission checkboxes."""
        super(AndroidPermissionsDialog, self).__init__(parent)
        self.ui = Ui_AndroidPermissionsDialog()
        self.ui.setupUi(self)
        self.permissionsList = self.ui.permissionsList
        self.gridPermissions = self.ui.gridPermissions
        # One QCheckBox per known permission, keyed by permission name.
        self.checkBoxes = {}
        for perm in permissions:
            checkbox = QCheckBox(self)
            self.checkBoxes[perm] = checkbox
            checkbox.setText(perm)
            self.gridPermissions.addWidget(checkbox)
            # partial() binds the permission name so one slot serves all boxes.
            checkbox.toggled.connect(partial(self.onCheckBox, perm))

    @property
    def value(self):
        """Return the current comma-separated permission string."""
        return self.permissionsList.text()

    @staticmethod
    def _known(perm_string):
        """Split *perm_string* on commas and keep only known permission names."""
        return {p for p in perm_string.split(',') if p in permissions}

    def onCheckBox(self, permission, state):
        """Add/remove *permission* from the list when its checkbox is toggled.

        :param permission: permission name bound at connect time.
        :param state: checkbox state (True when checked).
        """
        selected = self._known(self.value)
        if state:
            selected.add(permission)
        else:
            # BUG FIX: was set.remove(), which raises KeyError when the
            # permission is not present (e.g. the text was empty/stale);
            # discard() is a safe no-op in that case.
            selected.discard(permission)
        self.permissionsList.setText(','.join(sorted(selected)))

    def setValue(self, value):
        """Check the boxes named in *value* (comma-separated) and sync the text.

        Unknown permission names are silently dropped.
        """
        selected = sorted(self._known(value))
        for perm in selected:
            checkbox = self.checkBoxes.get(perm)
            if checkbox is None:
                continue
            # Equivalent to the old setCheckState(2) (Qt.Checked) for a
            # two-state box; fires toggled -> onCheckBox.
            checkbox.setChecked(True)
        self.permissionsList.setText(','.join(selected))
|
swprojects/Buildertron
|
buildertron/spec/android_presplash_colors.py
|
<gh_stars>1-10
# Colour keywords accepted for buildozer's android presplash background.
colors = (
    'red blue green black white gray cyan magenta yellow '
    'lightgray darkgray grey lightgrey darkgrey '
    'aqua fuchsia lime maroon navy olive purple silver teal'
).split()


def get_android_presplash_colors():
    """Return the list of supported presplash colour names."""
    return colors
|
swprojects/Buildertron
|
info.py
|
<reponame>swprojects/Buildertron<filename>info.py
from os import path
from buildertron.version import __version__
__projectname__ = 'buildertron'
# __version__ = '0.1.1'
__homepage__ = 'https://github.com/swprojects/Buildertron'
__author__ = '<NAME>'
__description__ = 'A buildozer front-end for Linux'
__author_email__ = '<EMAIL>',
__classifiers__ = [
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
try:
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README'), encoding='utf-8') as readme_file:
__readme__ = readme_file.read()
except Exception:
__readme__ = ''
try:
with open(path.join(here, 'HISTORY'), encoding='utf-8') as history_file:
__history__ = history_file.read().replace('.. :changelog:', '')
except Exception:
__history__ = ''
|
swprojects/Buildertron
|
buildertron/spec/buildozerdefault.py
|
<filename>buildertron/spec/buildozerdefault.py
# Skeleton buildozer.spec with placeholders for the rendered [app] and
# [buildozer] section bodies.
default_template = (
    "[app]\n"
    "{app}\n"
    "[buildozer]\n"
    "{buildozer}\n"
)


def get_default_template():
    """Return the buildozer.spec template string."""
    return default_template
|
swprojects/Buildertron
|
buildertron/forms/uisubmitissuedialog.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'submitissuedialog.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SubmitIssueDialog(object):
    """Auto-generated (PyQt5 pyuic) layout class for the "submit an issue" dialog.

    Generated from ``submitissuedialog.ui`` — per the file header, hand edits
    will be lost on regeneration. Shows a clickable link to the project's
    GitHub issue tracker.
    """

    def setupUi(self, SubmitIssueDialog):
        """Build the dialog's widget tree and wire the button box."""
        SubmitIssueDialog.setObjectName("SubmitIssueDialog")
        SubmitIssueDialog.resize(640, 250)
        self.gridLayout = QtWidgets.QGridLayout(SubmitIssueDialog)
        self.gridLayout.setObjectName("gridLayout")
        # Spacer pushes the button box to the bottom of the dialog.
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout.addItem(spacerItem, 1, 1, 1, 1)
        self.label = QtWidgets.QLabel(SubmitIssueDialog)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
        # NOTE(review): only a Close button is configured, yet both accepted and
        # rejected are connected below — accepted appears unreachable. Fix in
        # the .ui source and regenerate, not here.
        self.buttonBox = QtWidgets.QDialogButtonBox(SubmitIssueDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 2, 1, 1, 1)
        # Label holding the issues URL; external-link opening is enabled.
        self.label_4 = QtWidgets.QLabel(SubmitIssueDialog)
        self.label_4.setOpenExternalLinks(True)
        self.label_4.setObjectName("label_4")
        self.gridLayout.addWidget(self.label_4, 0, 1, 1, 1)
        self.retranslateUi(SubmitIssueDialog)
        self.buttonBox.accepted.connect(SubmitIssueDialog.accept)
        self.buttonBox.rejected.connect(SubmitIssueDialog.reject)
        QtCore.QMetaObject.connectSlotsByName(SubmitIssueDialog)

    def retranslateUi(self, SubmitIssueDialog):
        """Set all user-visible strings (translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        SubmitIssueDialog.setWindowTitle(_translate("SubmitIssueDialog", "Submit an issue..."))
        self.label.setText(_translate("SubmitIssueDialog", "Buildertron Issues:"))
        self.label_4.setText(_translate("SubmitIssueDialog", "https://github.com/swprojects/Buildertron/issues"))
|
swprojects/Buildertron
|
buildertron/forms/uimainwindow.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.10
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setEnabled(True)
MainWindow.resize(1000, 800)
MainWindow.setDockNestingEnabled(True)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.centralWidget)
self.verticalLayout_5.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_5.setSpacing(6)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.splitterMain = QtWidgets.QSplitter(self.centralWidget)
self.splitterMain.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(50)
sizePolicy.setHeightForWidth(self.splitterMain.sizePolicy().hasHeightForWidth())
self.splitterMain.setSizePolicy(sizePolicy)
self.splitterMain.setMinimumSize(QtCore.QSize(0, 400))
self.splitterMain.setSizeIncrement(QtCore.QSize(0, 0))
self.splitterMain.setBaseSize(QtCore.QSize(0, 0))
self.splitterMain.setFrameShape(QtWidgets.QFrame.NoFrame)
self.splitterMain.setFrameShadow(QtWidgets.QFrame.Plain)
self.splitterMain.setOrientation(QtCore.Qt.Vertical)
self.splitterMain.setOpaqueResize(True)
self.splitterMain.setHandleWidth(0)
self.splitterMain.setChildrenCollapsible(False)
self.splitterMain.setObjectName("splitterMain")
self.splitterTop = QtWidgets.QSplitter(self.splitterMain)
self.splitterTop.setBaseSize(QtCore.QSize(700, 600))
self.splitterTop.setOrientation(QtCore.Qt.Horizontal)
self.splitterTop.setHandleWidth(2)
self.splitterTop.setChildrenCollapsible(False)
self.splitterTop.setObjectName("splitterTop")
self.tabAdvanced = QtWidgets.QTabWidget(self.splitterTop)
self.tabAdvanced.setTabPosition(QtWidgets.QTabWidget.North)
self.tabAdvanced.setTabShape(QtWidgets.QTabWidget.Rounded)
self.tabAdvanced.setObjectName("tabAdvanced")
self.Project = QtWidgets.QWidget()
self.Project.setObjectName("Project")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.Project)
self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_3.setSpacing(0)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.scrollArea = QtWidgets.QScrollArea(self.Project)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea.sizePolicy().hasHeightForWidth())
self.scrollArea.setSizePolicy(sizePolicy)
self.scrollArea.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 503, 698))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout.setContentsMargins(11, 11, 11, 11)
self.gridLayout.setSpacing(6)
self.gridLayout.setObjectName("gridLayout")
self.checkbox_icon_filename = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_icon_filename.setText("")
self.checkbox_icon_filename.setObjectName("checkbox_icon_filename")
self.gridLayout.addWidget(self.checkbox_icon_filename, 19, 1, 1, 1)
self.checkbox_version_regex = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_version_regex.setEnabled(True)
self.checkbox_version_regex.setText("")
self.checkbox_version_regex.setCheckable(True)
self.checkbox_version_regex.setObjectName("checkbox_version_regex")
self.gridLayout.addWidget(self.checkbox_version_regex, 4, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_9.setObjectName("label_9")
self.gridLayout.addWidget(self.label_9, 10, 0, 1, 1)
self.toolbutton_source_dir = QtWidgets.QToolButton(self.scrollAreaWidgetContents)
self.toolbutton_source_dir.setObjectName("toolbutton_source_dir")
self.gridLayout.addWidget(self.toolbutton_source_dir, 9, 3, 1, 1)
self.lineedit_source_exclude_exts = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_source_exclude_exts.setEnabled(False)
self.lineedit_source_exclude_exts.setObjectName("lineedit_source_exclude_exts")
self.gridLayout.addWidget(self.lineedit_source_exclude_exts, 12, 2, 1, 1)
self.checkbox_source_include_exts = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_source_include_exts.setText("")
self.checkbox_source_include_exts.setObjectName("checkbox_source_include_exts")
self.gridLayout.addWidget(self.checkbox_source_include_exts, 11, 1, 1, 1)
self.label_31 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setUnderline(True)
font.setWeight(75)
self.label_31.setFont(font)
self.label_31.setObjectName("label_31")
self.gridLayout.addWidget(self.label_31, 8, 0, 1, 1)
self.lineedit_requirements = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_requirements.setEnabled(False)
self.lineedit_requirements.setObjectName("lineedit_requirements")
self.gridLayout.addWidget(self.lineedit_requirements, 16, 2, 1, 1)
self.lineedit_icon_filename = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_icon_filename.setEnabled(False)
self.lineedit_icon_filename.setObjectName("lineedit_icon_filename")
self.gridLayout.addWidget(self.lineedit_icon_filename, 19, 2, 1, 1)
self.label_77 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_77.setObjectName("label_77")
self.gridLayout.addWidget(self.label_77, 18, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 6, 0, 1, 1)
self.label_78 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_78.setObjectName("label_78")
self.gridLayout.addWidget(self.label_78, 16, 0, 1, 1)
self.lineedit_source_include_exts = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_source_include_exts.setEnabled(False)
self.lineedit_source_include_exts.setObjectName("lineedit_source_include_exts")
self.gridLayout.addWidget(self.lineedit_source_include_exts, 11, 2, 1, 1)
self.checkbox_requirements = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_requirements.setText("")
self.checkbox_requirements.setObjectName("checkbox_requirements")
self.gridLayout.addWidget(self.checkbox_requirements, 16, 1, 1, 1)
self.label_76 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_76.setObjectName("label_76")
self.gridLayout.addWidget(self.label_76, 17, 0, 1, 1)
self.lineedit_presplash_filename = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_presplash_filename.setEnabled(False)
self.lineedit_presplash_filename.setObjectName("lineedit_presplash_filename")
self.gridLayout.addWidget(self.lineedit_presplash_filename, 18, 2, 1, 1)
self.checkbox_garden_requirements = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_garden_requirements.setText("")
self.checkbox_garden_requirements.setObjectName("checkbox_garden_requirements")
self.gridLayout.addWidget(self.checkbox_garden_requirements, 17, 1, 1, 1)
self.checkbox_presplash_filename = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_presplash_filename.setText("")
self.checkbox_presplash_filename.setObjectName("checkbox_presplash_filename")
self.gridLayout.addWidget(self.checkbox_presplash_filename, 18, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_5.setObjectName("label_5")
self.gridLayout.addWidget(self.label_5, 9, 0, 1, 1)
self.label_29 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_29.setText("")
self.label_29.setObjectName("label_29")
self.gridLayout.addWidget(self.label_29, 7, 0, 1, 1)
self.label_75 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_75.setObjectName("label_75")
self.gridLayout.addWidget(self.label_75, 19, 0, 1, 1)
self.lineedit_package_name = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_package_name.setObjectName("lineedit_package_name")
self.gridLayout.addWidget(self.lineedit_package_name, 2, 2, 1, 1)
self.label_6 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_6.setObjectName("label_6")
self.gridLayout.addWidget(self.label_6, 11, 0, 1, 1)
self.lineedit_garden_requirements = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_garden_requirements.setEnabled(False)
self.lineedit_garden_requirements.setObjectName("lineedit_garden_requirements")
self.gridLayout.addWidget(self.lineedit_garden_requirements, 17, 2, 1, 1)
self.checkbox_source_exclude_dirs = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_source_exclude_dirs.setText("")
self.checkbox_source_exclude_dirs.setObjectName("checkbox_source_exclude_dirs")
self.gridLayout.addWidget(self.checkbox_source_exclude_dirs, 10, 1, 1, 1)
self.label_79 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
font = QtGui.QFont()
font.setBold(True)
font.setUnderline(True)
font.setWeight(75)
self.label_79.setFont(font)
self.label_79.setObjectName("label_79")
self.gridLayout.addWidget(self.label_79, 0, 0, 1, 1)
self.lineedit_title = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_title.setClearButtonEnabled(False)
self.lineedit_title.setObjectName("lineedit_title")
self.gridLayout.addWidget(self.lineedit_title, 1, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_8.setObjectName("label_8")
self.gridLayout.addWidget(self.label_8, 12, 0, 1, 1)
self.lineedit_source_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_source_dir.setEnabled(True)
self.lineedit_source_dir.setObjectName("lineedit_source_dir")
self.gridLayout.addWidget(self.lineedit_source_dir, 9, 2, 1, 1)
self.lineedit_source_exclude_patterns = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_source_exclude_patterns.setEnabled(False)
self.lineedit_source_exclude_patterns.setObjectName("lineedit_source_exclude_patterns")
self.gridLayout.addWidget(self.lineedit_source_exclude_patterns, 14, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem, 22, 0, 1, 1)
self.checkbox_source_include_patterns = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_source_include_patterns.setText("")
self.checkbox_source_include_patterns.setObjectName("checkbox_source_include_patterns")
self.gridLayout.addWidget(self.checkbox_source_include_patterns, 13, 1, 1, 1)
self.checkbox_services = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_services.setText("")
self.checkbox_services.setObjectName("checkbox_services")
self.gridLayout.addWidget(self.checkbox_services, 21, 1, 1, 1)
self.label_30 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_30.setText("")
self.label_30.setObjectName("label_30")
self.gridLayout.addWidget(self.label_30, 15, 0, 1, 1)
self.lineedit_source_exclude_dirs = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_source_exclude_dirs.setEnabled(False)
self.lineedit_source_exclude_dirs.setObjectName("lineedit_source_exclude_dirs")
self.gridLayout.addWidget(self.lineedit_source_exclude_dirs, 10, 2, 1, 1)
self.label_2 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.checkbox_source_exclude_exts = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_source_exclude_exts.setText("")
self.checkbox_source_exclude_exts.setObjectName("checkbox_source_exclude_exts")
self.gridLayout.addWidget(self.checkbox_source_exclude_exts, 12, 1, 1, 1)
self.checkbox_source_exclude_patterns = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_source_exclude_patterns.setText("")
self.checkbox_source_exclude_patterns.setObjectName("checkbox_source_exclude_patterns")
self.gridLayout.addWidget(self.checkbox_source_exclude_patterns, 14, 1, 1, 1)
self.label_10 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_10.setObjectName("label_10")
self.gridLayout.addWidget(self.label_10, 14, 0, 1, 1)
self.lineedit_services = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_services.setEnabled(False)
self.lineedit_services.setObjectName("lineedit_services")
self.gridLayout.addWidget(self.lineedit_services, 21, 2, 1, 1)
self.lineedit_version = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_version.setObjectName("lineedit_version")
self.gridLayout.addWidget(self.lineedit_version, 3, 2, 1, 1)
self.label = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.lineedit_package_domain = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_package_domain.setObjectName("lineedit_package_domain")
self.gridLayout.addWidget(self.lineedit_package_domain, 6, 2, 1, 1)
self.label_11 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_11.setObjectName("label_11")
self.gridLayout.addWidget(self.label_11, 20, 0, 1, 1)
self.label_26 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_26.setObjectName("label_26")
self.gridLayout.addWidget(self.label_26, 21, 0, 1, 1)
self.combobox_orientation = QtWidgets.QComboBox(self.scrollAreaWidgetContents)
self.combobox_orientation.setObjectName("combobox_orientation")
self.combobox_orientation.addItem("")
self.combobox_orientation.addItem("")
self.combobox_orientation.addItem("")
self.gridLayout.addWidget(self.combobox_orientation, 20, 2, 1, 1)
self.label_7 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_7.setObjectName("label_7")
self.gridLayout.addWidget(self.label_7, 13, 0, 1, 1)
self.lineedit_source_include_patterns = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_source_include_patterns.setEnabled(False)
self.lineedit_source_include_patterns.setObjectName("lineedit_source_include_patterns")
self.gridLayout.addWidget(self.lineedit_source_include_patterns, 13, 2, 1, 1)
self.label_32 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_32.setObjectName("label_32")
self.gridLayout.addWidget(self.label_32, 4, 0, 1, 1)
self.lineedit_version_regex = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_version_regex.setEnabled(False)
self.lineedit_version_regex.setObjectName("lineedit_version_regex")
self.gridLayout.addWidget(self.lineedit_version_regex, 4, 2, 1, 1)
self.label_64 = QtWidgets.QLabel(self.scrollAreaWidgetContents)
self.label_64.setObjectName("label_64")
self.gridLayout.addWidget(self.label_64, 5, 0, 1, 1)
self.checkbox_version_filename = QtWidgets.QCheckBox(self.scrollAreaWidgetContents)
self.checkbox_version_filename.setText("")
self.checkbox_version_filename.setCheckable(True)
self.checkbox_version_filename.setObjectName("checkbox_version_filename")
self.gridLayout.addWidget(self.checkbox_version_filename, 5, 1, 1, 1)
self.lineedit_version_filename = QtWidgets.QLineEdit(self.scrollAreaWidgetContents)
self.lineedit_version_filename.setEnabled(False)
self.lineedit_version_filename.setObjectName("lineedit_version_filename")
self.gridLayout.addWidget(self.lineedit_version_filename, 5, 2, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout_3.addWidget(self.scrollArea)
self.tabAdvanced.addTab(self.Project, "")
self.tabProject = QtWidgets.QWidget()
self.tabProject.setEnabled(True)
self.tabProject.setObjectName("tabProject")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.tabProject)
self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_7.setSpacing(6)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.scrollArea_6 = QtWidgets.QScrollArea(self.tabProject)
self.scrollArea_6.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_6.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_6.setWidgetResizable(True)
self.scrollArea_6.setObjectName("scrollArea_6")
self.scrollAreaWidgetContents_7 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_7.setGeometry(QtCore.QRect(0, 0, 503, 698))
self.scrollAreaWidgetContents_7.setObjectName("scrollAreaWidgetContents_7")
self.gridLayout_4 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_7)
self.gridLayout_4.setContentsMargins(11, 11, 11, 11)
self.gridLayout_4.setSpacing(6)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_65 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_65.setObjectName("label_65")
self.gridLayout_4.addWidget(self.label_65, 1, 0, 1, 1)
self.label_82 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_82.setText("")
self.label_82.setObjectName("label_82")
self.gridLayout_4.addWidget(self.label_82, 3, 0, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_4.addItem(spacerItem1, 6, 2, 1, 1)
self.label_83 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_83.setObjectName("label_83")
self.gridLayout_4.addWidget(self.label_83, 0, 0, 1, 1)
self.toolbutton_bin_dir = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.toolbutton_bin_dir.setObjectName("toolbutton_bin_dir")
self.gridLayout_4.addWidget(self.toolbutton_bin_dir, 1, 3, 1, 1)
self.toolbutton_build_dir = QtWidgets.QToolButton(self.scrollAreaWidgetContents_7)
self.toolbutton_build_dir.setObjectName("toolbutton_build_dir")
self.gridLayout_4.addWidget(self.toolbutton_build_dir, 0, 3, 1, 1)
self.lineedit_build_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_7)
self.lineedit_build_dir.setEnabled(False)
self.lineedit_build_dir.setObjectName("lineedit_build_dir")
self.gridLayout_4.addWidget(self.lineedit_build_dir, 0, 2, 1, 1)
self.label_84 = QtWidgets.QLabel(self.scrollAreaWidgetContents_7)
self.label_84.setText("")
self.label_84.setObjectName("label_84")
self.gridLayout_4.addWidget(self.label_84, 2, 0, 1, 1)
self.lineedit_bin_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_7)
self.lineedit_bin_dir.setEnabled(False)
self.lineedit_bin_dir.setObjectName("lineedit_bin_dir")
self.gridLayout_4.addWidget(self.lineedit_bin_dir, 1, 2, 1, 1)
self.checkbox_bin_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.checkbox_bin_dir.setText("")
self.checkbox_bin_dir.setObjectName("checkbox_bin_dir")
self.gridLayout_4.addWidget(self.checkbox_bin_dir, 1, 1, 1, 1)
self.checkbox_build_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_7)
self.checkbox_build_dir.setText("")
self.checkbox_build_dir.setObjectName("checkbox_build_dir")
self.gridLayout_4.addWidget(self.checkbox_build_dir, 0, 1, 1, 1)
self.scrollArea_6.setWidget(self.scrollAreaWidgetContents_7)
self.horizontalLayout_7.addWidget(self.scrollArea_6)
self.tabAdvanced.addTab(self.tabProject, "")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.tab)
self.verticalLayout_7.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_7.setSpacing(6)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.refreshPreview = QtWidgets.QPushButton(self.tab)
self.refreshPreview.setObjectName("refreshPreview")
self.verticalLayout.addWidget(self.refreshPreview)
self.previewText = QtWidgets.QTextEdit(self.tab)
self.previewText.setObjectName("previewText")
self.verticalLayout.addWidget(self.previewText)
self.verticalLayout_7.addLayout(self.verticalLayout)
self.tabAdvanced.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.tab_2)
self.horizontalLayout_9.setContentsMargins(11, 11, 11, 11)
self.horizontalLayout_9.setSpacing(6)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.verticalLayout_8 = QtWidgets.QVBoxLayout()
self.verticalLayout_8.setSpacing(6)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.refreshSystem = QtWidgets.QPushButton(self.tab_2)
self.refreshSystem.setObjectName("refreshSystem")
self.verticalLayout_8.addWidget(self.refreshSystem)
self.textEditSystem = QtWidgets.QTextEdit(self.tab_2)
self.textEditSystem.setObjectName("textEditSystem")
self.verticalLayout_8.addWidget(self.textEditSystem)
self.horizontalLayout_9.addLayout(self.verticalLayout_8)
self.tabAdvanced.addTab(self.tab_2, "")
self.tabWidget_2 = QtWidgets.QTabWidget(self.splitterTop)
self.tabWidget_2.setEnabled(True)
self.tabWidget_2.setObjectName("tabWidget_2")
self.tabAndroid = QtWidgets.QWidget()
self.tabAndroid.setObjectName("tabAndroid")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.tabAndroid)
self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_4.setSpacing(0)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.scrollArea_2 = QtWidgets.QScrollArea(self.tabAndroid)
self.scrollArea_2.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_2.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_2.setLineWidth(0)
self.scrollArea_2.setWidgetResizable(True)
self.scrollArea_2.setObjectName("scrollArea_2")
self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 461, 698))
self.scrollAreaWidgetContents_2.setAutoFillBackground(True)
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_2)
self.gridLayout_2.setContentsMargins(11, 11, 11, 11)
self.gridLayout_2.setSpacing(6)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_21 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_21.setObjectName("label_21")
self.gridLayout_2.addWidget(self.label_21, 3, 0, 1, 1)
self.lineedit2_android_minapi = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit2_android_minapi.setObjectName("lineedit2_android_minapi")
self.gridLayout_2.addWidget(self.lineedit2_android_minapi, 3, 2, 1, 1)
self.label_13 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_13.setObjectName("label_13")
self.gridLayout_2.addWidget(self.label_13, 9, 0, 1, 1)
self.checkbox_android_ant_path = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox_android_ant_path.setText("")
self.checkbox_android_ant_path.setObjectName("checkbox_android_ant_path")
self.gridLayout_2.addWidget(self.checkbox_android_ant_path, 9, 1, 1, 1)
self.label_20 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_20.setObjectName("label_20")
self.gridLayout_2.addWidget(self.label_20, 2, 0, 1, 1)
self.label_40 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_40.setText("")
self.label_40.setObjectName("label_40")
self.gridLayout_2.addWidget(self.label_40, 1, 0, 1, 1)
self.lineedit_android_ant_path = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit_android_ant_path.setEnabled(False)
self.lineedit_android_ant_path.setText("")
self.lineedit_android_ant_path.setObjectName("lineedit_android_ant_path")
self.gridLayout_2.addWidget(self.lineedit_android_ant_path, 9, 2, 1, 1)
self.toolbutton_android_ant_path = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolbutton_android_ant_path.setObjectName("toolbutton_android_ant_path")
self.gridLayout_2.addWidget(self.toolbutton_android_ant_path, 9, 3, 1, 1)
self.label_25 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_25.setObjectName("label_25")
self.gridLayout_2.addWidget(self.label_25, 0, 0, 1, 1)
self.combobox_android_arch = QtWidgets.QComboBox(self.scrollAreaWidgetContents_2)
self.combobox_android_arch.setObjectName("combobox_android_arch")
self.combobox_android_arch.addItem("")
self.combobox_android_arch.addItem("")
self.combobox_android_arch.addItem("")
self.gridLayout_2.addWidget(self.combobox_android_arch, 0, 2, 1, 1)
self.lineedit2_android_api = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit2_android_api.setObjectName("lineedit2_android_api")
self.gridLayout_2.addWidget(self.lineedit2_android_api, 2, 2, 1, 1)
self.label_22 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_22.setObjectName("label_22")
self.gridLayout_2.addWidget(self.label_22, 4, 0, 1, 1)
self.label_16 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_16.setObjectName("label_16")
self.gridLayout_2.addWidget(self.label_16, 6, 0, 1, 1)
self.label_41 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_41.setText("")
self.label_41.setObjectName("label_41")
self.gridLayout_2.addWidget(self.label_41, 15, 0, 1, 1)
self.label_19 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_19.setObjectName("label_19")
self.gridLayout_2.addWidget(self.label_19, 7, 0, 1, 1)
self.label_23 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_23.setObjectName("label_23")
self.gridLayout_2.addWidget(self.label_23, 17, 0, 1, 1)
self.label_12 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_12.setObjectName("label_12")
self.gridLayout_2.addWidget(self.label_12, 16, 0, 1, 1)
self.permissions_android_permissions = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.permissions_android_permissions.setObjectName("permissions_android_permissions")
self.gridLayout_2.addWidget(self.permissions_android_permissions, 17, 3, 1, 1)
self.checkbox_android_presplash_color = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox_android_presplash_color.setText("")
self.checkbox_android_presplash_color.setObjectName("checkbox_android_presplash_color")
self.gridLayout_2.addWidget(self.checkbox_android_presplash_color, 18, 1, 1, 1)
self.label_14 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_14.setObjectName("label_14")
self.gridLayout_2.addWidget(self.label_14, 18, 0, 1, 1)
self.label_18 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_18.setObjectName("label_18")
self.gridLayout_2.addWidget(self.label_18, 5, 0, 1, 1)
self.checkbox2_fullscreen = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox2_fullscreen.setLayoutDirection(QtCore.Qt.LeftToRight)
self.checkbox2_fullscreen.setText("")
self.checkbox2_fullscreen.setObjectName("checkbox2_fullscreen")
self.gridLayout_2.addWidget(self.checkbox2_fullscreen, 16, 2, 1, 1)
self.combobox_android_presplash_color = QtWidgets.QComboBox(self.scrollAreaWidgetContents_2)
self.combobox_android_presplash_color.setEnabled(False)
self.combobox_android_presplash_color.setEditable(True)
self.combobox_android_presplash_color.setObjectName("combobox_android_presplash_color")
self.gridLayout_2.addWidget(self.combobox_android_presplash_color, 18, 2, 1, 1)
self.colorpick_android_presplash_color = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.colorpick_android_presplash_color.setObjectName("colorpick_android_presplash_color")
self.gridLayout_2.addWidget(self.colorpick_android_presplash_color, 18, 3, 1, 1)
self.checkbox2_android_private_storage = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox2_android_private_storage.setLayoutDirection(QtCore.Qt.LeftToRight)
self.checkbox2_android_private_storage.setText("")
self.checkbox2_android_private_storage.setObjectName("checkbox2_android_private_storage")
self.gridLayout_2.addWidget(self.checkbox2_android_private_storage, 19, 2, 1, 1)
self.label_24 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_24.setObjectName("label_24")
self.gridLayout_2.addWidget(self.label_24, 19, 0, 1, 1)
self.label_58 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_58.setObjectName("label_58")
self.gridLayout_2.addWidget(self.label_58, 20, 0, 1, 1)
self.checkbox2_android_wakelock = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox2_android_wakelock.setText("")
self.checkbox2_android_wakelock.setObjectName("checkbox2_android_wakelock")
self.gridLayout_2.addWidget(self.checkbox2_android_wakelock, 20, 2, 1, 1)
self.lineedit_android_permissions = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit_android_permissions.setEnabled(False)
self.lineedit_android_permissions.setReadOnly(True)
self.lineedit_android_permissions.setObjectName("lineedit_android_permissions")
self.gridLayout_2.addWidget(self.lineedit_android_permissions, 17, 2, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(103, 53, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem2, 21, 0, 1, 1)
self.checkbox2_android_skip_update = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox2_android_skip_update.setText("")
self.checkbox2_android_skip_update.setObjectName("checkbox2_android_skip_update")
self.gridLayout_2.addWidget(self.checkbox2_android_skip_update, 6, 2, 1, 1)
self.toolbutton_android_sdk_path = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolbutton_android_sdk_path.setObjectName("toolbutton_android_sdk_path")
self.gridLayout_2.addWidget(self.toolbutton_android_sdk_path, 5, 3, 1, 1)
self.checkbox_android_sdk_path = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox_android_sdk_path.setText("")
self.checkbox_android_sdk_path.setObjectName("checkbox_android_sdk_path")
self.gridLayout_2.addWidget(self.checkbox_android_sdk_path, 5, 1, 1, 1)
self.lineedit_android_sdk_path = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit_android_sdk_path.setEnabled(False)
self.lineedit_android_sdk_path.setText("")
self.lineedit_android_sdk_path.setObjectName("lineedit_android_sdk_path")
self.gridLayout_2.addWidget(self.lineedit_android_sdk_path, 5, 2, 1, 1)
self.lineedit2_android_sdk = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit2_android_sdk.setObjectName("lineedit2_android_sdk")
self.gridLayout_2.addWidget(self.lineedit2_android_sdk, 4, 2, 1, 1)
self.label_17 = QtWidgets.QLabel(self.scrollAreaWidgetContents_2)
self.label_17.setToolTip("")
self.label_17.setStatusTip("")
self.label_17.setAccessibleName("")
self.label_17.setObjectName("label_17")
self.gridLayout_2.addWidget(self.label_17, 8, 0, 1, 1)
self.lineedit2_android_ndk = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit2_android_ndk.setObjectName("lineedit2_android_ndk")
self.gridLayout_2.addWidget(self.lineedit2_android_ndk, 7, 2, 1, 1)
self.checkbox_android_ndk_path = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox_android_ndk_path.setText("")
self.checkbox_android_ndk_path.setObjectName("checkbox_android_ndk_path")
self.gridLayout_2.addWidget(self.checkbox_android_ndk_path, 8, 1, 1, 1)
self.lineedit_android_ndk_path = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_2)
self.lineedit_android_ndk_path.setEnabled(False)
self.lineedit_android_ndk_path.setStatusTip("")
self.lineedit_android_ndk_path.setText("")
self.lineedit_android_ndk_path.setObjectName("lineedit_android_ndk_path")
self.gridLayout_2.addWidget(self.lineedit_android_ndk_path, 8, 2, 1, 1)
self.toolbutton_android_ndk_path = QtWidgets.QToolButton(self.scrollAreaWidgetContents_2)
self.toolbutton_android_ndk_path.setObjectName("toolbutton_android_ndk_path")
self.gridLayout_2.addWidget(self.toolbutton_android_ndk_path, 8, 3, 1, 1)
self.checkbox_android_permissions = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_2)
self.checkbox_android_permissions.setText("")
self.checkbox_android_permissions.setObjectName("checkbox_android_permissions")
self.gridLayout_2.addWidget(self.checkbox_android_permissions, 17, 1, 1, 1)
self.scrollArea_2.setWidget(self.scrollAreaWidgetContents_2)
self.horizontalLayout_4.addWidget(self.scrollArea_2)
self.tabWidget_2.addTab(self.tabAndroid, "")
self.tabAndroid2 = QtWidgets.QWidget()
self.tabAndroid2.setObjectName("tabAndroid2")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.tabAndroid2)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.scrollArea_3 = QtWidgets.QScrollArea(self.tabAndroid2)
self.scrollArea_3.setEnabled(True)
self.scrollArea_3.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_3.setWidgetResizable(True)
self.scrollArea_3.setObjectName("scrollArea_3")
self.scrollAreaWidgetContents_4 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_4.setGeometry(QtCore.QRect(0, 0, 440, 765))
self.scrollAreaWidgetContents_4.setObjectName("scrollAreaWidgetContents_4")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_4)
self.verticalLayout_2.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_2.setSpacing(6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setSpacing(6)
self.gridLayout_3.setObjectName("gridLayout_3")
self.checkbox_android_add_jars = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_jars.setText("")
self.checkbox_android_add_jars.setObjectName("checkbox_android_add_jars")
self.gridLayout_3.addWidget(self.checkbox_android_add_jars, 1, 1, 1, 1)
self.label_28 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_28.setObjectName("label_28")
self.gridLayout_3.addWidget(self.label_28, 2, 0, 1, 1)
self.checkbox_android_add_aars = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_aars.setText("")
self.checkbox_android_add_aars.setObjectName("checkbox_android_add_aars")
self.gridLayout_3.addWidget(self.checkbox_android_add_aars, 2, 1, 1, 1)
self.label_15 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_15.setObjectName("label_15")
self.gridLayout_3.addWidget(self.label_15, 1, 0, 1, 1)
self.lineedit_android_add_jars = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_jars.setEnabled(False)
self.lineedit_android_add_jars.setObjectName("lineedit_android_add_jars")
self.gridLayout_3.addWidget(self.lineedit_android_add_jars, 1, 2, 1, 1)
self.lineedit_android_add_aars = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_aars.setEnabled(False)
self.lineedit_android_add_aars.setObjectName("lineedit_android_add_aars")
self.gridLayout_3.addWidget(self.lineedit_android_add_aars, 2, 2, 1, 1)
self.label_33 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_33.setObjectName("label_33")
self.gridLayout_3.addWidget(self.label_33, 0, 0, 1, 1)
self.checkbox_android_entrypoint = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_entrypoint.setText("")
self.checkbox_android_entrypoint.setObjectName("checkbox_android_entrypoint")
self.gridLayout_3.addWidget(self.checkbox_android_entrypoint, 0, 1, 1, 1)
self.lineedit_android_entrypoint = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_entrypoint.setEnabled(False)
self.lineedit_android_entrypoint.setObjectName("lineedit_android_entrypoint")
self.gridLayout_3.addWidget(self.lineedit_android_entrypoint, 0, 2, 1, 1)
self.checkbox_android_manifest_intent_filters = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_manifest_intent_filters.setText("")
self.checkbox_android_manifest_intent_filters.setObjectName("checkbox_android_manifest_intent_filters")
self.gridLayout_3.addWidget(self.checkbox_android_manifest_intent_filters, 7, 1, 1, 1)
self.label_57 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_57.setObjectName("label_57")
self.gridLayout_3.addWidget(self.label_57, 8, 0, 1, 1)
self.checkbox_android_gradle_dependencies = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_gradle_dependencies.setText("")
self.checkbox_android_gradle_dependencies.setObjectName("checkbox_android_gradle_dependencies")
self.gridLayout_3.addWidget(self.checkbox_android_gradle_dependencies, 4, 1, 1, 1)
self.label_39 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_39.setObjectName("label_39")
self.gridLayout_3.addWidget(self.label_39, 5, 0, 1, 1)
self.label_27 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_27.setObjectName("label_27")
self.gridLayout_3.addWidget(self.label_27, 3, 0, 1, 1)
self.checkbox_android_add_java_src = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_java_src.setText("")
self.checkbox_android_add_java_src.setObjectName("checkbox_android_add_java_src")
self.gridLayout_3.addWidget(self.checkbox_android_add_java_src, 3, 1, 1, 1)
self.label_56 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_56.setObjectName("label_56")
self.gridLayout_3.addWidget(self.label_56, 7, 0, 1, 1)
self.lineedit_android_manifest_intent_filters = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_manifest_intent_filters.setEnabled(False)
self.lineedit_android_manifest_intent_filters.setObjectName("lineedit_android_manifest_intent_filters")
self.gridLayout_3.addWidget(self.lineedit_android_manifest_intent_filters, 7, 2, 1, 1)
self.label_38 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_38.setObjectName("label_38")
self.gridLayout_3.addWidget(self.label_38, 4, 0, 1, 1)
self.lineedit_android_add_java_src = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_java_src.setEnabled(False)
self.lineedit_android_add_java_src.setObjectName("lineedit_android_add_java_src")
self.gridLayout_3.addWidget(self.lineedit_android_add_java_src, 3, 2, 1, 1)
self.lineedit_android_gradle_dependencies = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_gradle_dependencies.setEnabled(False)
self.lineedit_android_gradle_dependencies.setText("")
self.lineedit_android_gradle_dependencies.setObjectName("lineedit_android_gradle_dependencies")
self.gridLayout_3.addWidget(self.lineedit_android_gradle_dependencies, 4, 2, 1, 1)
self.checkbox_android_add_activities = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_activities.setText("")
self.checkbox_android_add_activities.setObjectName("checkbox_android_add_activities")
self.gridLayout_3.addWidget(self.checkbox_android_add_activities, 5, 1, 1, 1)
self.lineedit_android_add_activities = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_activities.setEnabled(False)
self.lineedit_android_add_activities.setObjectName("lineedit_android_add_activities")
self.gridLayout_3.addWidget(self.lineedit_android_add_activities, 5, 2, 1, 1)
self.label_48 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_48.setText("")
self.label_48.setObjectName("label_48")
self.gridLayout_3.addWidget(self.label_48, 6, 0, 1, 1)
self.label_53 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_53.setObjectName("label_53")
self.gridLayout_3.addWidget(self.label_53, 22, 0, 1, 1)
self.checkbox_android_add_libs_mips = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_libs_mips.setText("")
self.checkbox_android_add_libs_mips.setObjectName("checkbox_android_add_libs_mips")
self.gridLayout_3.addWidget(self.checkbox_android_add_libs_mips, 22, 1, 1, 1)
self.label_51 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_51.setObjectName("label_51")
self.gridLayout_3.addWidget(self.label_51, 20, 0, 1, 1)
self.label_61 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_61.setObjectName("label_61")
self.gridLayout_3.addWidget(self.label_61, 11, 0, 1, 1)
self.lineedit_android_add_libs_mips = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_libs_mips.setEnabled(False)
self.lineedit_android_add_libs_mips.setObjectName("lineedit_android_add_libs_mips")
self.gridLayout_3.addWidget(self.lineedit_android_add_libs_mips, 22, 2, 1, 1)
self.label_49 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_49.setText("")
self.label_49.setObjectName("label_49")
self.gridLayout_3.addWidget(self.label_49, 23, 0, 1, 1)
self.checkbox_android_manifest_launch_mode = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_manifest_launch_mode.setText("")
self.checkbox_android_manifest_launch_mode.setObjectName("checkbox_android_manifest_launch_mode")
self.gridLayout_3.addWidget(self.checkbox_android_manifest_launch_mode, 8, 1, 1, 1)
self.checkbox_android_library_references = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_library_references.setText("")
self.checkbox_android_library_references.setObjectName("checkbox_android_library_references")
self.gridLayout_3.addWidget(self.checkbox_android_library_references, 11, 1, 1, 1)
self.lineedit_android_meta_data = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_meta_data.setEnabled(False)
self.lineedit_android_meta_data.setObjectName("lineedit_android_meta_data")
self.gridLayout_3.addWidget(self.lineedit_android_meta_data, 10, 2, 1, 1)
self.checkbox_android_meta_data = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_meta_data.setText("")
self.checkbox_android_meta_data.setObjectName("checkbox_android_meta_data")
self.gridLayout_3.addWidget(self.checkbox_android_meta_data, 10, 1, 1, 1)
self.checkbox_android_logcat_filters = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_logcat_filters.setText("")
self.checkbox_android_logcat_filters.setObjectName("checkbox_android_logcat_filters")
self.gridLayout_3.addWidget(self.checkbox_android_logcat_filters, 12, 1, 1, 1)
self.lineedit_android_manifest_launch_mode = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_manifest_launch_mode.setEnabled(False)
self.lineedit_android_manifest_launch_mode.setObjectName("lineedit_android_manifest_launch_mode")
self.gridLayout_3.addWidget(self.lineedit_android_manifest_launch_mode, 8, 2, 1, 1)
self.label_60 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_60.setObjectName("label_60")
self.gridLayout_3.addWidget(self.label_60, 12, 0, 1, 1)
self.checkbox_android_copy_libs = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_copy_libs.setText("")
self.checkbox_android_copy_libs.setObjectName("checkbox_android_copy_libs")
self.gridLayout_3.addWidget(self.checkbox_android_copy_libs, 13, 1, 1, 1)
self.label_37 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_37.setFont(font)
self.label_37.setObjectName("label_37")
self.gridLayout_3.addWidget(self.label_37, 18, 0, 1, 1)
self.label_63 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_63.setObjectName("label_63")
self.gridLayout_3.addWidget(self.label_63, 13, 0, 1, 1)
self.label_50 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_50.setObjectName("label_50")
self.gridLayout_3.addWidget(self.label_50, 19, 0, 1, 1)
self.lineedit_android_copy_libs = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_copy_libs.setEnabled(False)
self.lineedit_android_copy_libs.setObjectName("lineedit_android_copy_libs")
self.gridLayout_3.addWidget(self.lineedit_android_copy_libs, 13, 2, 1, 1)
self.label_55 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_55.setText("")
self.label_55.setObjectName("label_55")
self.gridLayout_3.addWidget(self.label_55, 9, 0, 1, 1)
self.label_62 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_62.setText("")
self.label_62.setObjectName("label_62")
self.gridLayout_3.addWidget(self.label_62, 14, 0, 1, 1)
self.checkbox_android_add_libs_armeabi = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_libs_armeabi.setText("")
self.checkbox_android_add_libs_armeabi.setObjectName("checkbox_android_add_libs_armeabi")
self.gridLayout_3.addWidget(self.checkbox_android_add_libs_armeabi, 19, 1, 1, 1)
self.label_59 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_59.setObjectName("label_59")
self.gridLayout_3.addWidget(self.label_59, 10, 0, 1, 1)
self.checkbox_android_add_libs_armeabi_v7a = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_libs_armeabi_v7a.setText("")
self.checkbox_android_add_libs_armeabi_v7a.setObjectName("checkbox_android_add_libs_armeabi_v7a")
self.gridLayout_3.addWidget(self.checkbox_android_add_libs_armeabi_v7a, 20, 1, 1, 1)
self.lineedit_android_add_libs_armeabi_v7a = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_libs_armeabi_v7a.setEnabled(False)
self.lineedit_android_add_libs_armeabi_v7a.setObjectName("lineedit_android_add_libs_armeabi_v7a")
self.gridLayout_3.addWidget(self.lineedit_android_add_libs_armeabi_v7a, 20, 2, 1, 1)
self.lineedit_android_add_libs_armeabi = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_libs_armeabi.setEnabled(False)
self.lineedit_android_add_libs_armeabi.setObjectName("lineedit_android_add_libs_armeabi")
self.gridLayout_3.addWidget(self.lineedit_android_add_libs_armeabi, 19, 2, 1, 1)
self.label_52 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_52.setObjectName("label_52")
self.gridLayout_3.addWidget(self.label_52, 21, 0, 1, 1)
self.checkbox_android_add_libs_x86 = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_add_libs_x86.setText("")
self.checkbox_android_add_libs_x86.setObjectName("checkbox_android_add_libs_x86")
self.gridLayout_3.addWidget(self.checkbox_android_add_libs_x86, 21, 1, 1, 1)
self.lineedit_android_logcat_filters = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_logcat_filters.setEnabled(False)
self.lineedit_android_logcat_filters.setObjectName("lineedit_android_logcat_filters")
self.gridLayout_3.addWidget(self.lineedit_android_logcat_filters, 12, 2, 1, 1)
self.lineedit_android_add_libs_x86 = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_add_libs_x86.setEnabled(False)
self.lineedit_android_add_libs_x86.setObjectName("lineedit_android_add_libs_x86")
self.gridLayout_3.addWidget(self.lineedit_android_add_libs_x86, 21, 2, 1, 1)
self.lineedit_android_library_references = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_library_references.setEnabled(False)
self.lineedit_android_library_references.setObjectName("lineedit_android_library_references")
self.gridLayout_3.addWidget(self.lineedit_android_library_references, 11, 2, 1, 1)
self.combobox_android_ouya_category = QtWidgets.QComboBox(self.scrollAreaWidgetContents_4)
self.combobox_android_ouya_category.setEnabled(False)
self.combobox_android_ouya_category.setObjectName("combobox_android_ouya_category")
self.combobox_android_ouya_category.addItem("")
self.combobox_android_ouya_category.addItem("")
self.gridLayout_3.addWidget(self.combobox_android_ouya_category, 25, 2, 1, 1)
self.label_36 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_36.setObjectName("label_36")
self.gridLayout_3.addWidget(self.label_36, 26, 0, 1, 1)
self.label_54 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_54.setFont(font)
self.label_54.setObjectName("label_54")
self.gridLayout_3.addWidget(self.label_54, 24, 0, 1, 1)
self.label_35 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_35.setObjectName("label_35")
self.gridLayout_3.addWidget(self.label_35, 25, 0, 1, 1)
self.checkbox_android_ouya_icon_filename = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_ouya_icon_filename.setText("")
self.checkbox_android_ouya_icon_filename.setObjectName("checkbox_android_ouya_icon_filename")
self.gridLayout_3.addWidget(self.checkbox_android_ouya_icon_filename, 26, 1, 1, 1)
self.lineedit_android_ouya_icon_filename = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_ouya_icon_filename.setEnabled(False)
self.lineedit_android_ouya_icon_filename.setObjectName("lineedit_android_ouya_icon_filename")
self.gridLayout_3.addWidget(self.lineedit_android_ouya_icon_filename, 26, 2, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 27, 2, 1, 1)
self.label_68 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_68.setObjectName("label_68")
self.gridLayout_3.addWidget(self.label_68, 16, 0, 1, 1)
self.label_69 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_69.setObjectName("label_69")
self.gridLayout_3.addWidget(self.label_69, 15, 0, 1, 1)
self.checkbox_android_whitelist = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_whitelist.setText("")
self.checkbox_android_whitelist.setObjectName("checkbox_android_whitelist")
self.gridLayout_3.addWidget(self.checkbox_android_whitelist, 15, 1, 1, 1)
self.checkbox_android_blacklist = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_blacklist.setText("")
self.checkbox_android_blacklist.setObjectName("checkbox_android_blacklist")
self.gridLayout_3.addWidget(self.checkbox_android_blacklist, 16, 1, 1, 1)
self.label_70 = QtWidgets.QLabel(self.scrollAreaWidgetContents_4)
self.label_70.setText("")
self.label_70.setObjectName("label_70")
self.gridLayout_3.addWidget(self.label_70, 17, 0, 1, 1)
self.lineedit_android_blacklist = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_blacklist.setEnabled(False)
self.lineedit_android_blacklist.setObjectName("lineedit_android_blacklist")
self.gridLayout_3.addWidget(self.lineedit_android_blacklist, 16, 2, 1, 1)
self.lineedit_android_whitelist = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_4)
self.lineedit_android_whitelist.setEnabled(False)
self.lineedit_android_whitelist.setObjectName("lineedit_android_whitelist")
self.gridLayout_3.addWidget(self.lineedit_android_whitelist, 15, 2, 1, 1)
self.checkbox_android_ouya_category = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_4)
self.checkbox_android_ouya_category.setText("")
self.checkbox_android_ouya_category.setObjectName("checkbox_android_ouya_category")
self.gridLayout_3.addWidget(self.checkbox_android_ouya_category, 25, 1, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_3)
self.scrollArea_3.setWidget(self.scrollAreaWidgetContents_4)
self.horizontalLayout_5.addWidget(self.scrollArea_3)
self.tabWidget_2.addTab(self.tabAndroid2, "")
self.tabP4A = QtWidgets.QWidget()
self.tabP4A.setObjectName("tabP4A")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.tabP4A)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(6)
self.horizontalLayout.setObjectName("horizontalLayout")
self.scrollArea_4 = QtWidgets.QScrollArea(self.tabP4A)
self.scrollArea_4.setEnabled(True)
self.scrollArea_4.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_4.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_4.setWidgetResizable(True)
self.scrollArea_4.setObjectName("scrollArea_4")
self.scrollAreaWidgetContents_5 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_5.setGeometry(QtCore.QRect(0, 0, 461, 698))
self.scrollAreaWidgetContents_5.setObjectName("scrollAreaWidgetContents_5")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_5)
self.verticalLayout_3.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_3.setSpacing(6)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout_6 = QtWidgets.QGridLayout()
self.gridLayout_6.setSpacing(6)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_34 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_34.setObjectName("label_34")
self.gridLayout_6.addWidget(self.label_34, 0, 0, 1, 1)
self.checkbox_p4a_branch = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkbox_p4a_branch.setText("")
self.checkbox_p4a_branch.setObjectName("checkbox_p4a_branch")
self.gridLayout_6.addWidget(self.checkbox_p4a_branch, 0, 1, 1, 1)
self.lineedit_p4a_branch = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineedit_p4a_branch.setEnabled(False)
self.lineedit_p4a_branch.setObjectName("lineedit_p4a_branch")
self.gridLayout_6.addWidget(self.lineedit_p4a_branch, 0, 2, 1, 1)
self.label_42 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_42.setObjectName("label_42")
self.gridLayout_6.addWidget(self.label_42, 1, 0, 1, 1)
self.label_43 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_43.setObjectName("label_43")
self.gridLayout_6.addWidget(self.label_43, 2, 0, 1, 1)
self.label_44 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_44.setObjectName("label_44")
self.gridLayout_6.addWidget(self.label_44, 3, 0, 1, 1)
self.label_45 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_45.setObjectName("label_45")
self.gridLayout_6.addWidget(self.label_45, 4, 0, 1, 1)
self.label_46 = QtWidgets.QLabel(self.scrollAreaWidgetContents_5)
self.label_46.setObjectName("label_46")
self.gridLayout_6.addWidget(self.label_46, 5, 0, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(20, 13, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_6.addItem(spacerItem4, 6, 2, 1, 1)
self.checkbox_p4a_port = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkbox_p4a_port.setText("")
self.checkbox_p4a_port.setObjectName("checkbox_p4a_port")
self.gridLayout_6.addWidget(self.checkbox_p4a_port, 5, 1, 1, 1)
self.checkbox_p4a_bootstrap = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkbox_p4a_bootstrap.setText("")
self.checkbox_p4a_bootstrap.setObjectName("checkbox_p4a_bootstrap")
self.gridLayout_6.addWidget(self.checkbox_p4a_bootstrap, 4, 1, 1, 1)
self.checkbox_p4a_hook = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkbox_p4a_hook.setText("")
self.checkbox_p4a_hook.setObjectName("checkbox_p4a_hook")
self.gridLayout_6.addWidget(self.checkbox_p4a_hook, 3, 1, 1, 1)
self.checkbox_p4a_local_recipes = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkbox_p4a_local_recipes.setText("")
self.checkbox_p4a_local_recipes.setObjectName("checkbox_p4a_local_recipes")
self.gridLayout_6.addWidget(self.checkbox_p4a_local_recipes, 2, 1, 1, 1)
self.checkbox_p4a_source_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_5)
self.checkbox_p4a_source_dir.setText("")
self.checkbox_p4a_source_dir.setObjectName("checkbox_p4a_source_dir")
self.gridLayout_6.addWidget(self.checkbox_p4a_source_dir, 1, 1, 1, 1)
self.lineedit_p4a_port = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineedit_p4a_port.setEnabled(False)
self.lineedit_p4a_port.setObjectName("lineedit_p4a_port")
self.gridLayout_6.addWidget(self.lineedit_p4a_port, 5, 2, 1, 1)
self.lineedit_p4a_bootstrap = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineedit_p4a_bootstrap.setEnabled(False)
self.lineedit_p4a_bootstrap.setObjectName("lineedit_p4a_bootstrap")
self.gridLayout_6.addWidget(self.lineedit_p4a_bootstrap, 4, 2, 1, 1)
self.lineedit_p4a_hook = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineedit_p4a_hook.setEnabled(False)
self.lineedit_p4a_hook.setObjectName("lineedit_p4a_hook")
self.gridLayout_6.addWidget(self.lineedit_p4a_hook, 3, 2, 1, 1)
self.lineedit_p4a_local_recipes = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineedit_p4a_local_recipes.setEnabled(False)
self.lineedit_p4a_local_recipes.setObjectName("lineedit_p4a_local_recipes")
self.gridLayout_6.addWidget(self.lineedit_p4a_local_recipes, 2, 2, 1, 1)
self.lineedit_p4a_source_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_5)
self.lineedit_p4a_source_dir.setEnabled(False)
self.lineedit_p4a_source_dir.setObjectName("lineedit_p4a_source_dir")
self.gridLayout_6.addWidget(self.lineedit_p4a_source_dir, 1, 2, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_6)
self.scrollArea_4.setWidget(self.scrollAreaWidgetContents_5)
self.horizontalLayout.addWidget(self.scrollArea_4)
self.tabWidget_2.addTab(self.tabP4A, "")
self.tabIOS = QtWidgets.QWidget()
self.tabIOS.setObjectName("tabIOS")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.tabIOS)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(6)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.scrollArea_5 = QtWidgets.QScrollArea(self.tabIOS)
self.scrollArea_5.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_5.setFrameShadow(QtWidgets.QFrame.Plain)
self.scrollArea_5.setWidgetResizable(True)
self.scrollArea_5.setObjectName("scrollArea_5")
self.scrollAreaWidgetContents_6 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_6.setGeometry(QtCore.QRect(0, 0, 461, 698))
self.scrollAreaWidgetContents_6.setObjectName("scrollAreaWidgetContents_6")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_6)
self.verticalLayout_4.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_4.setSpacing(6)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setSpacing(6)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_66 = QtWidgets.QLabel(self.scrollAreaWidgetContents_6)
self.label_66.setObjectName("label_66")
self.gridLayout_5.addWidget(self.label_66, 1, 0, 1, 1)
self.checkbox_ios_codesign_debug = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_6)
self.checkbox_ios_codesign_debug.setText("")
self.checkbox_ios_codesign_debug.setObjectName("checkbox_ios_codesign_debug")
self.gridLayout_5.addWidget(self.checkbox_ios_codesign_debug, 1, 1, 1, 1)
self.lineedit_ios_codesign_debug = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_6)
self.lineedit_ios_codesign_debug.setEnabled(False)
self.lineedit_ios_codesign_debug.setObjectName("lineedit_ios_codesign_debug")
self.gridLayout_5.addWidget(self.lineedit_ios_codesign_debug, 1, 2, 1, 1)
self.checkbox_ios_kivy_ios_dir = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_6)
self.checkbox_ios_kivy_ios_dir.setText("")
self.checkbox_ios_kivy_ios_dir.setObjectName("checkbox_ios_kivy_ios_dir")
self.gridLayout_5.addWidget(self.checkbox_ios_kivy_ios_dir, 0, 1, 1, 1)
self.label_47 = QtWidgets.QLabel(self.scrollAreaWidgetContents_6)
self.label_47.setObjectName("label_47")
self.gridLayout_5.addWidget(self.label_47, 0, 0, 1, 1)
self.lineedit_ios_kivy_ios_dir = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_6)
self.lineedit_ios_kivy_ios_dir.setEnabled(False)
self.lineedit_ios_kivy_ios_dir.setObjectName("lineedit_ios_kivy_ios_dir")
self.gridLayout_5.addWidget(self.lineedit_ios_kivy_ios_dir, 0, 2, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem5, 3, 2, 1, 1)
self.label_67 = QtWidgets.QLabel(self.scrollAreaWidgetContents_6)
self.label_67.setObjectName("label_67")
self.gridLayout_5.addWidget(self.label_67, 2, 0, 1, 1)
self.checkbox_ios_codesign_release = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_6)
self.checkbox_ios_codesign_release.setText("")
self.checkbox_ios_codesign_release.setObjectName("checkbox_ios_codesign_release")
self.gridLayout_5.addWidget(self.checkbox_ios_codesign_release, 2, 1, 1, 1)
self.lineedit_ios_codesign_release = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_6)
self.lineedit_ios_codesign_release.setEnabled(False)
self.lineedit_ios_codesign_release.setObjectName("lineedit_ios_codesign_release")
self.gridLayout_5.addWidget(self.lineedit_ios_codesign_release, 2, 2, 1, 1)
self.verticalLayout_4.addLayout(self.gridLayout_5)
self.scrollArea_5.setWidget(self.scrollAreaWidgetContents_6)
self.horizontalLayout_6.addWidget(self.scrollArea_5)
self.tabWidget_2.addTab(self.tabIOS, "")
self.tabOSX = QtWidgets.QWidget()
self.tabOSX.setObjectName("tabOSX")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.tabOSX)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setSpacing(6)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.scrollArea_7 = QtWidgets.QScrollArea(self.tabOSX)
self.scrollArea_7.setFrameShape(QtWidgets.QFrame.NoFrame)
self.scrollArea_7.setLineWidth(0)
self.scrollArea_7.setWidgetResizable(True)
self.scrollArea_7.setObjectName("scrollArea_7")
self.scrollAreaWidgetContents_8 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_8.setGeometry(QtCore.QRect(0, 0, 461, 698))
self.scrollAreaWidgetContents_8.setObjectName("scrollAreaWidgetContents_8")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents_8)
self.verticalLayout_6.setContentsMargins(11, 11, 11, 11)
self.verticalLayout_6.setSpacing(6)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.gridLayout_7 = QtWidgets.QGridLayout()
self.gridLayout_7.setSpacing(6)
self.gridLayout_7.setObjectName("gridLayout_7")
self.label_71 = QtWidgets.QLabel(self.scrollAreaWidgetContents_8)
self.label_71.setObjectName("label_71")
self.gridLayout_7.addWidget(self.label_71, 1, 0, 1, 1)
self.label_72 = QtWidgets.QLabel(self.scrollAreaWidgetContents_8)
self.label_72.setObjectName("label_72")
self.gridLayout_7.addWidget(self.label_72, 2, 0, 1, 1)
self.spinbox_osx_python_version = QtWidgets.QSpinBox(self.scrollAreaWidgetContents_8)
self.spinbox_osx_python_version.setMinimum(2)
self.spinbox_osx_python_version.setProperty("value", 3)
self.spinbox_osx_python_version.setObjectName("spinbox_osx_python_version")
self.gridLayout_7.addWidget(self.spinbox_osx_python_version, 1, 2, 1, 1)
self.lineedit_osx_kivy_version = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_8)
self.lineedit_osx_kivy_version.setEnabled(False)
self.lineedit_osx_kivy_version.setObjectName("lineedit_osx_kivy_version")
self.gridLayout_7.addWidget(self.lineedit_osx_kivy_version, 2, 2, 1, 1)
self.checkbox_osx_kivy_version = QtWidgets.QCheckBox(self.scrollAreaWidgetContents_8)
self.checkbox_osx_kivy_version.setText("")
self.checkbox_osx_kivy_version.setObjectName("checkbox_osx_kivy_version")
self.gridLayout_7.addWidget(self.checkbox_osx_kivy_version, 2, 1, 1, 1)
self.label_73 = QtWidgets.QLabel(self.scrollAreaWidgetContents_8)
self.label_73.setObjectName("label_73")
self.gridLayout_7.addWidget(self.label_73, 0, 0, 1, 1)
self.lineedit_author = QtWidgets.QLineEdit(self.scrollAreaWidgetContents_8)
self.lineedit_author.setObjectName("lineedit_author")
self.gridLayout_7.addWidget(self.lineedit_author, 0, 2, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout_7)
spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_6.addItem(spacerItem6)
self.scrollArea_7.setWidget(self.scrollAreaWidgetContents_8)
self.horizontalLayout_8.addWidget(self.scrollArea_7)
self.tabWidget_2.addTab(self.tabOSX, "")
self.verticalLayout_5.addWidget(self.splitterMain)
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1000, 26))
self.menuBar.setObjectName("menuBar")
self.menuFile = QtWidgets.QMenu(self.menuBar)
self.menuFile.setObjectName("menuFile")
self.menuRecentFiles = QtWidgets.QMenu(self.menuFile)
self.menuRecentFiles.setObjectName("menuRecentFiles")
self.menuSettings = QtWidgets.QMenu(self.menuBar)
self.menuSettings.setObjectName("menuSettings")
self.menuClean = QtWidgets.QMenu(self.menuSettings)
self.menuClean.setObjectName("menuClean")
self.menuHelp = QtWidgets.QMenu(self.menuBar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menuBar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.actionSettings = QtWidgets.QAction(MainWindow)
self.actionSettings.setObjectName("actionSettings")
self.actionAbout = QtWidgets.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionNew = QtWidgets.QAction(MainWindow)
self.actionNew.setObjectName("actionNew")
self.actionLoad = QtWidgets.QAction(MainWindow)
self.actionLoad.setObjectName("actionLoad")
self.actionSave = QtWidgets.QAction(MainWindow)
self.actionSave.setObjectName("actionSave")
self.actionSave_As = QtWidgets.QAction(MainWindow)
self.actionSave_As.setObjectName("actionSave_As")
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionClean_2 = QtWidgets.QAction(MainWindow)
self.actionClean_2.setObjectName("actionClean_2")
self.actionDist_Clean = QtWidgets.QAction(MainWindow)
self.actionDist_Clean.setObjectName("actionDist_Clean")
self.actionSettings_2 = QtWidgets.QAction(MainWindow)
self.actionSettings_2.setObjectName("actionSettings_2")
self.actionSubmit_Issue = QtWidgets.QAction(MainWindow)
self.actionSubmit_Issue.setObjectName("actionSubmit_Issue")
self.actionDeploy = QtWidgets.QAction(MainWindow)
self.actionDeploy.setObjectName("actionDeploy")
self.actionRun = QtWidgets.QAction(MainWindow)
self.actionRun.setObjectName("actionRun")
self.actionClear_Recent_Files = QtWidgets.QAction(MainWindow)
self.actionClear_Recent_Files.setObjectName("actionClear_Recent_Files")
self.actionSettings_3 = QtWidgets.QAction(MainWindow)
self.actionSettings_3.setObjectName("actionSettings_3")
self.actionSettings_4 = QtWidgets.QAction(MainWindow)
self.actionSettings_4.setObjectName("actionSettings_4")
self.actionServe = QtWidgets.QAction(MainWindow)
self.actionServe.setObjectName("actionServe")
self.menuRecentFiles.addSeparator()
self.menuFile.addAction(self.actionNew)
self.menuFile.addAction(self.actionLoad)
self.menuFile.addAction(self.menuRecentFiles.menuAction())
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionSave_As)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuClean.addAction(self.actionClean_2)
self.menuClean.addAction(self.actionDist_Clean)
self.menuSettings.addAction(self.menuClean.menuAction())
self.menuSettings.addAction(self.actionSettings)
self.menuSettings.addAction(self.actionDeploy)
self.menuSettings.addAction(self.actionRun)
self.menuSettings.addAction(self.actionServe)
self.menuSettings.addSeparator()
self.menuSettings.addAction(self.actionSettings_4)
self.menuHelp.addAction(self.actionSubmit_Issue)
self.menuHelp.addSeparator()
self.menuHelp.addAction(self.actionAbout)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menuSettings.menuAction())
self.menuBar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabAdvanced.setCurrentIndex(0)
self.tabWidget_2.setCurrentIndex(0)
self.checkbox_source_exclude_dirs.toggled['bool'].connect(self.lineedit_source_exclude_dirs.setEnabled)
self.checkbox_source_exclude_patterns.toggled['bool'].connect(self.lineedit_source_exclude_patterns.setEnabled)
self.checkbox_source_exclude_exts.toggled['bool'].connect(self.lineedit_source_exclude_exts.setEnabled)
self.checkbox_source_include_patterns.toggled['bool'].connect(self.lineedit_source_include_patterns.setEnabled)
self.checkbox_source_include_exts.toggled['bool'].connect(self.lineedit_source_include_exts.setEnabled)
self.checkbox_services.toggled['bool'].connect(self.lineedit_services.setEnabled)
self.checkbox_android_ant_path.toggled['bool'].connect(self.lineedit_android_ant_path.setEnabled)
self.checkbox_android_ndk_path.toggled['bool'].connect(self.lineedit_android_ndk_path.setEnabled)
self.checkbox_android_presplash_color.toggled['bool'].connect(self.combobox_android_presplash_color.setEnabled)
self.checkbox_android_sdk_path.toggled['bool'].connect(self.lineedit_android_sdk_path.setEnabled)
self.checkbox_requirements.toggled['bool'].connect(self.lineedit_requirements.setEnabled)
self.checkbox_garden_requirements.toggled['bool'].connect(self.lineedit_garden_requirements.setEnabled)
self.checkbox_presplash_filename.toggled['bool'].connect(self.lineedit_presplash_filename.setEnabled)
self.checkbox_icon_filename.toggled['bool'].connect(self.lineedit_icon_filename.setEnabled)
self.checkbox_android_ant_path.toggled['bool'].connect(self.lineedit_android_ant_path.setEnabled)
self.checkbox_android_entrypoint.toggled['bool'].connect(self.lineedit_android_entrypoint.setEnabled)
self.checkbox_android_add_aars.toggled['bool'].connect(self.lineedit_android_add_aars.setEnabled)
self.checkbox_android_add_java_src.toggled['bool'].connect(self.lineedit_android_add_java_src.setEnabled)
self.checkbox_android_gradle_dependencies.toggled['bool'].connect(self.lineedit_android_gradle_dependencies.setEnabled)
self.checkbox_android_add_activities.toggled['bool'].connect(self.lineedit_android_add_activities.setEnabled)
self.checkbox_android_manifest_intent_filters.toggled['bool'].connect(self.lineedit_android_manifest_intent_filters.setEnabled)
self.checkbox_android_manifest_launch_mode.toggled['bool'].connect(self.lineedit_android_manifest_launch_mode.setEnabled)
self.checkbox_android_meta_data.toggled['bool'].connect(self.lineedit_android_meta_data.setEnabled)
self.checkbox_android_library_references.toggled['bool'].connect(self.lineedit_android_library_references.setEnabled)
self.checkbox_android_logcat_filters.toggled['bool'].connect(self.lineedit_android_logcat_filters.setEnabled)
self.checkbox_android_copy_libs.toggled['bool'].connect(self.lineedit_android_copy_libs.setEnabled)
self.checkbox_android_whitelist.toggled['bool'].connect(self.lineedit_android_whitelist.setEnabled)
self.checkbox_android_blacklist.toggled['bool'].connect(self.lineedit_android_blacklist.setEnabled)
self.checkbox_android_add_libs_armeabi.toggled['bool'].connect(self.lineedit_android_add_libs_armeabi.setEnabled)
self.checkbox_android_add_libs_armeabi_v7a.toggled['bool'].connect(self.lineedit_android_add_libs_armeabi_v7a.setEnabled)
self.checkbox_android_add_libs_x86.toggled['bool'].connect(self.lineedit_android_add_libs_x86.setEnabled)
self.checkbox_android_add_libs_mips.toggled['bool'].connect(self.lineedit_android_add_libs_mips.setEnabled)
self.checkbox_android_ouya_icon_filename.toggled['bool'].connect(self.lineedit_android_ouya_icon_filename.setEnabled)
self.checkbox_p4a_branch.toggled['bool'].connect(self.lineedit_p4a_branch.setEnabled)
self.checkbox_p4a_source_dir.toggled['bool'].connect(self.lineedit_p4a_source_dir.setEnabled)
self.checkbox_p4a_local_recipes.toggled['bool'].connect(self.lineedit_p4a_local_recipes.setEnabled)
self.checkbox_p4a_hook.toggled['bool'].connect(self.lineedit_p4a_hook.setEnabled)
self.checkbox_p4a_bootstrap.toggled['bool'].connect(self.lineedit_p4a_bootstrap.setEnabled)
self.checkbox_p4a_port.toggled['bool'].connect(self.lineedit_p4a_port.setEnabled)
self.checkbox_ios_kivy_ios_dir.toggled['bool'].connect(self.lineedit_ios_kivy_ios_dir.setEnabled)
self.checkbox_ios_codesign_debug.toggled['bool'].connect(self.lineedit_ios_codesign_debug.setEnabled)
self.checkbox_ios_codesign_release.toggled['bool'].connect(self.lineedit_ios_codesign_release.setEnabled)
self.checkbox_osx_kivy_version.toggled['bool'].connect(self.lineedit_osx_kivy_version.setEnabled)
self.menuBar.triggered['QAction*'].connect(MainWindow.onMenubar)
self.checkbox_bin_dir.toggled['bool'].connect(self.lineedit_bin_dir.setEnabled)
self.checkbox_build_dir.toggled['bool'].connect(self.lineedit_build_dir.setEnabled)
self.refreshPreview.clicked.connect(MainWindow.refreshPreview)
self.checkbox_android_permissions.toggled['bool'].connect(self.lineedit_android_permissions.setEnabled)
self.checkbox_android_ouya_category.toggled['bool'].connect(self.combobox_android_ouya_category.setEnabled)
self.refreshSystem.clicked.connect(MainWindow.refreshSystemDetails)
self.checkbox_version_regex.toggled['bool'].connect(self.lineedit_version_regex.setEnabled)
self.checkbox_version_filename.toggled['bool'].connect(self.lineedit_version_filename.setEnabled)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install (or re-install) every user-visible string on the form.

    All literals are routed through ``QCoreApplication.translate`` so Qt
    Linguist translation catalogs can override them at runtime.  Qt calls
    this from ``setupUi`` and again whenever the UI language changes.

    NOTE(review): this looks like pyuic-generated code (``_translate``
    alias, widget-per-line style) — if so, edit the .ui file and
    regenerate rather than hand-editing; confirm against the build setup.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    # --- "Project" tab: application identity and source-tree settings ---
    self.label_9.setText(_translate("MainWindow", "Source Exclude Dirs:"))
    self.toolbutton_source_dir.setText(_translate("MainWindow", "..."))
    self.lineedit_source_exclude_exts.setToolTip(_translate("MainWindow", "# (list) Source files to exclude (let empty to not exclude anything)"))
    self.lineedit_source_exclude_exts.setText(_translate("MainWindow", "spec"))
    self.label_31.setText(_translate("MainWindow", "Source"))
    self.lineedit_requirements.setText(_translate("MainWindow", "sqlite3,kivy"))
    self.lineedit_icon_filename.setText(_translate("MainWindow", "%(source.dir)s/data/icon.png"))
    self.label_77.setText(_translate("MainWindow", "Presplash Image:"))
    self.label_4.setText(_translate("MainWindow", "Domain:"))
    self.label_78.setText(_translate("MainWindow", "Application Requirements:"))
    self.lineedit_source_include_exts.setToolTip(_translate("MainWindow", "# (list) Source files to include (let empty to include all the files)"))
    self.lineedit_source_include_exts.setText(_translate("MainWindow", "py,png,jpg,kv,atlas"))
    self.label_76.setText(_translate("MainWindow", "Garden Requirements:"))
    self.lineedit_presplash_filename.setToolTip(_translate("MainWindow", "Presplash of the application"))
    self.lineedit_presplash_filename.setText(_translate("MainWindow", "%(source.dir)s/data/presplash.png"))
    self.label_5.setText(_translate("MainWindow", "Source Dir:"))
    self.label_75.setText(_translate("MainWindow", "Application Icon:"))
    self.lineedit_package_name.setText(_translate("MainWindow", "myapp"))
    self.label_6.setText(_translate("MainWindow", "Source Include Exts:"))
    self.label_79.setText(_translate("MainWindow", "Application"))
    self.lineedit_title.setText(_translate("MainWindow", "AppTitle"))
    self.label_8.setText(_translate("MainWindow", "Source Exclude Exts:"))
    self.lineedit_source_dir.setToolTip(_translate("MainWindow", "Source code where the main.py"))
    self.lineedit_source_dir.setText(_translate("MainWindow", "."))
    self.lineedit_source_exclude_patterns.setToolTip(_translate("MainWindow", "# (list) List of exclusions using pattern matching"))
    self.lineedit_source_exclude_patterns.setText(_translate("MainWindow", "license,images/*/*.jpg"))
    self.lineedit_source_exclude_dirs.setToolTip(_translate("MainWindow", "List of directory to exclude (let empty to not exclude anything)"))
    self.lineedit_source_exclude_dirs.setText(_translate("MainWindow", "tests,bin"))
    self.label_2.setText(_translate("MainWindow", "Package Name:"))
    self.label_3.setText(_translate("MainWindow", "Version:"))
    self.label_10.setText(_translate("MainWindow", "Source Exclude Patterns:"))
    self.lineedit_services.setToolTip(_translate("MainWindow", "List of service to declare"))
    self.lineedit_services.setText(_translate("MainWindow", "NAME:ENTRYPOINT_TO_PY,NAME2:ENTRYPOINT2_TO_PY"))
    self.lineedit_version.setText(_translate("MainWindow", "0.1"))
    self.label.setText(_translate("MainWindow", "Title:"))
    self.lineedit_package_domain.setToolTip(_translate("MainWindow", "# (str) Package domain (needed for android/ios packaging)"))
    self.lineedit_package_domain.setText(_translate("MainWindow", "org.test"))
    self.label_11.setText(_translate("MainWindow", "Orientation:"))
    self.label_26.setText(_translate("MainWindow", "Services:"))
    self.combobox_orientation.setToolTip(_translate("MainWindow", "# (str) Supported orientation (one of landscape, portrait or all)"))
    self.combobox_orientation.setItemText(0, _translate("MainWindow", "landscape"))
    self.combobox_orientation.setItemText(1, _translate("MainWindow", "portrait"))
    self.combobox_orientation.setItemText(2, _translate("MainWindow", "all"))
    self.label_7.setText(_translate("MainWindow", "Source Include Patterns:"))
    self.lineedit_source_include_patterns.setToolTip(_translate("MainWindow", "# (list) List of inclusions using pattern matching"))
    self.lineedit_source_include_patterns.setText(_translate("MainWindow", "assets/*,images/*.png"))
    self.label_32.setText(_translate("MainWindow", "Version (regex)"))
    self.lineedit_version_regex.setText(_translate("MainWindow", "__version__ = [\'\"](.*)[\'\"]"))
    self.label_64.setText(_translate("MainWindow", "Version (filename) "))
    self.lineedit_version_filename.setText(_translate("MainWindow", "%(source.dir)s/main.py"))
    self.tabAdvanced.setTabText(self.tabAdvanced.indexOf(self.Project), _translate("MainWindow", "Project"))
    # --- "Build Environment" tab: buildozer output directories ---
    self.label_65.setText(_translate("MainWindow", "Bin Dir:"))
    self.label_83.setText(_translate("MainWindow", "Build Dir:"))
    self.toolbutton_bin_dir.setText(_translate("MainWindow", "..."))
    self.toolbutton_build_dir.setText(_translate("MainWindow", "..."))
    self.lineedit_build_dir.setText(_translate("MainWindow", "./.buildozer"))
    self.lineedit_bin_dir.setToolTip(_translate("MainWindow", "(str) Path to build output (i.e. .apk, .ipa) storage"))
    self.lineedit_bin_dir.setText(_translate("MainWindow", "./bin"))
    self.tabAdvanced.setTabText(self.tabAdvanced.indexOf(self.tabProject), _translate("MainWindow", "Build Environment"))
    # --- "Preview Spec" and "System" tabs ---
    self.refreshPreview.setText(_translate("MainWindow", "Refresh"))
    self.tabAdvanced.setTabText(self.tabAdvanced.indexOf(self.tab), _translate("MainWindow", "Preview Spec"))
    self.refreshSystem.setText(_translate("MainWindow", "Refresh"))
    self.textEditSystem.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
    self.tabAdvanced.setTabText(self.tabAdvanced.indexOf(self.tab_2), _translate("MainWindow", "System"))
    # --- "Android" tab: SDK/NDK/API settings ---
    self.label_21.setText(_translate("MainWindow", "Android Min API:"))
    self.lineedit2_android_minapi.setToolTip(_translate("MainWindow", "Minimum API required"))
    self.lineedit2_android_minapi.setText(_translate("MainWindow", "19"))
    self.label_13.setText(_translate("MainWindow", "Android ANT Path:"))
    self.label_20.setText(_translate("MainWindow", "Android API:"))
    self.lineedit_android_ant_path.setToolTip(_translate("MainWindow", "Android ANT directory (if empty, it will be automatically downloaded.)"))
    self.toolbutton_android_ant_path.setText(_translate("MainWindow", "..."))
    self.label_25.setText(_translate("MainWindow", "Android Arch:"))
    self.combobox_android_arch.setToolTip(_translate("MainWindow", "The Android arch to build for"))
    self.combobox_android_arch.setItemText(0, _translate("MainWindow", "armeabi-v7a"))
    self.combobox_android_arch.setItemText(1, _translate("MainWindow", "arm64-v8a"))
    self.combobox_android_arch.setItemText(2, _translate("MainWindow", "x86"))
    self.lineedit2_android_api.setToolTip(_translate("MainWindow", " Android API to use"))
    self.lineedit2_android_api.setText(_translate("MainWindow", "19"))
    self.label_22.setText(_translate("MainWindow", "Android SDK:"))
    self.label_16.setText(_translate("MainWindow", "Skip SDK Update:"))
    self.label_19.setText(_translate("MainWindow", "Android NDK:"))
    self.label_23.setText(_translate("MainWindow", "Permissions:"))
    self.label_12.setText(_translate("MainWindow", "Fullscreen:"))
    self.permissions_android_permissions.setText(_translate("MainWindow", "..."))
    self.label_14.setText(_translate("MainWindow", "Presplash Color:"))
    self.label_18.setText(_translate("MainWindow", "Android SDK Path:"))
    self.colorpick_android_presplash_color.setText(_translate("MainWindow", "..."))
    self.checkbox2_android_private_storage.setToolTip(_translate("MainWindow", "(bool) Use --private data storage (True) or --dir public storage (False)"))
    self.label_24.setText(_translate("MainWindow", "Private Storage:"))
    self.label_58.setText(_translate("MainWindow", "Wakelock:"))
    self.checkbox2_android_wakelock.setToolTip(_translate("MainWindow", "# (bool) Indicate whether the screen should stay on\n"
        "# Don\'t forget to add the WAKE_LOCK permission if you set this to True"))
    self.lineedit_android_permissions.setText(_translate("MainWindow", "INTERNET"))
    self.checkbox2_android_skip_update.setToolTip(_translate("MainWindow", "If checked, then skip trying to update the Android sdk"))
    self.toolbutton_android_sdk_path.setText(_translate("MainWindow", "..."))
    self.lineedit_android_sdk_path.setToolTip(_translate("MainWindow", "Android SDK directory (if empty, it will be automatically downloaded.)"))
    self.lineedit2_android_sdk.setText(_translate("MainWindow", "20"))
    self.label_17.setText(_translate("MainWindow", "Android NDK Path:"))
    self.lineedit2_android_ndk.setToolTip(_translate("MainWindow", "Android NDK version to use"))
    self.lineedit2_android_ndk.setText(_translate("MainWindow", "9c"))
    self.lineedit_android_ndk_path.setToolTip(_translate("MainWindow", "Android NDK directory (if empty, it will be automatically downloaded.)"))
    self.toolbutton_android_ndk_path.setText(_translate("MainWindow", "..."))
    self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabAndroid), _translate("MainWindow", "Android"))
    # --- "Android (Advanced)" tab: jars, aars, libs, manifest extras ---
    self.label_28.setToolTip(_translate("MainWindow", " # (list) Android AAR archives to add (currently works only with sdl2_gradle\n"
        " # bootstrap)"))
    self.label_28.setText(_translate("MainWindow", "Android AAR Archives:"))
    self.label_15.setText(_translate("MainWindow", "Android Jars:"))
    self.lineedit_android_add_jars.setToolTip(_translate("MainWindow", "# (list) List of Java .jar files to add to the libs so that pyjnius can access\n"
        " # their classes. Don\'t add jars that you do not need, since extra jars can slow\n"
        " # down the build process. Allows wildcards matching, for example:\n"
        " # OUYA-ODK/libs/*.jar\n"
        " #android_add_jars: foo.jar,bar.jar,path/to/more/*.jar"))
    self.lineedit_android_add_jars.setText(_translate("MainWindow", "foo.jar,bar.jar,path/to/more/*.jar"))
    self.lineedit_android_add_aars.setToolTip(_translate("MainWindow", "# (list) Android AAR archives to add (currently works only with sdl2_gradle\n"
        "# bootstrap"))
    self.label_33.setText(_translate("MainWindow", "Android Entry Point:"))
    self.lineedit_android_entrypoint.setToolTip(_translate("MainWindow", "(str) Android entry point, default is ok for Kivy-based app"))
    self.lineedit_android_entrypoint.setText(_translate("MainWindow", "org.renpy.android.PythonActivity"))
    self.label_57.setText(_translate("MainWindow", "Manifest Launch Mode:"))
    self.label_39.setText(_translate("MainWindow", "Activities (Java classes):"))
    self.label_27.setToolTip(_translate("MainWindow", "# (list) List of Java files to add to the android project (can be java or a\n"
        " # directory containing the files)"))
    self.label_27.setText(_translate("MainWindow", "Java Source:"))
    self.label_56.setText(_translate("MainWindow", "Manifest Intent Filters:"))
    self.lineedit_android_manifest_intent_filters.setToolTip(_translate("MainWindow", "# (str) XML file to include as an intent filters in <activity> tag"))
    self.label_38.setText(_translate("MainWindow", "Gradle Dependencies:"))
    self.lineedit_android_add_java_src.setToolTip(_translate("MainWindow", "# (list) List of Java files to add to the android project (can be java or a\n"
        "# directory containing the files)"))
    self.lineedit_android_gradle_dependencies.setToolTip(_translate("MainWindow", "(list) Gradle dependencies to add (currently works only with sdl2_gradle # bootstrap)"))
    self.lineedit_android_add_activities.setToolTip(_translate("MainWindow", "# (list) Java classes to add as activities to the manifest."))
    self.lineedit_android_add_activities.setText(_translate("MainWindow", "com.example.ExampleActivity"))
    self.label_53.setText(_translate("MainWindow", "MIPS Libs"))
    self.label_51.setText(_translate("MainWindow", "Armeabi v7a Libs"))
    self.label_61.setText(_translate("MainWindow", "Library References:"))
    self.lineedit_android_add_libs_mips.setText(_translate("MainWindow", "libs/android-mips/*.so"))
    self.lineedit_android_meta_data.setToolTip(_translate("MainWindow", "# (list) Android application meta-data to set (key=value format)"))
    self.lineedit_android_manifest_launch_mode.setToolTip(_translate("MainWindow", "# (str) launchMode to set for the main activity"))
    self.lineedit_android_manifest_launch_mode.setText(_translate("MainWindow", "standard"))
    self.label_60.setText(_translate("MainWindow", "Logcat Filters:"))
    self.label_37.setToolTip(_translate("MainWindow", "# (list) Android additionnal libraries to copy into libs/armeabi"))
    self.label_37.setText(_translate("MainWindow", "Additional Libraries"))
    self.label_63.setText(_translate("MainWindow", "Copy Libraries:"))
    self.label_50.setText(_translate("MainWindow", "Armeabi Libs"))
    self.lineedit_android_copy_libs.setToolTip(_translate("MainWindow", "(bool) Copy library instead of making a libpymodules.so"))
    self.label_59.setText(_translate("MainWindow", "Android Meta-data:"))
    self.lineedit_android_add_libs_armeabi_v7a.setText(_translate("MainWindow", "libs/android-v7/*.so"))
    self.lineedit_android_add_libs_armeabi.setText(_translate("MainWindow", "libs/android/*.so"))
    self.label_52.setText(_translate("MainWindow", "x86 Libs"))
    self.lineedit_android_logcat_filters.setToolTip(_translate("MainWindow", "# (str) Android logcat filters to use"))
    self.lineedit_android_logcat_filters.setText(_translate("MainWindow", "*:S python:D"))
    self.lineedit_android_add_libs_x86.setText(_translate("MainWindow", "libs/android-x86/*.so"))
    self.lineedit_android_library_references.setToolTip(_translate("MainWindow", "# (list) Android library project to add (will be added in the\n"
        "# project.properties automatically.)"))
    self.combobox_android_ouya_category.setToolTip(_translate("MainWindow", "# (str) OUYA Console category. Should be one of GAME or APP\n"
        "# If you leave this blank, OUYA support will not be enabled"))
    self.combobox_android_ouya_category.setItemText(0, _translate("MainWindow", "APP"))
    self.combobox_android_ouya_category.setItemText(1, _translate("MainWindow", "GAME"))
    self.label_36.setText(_translate("MainWindow", "Ouya Icon: (732x412 PNG)"))
    self.label_54.setText(_translate("MainWindow", "Ouya"))
    self.label_35.setText(_translate("MainWindow", "Ouya Category:"))
    self.lineedit_android_ouya_icon_filename.setToolTip(_translate("MainWindow", "# (str) Filename of OUYA Console icon. It must be a 732x412 png image."))
    self.lineedit_android_ouya_icon_filename.setText(_translate("MainWindow", "%(source.dir)s/data/ouya_icon.png"))
    self.label_68.setText(_translate("MainWindow", "Android Blacklist:"))
    self.label_69.setText(_translate("MainWindow", "Android Whitelist:"))
    self.lineedit_android_blacklist.setToolTip(_translate("MainWindow", "blacklist file"))
    self.lineedit_android_whitelist.setToolTip(_translate("MainWindow", "(list) Pattern to whitelist for the whole project"))
    self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabAndroid2), _translate("MainWindow", "Android (Advanced)"))
    # --- "Python-For-Android" tab ---
    self.tabP4A.setToolTip(_translate("MainWindow", "Python for android (p4a) specific"))
    self.label_34.setText(_translate("MainWindow", "P4A Branch:"))
    self.lineedit_p4a_branch.setToolTip(_translate("MainWindow", "(str) python-for-android branch to use, defaults to stable"))
    self.lineedit_p4a_branch.setText(_translate("MainWindow", "stable"))
    self.label_42.setText(_translate("MainWindow", "P4A Source Dir:"))
    self.label_43.setText(_translate("MainWindow", "P4A Recipes Dir:"))
    self.label_44.setText(_translate("MainWindow", "P4A Hook:"))
    self.label_45.setText(_translate("MainWindow", "P4A Bootstrap:"))
    self.label_46.setText(_translate("MainWindow", "P4A Port:"))
    self.lineedit_p4a_port.setToolTip(_translate("MainWindow", "(int) port number to specify an explicit --port= p4a argument (eg for bootstrap flask)"))
    self.lineedit_p4a_bootstrap.setToolTip(_translate("MainWindow", "(str) Bootstrap to use for android builds"))
    self.lineedit_p4a_bootstrap.setText(_translate("MainWindow", "sdl2"))
    self.lineedit_p4a_hook.setToolTip(_translate("MainWindow", "(str) Filename to the hook for p4a"))
    self.lineedit_p4a_local_recipes.setToolTip(_translate("MainWindow", "(str) The directory in which python-for-android should look for your own build recipes (if any)"))
    self.lineedit_p4a_source_dir.setToolTip(_translate("MainWindow", "(str) python-for-android git clone directory (if empty, it will be automatically cloned from github)"))
    self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabP4A), _translate("MainWindow", "Python-For-Android"))
    # --- "iOS" tab ---
    self.label_66.setText(_translate("MainWindow", "IOS Codesign Debug:"))
    self.lineedit_ios_codesign_debug.setToolTip(_translate("MainWindow", "# (str) Name of the certificate to use for signing the debug version\n"
        "# Get a list of available identities: buildozer ios list_identities"))
    self.lineedit_ios_codesign_debug.setText(_translate("MainWindow", "\"iPhone Developer: <lastname> <firstname> (<hexstring>)\""))
    self.label_47.setText(_translate("MainWindow", "IOS Kivy Dir:"))
    self.lineedit_ios_kivy_ios_dir.setToolTip(_translate("MainWindow", "(str) Path to a custom kivy-ios folder"))
    self.lineedit_ios_kivy_ios_dir.setText(_translate("MainWindow", "../kivy-ios"))
    self.label_67.setText(_translate("MainWindow", "IOS Codesign Release:"))
    self.lineedit_ios_codesign_release.setToolTip(_translate("MainWindow", "(str) Name of the certificate to use for signing the release version"))
    self.lineedit_ios_codesign_release.setText(_translate("MainWindow", "%(ios.codesign_debug)s"))
    self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabIOS), _translate("MainWindow", "iOS"))
    # --- "OSX" tab ---
    self.label_71.setText(_translate("MainWindow", "OSX Python Version:"))
    self.label_72.setText(_translate("MainWindow", "OSX Kivy Version:"))
    self.spinbox_osx_python_version.setToolTip(_translate("MainWindow", "# change the major version of python used by the app"))
    self.lineedit_osx_kivy_version.setToolTip(_translate("MainWindow", "# Kivy version to use"))
    self.lineedit_osx_kivy_version.setText(_translate("MainWindow", "1.9.1"))
    self.label_73.setText(_translate("MainWindow", "Author"))
    self.lineedit_author.setText(_translate("MainWindow", "© Copyright Info"))
    self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tabOSX), _translate("MainWindow", "OSX"))
    # --- Menu bar titles, actions and keyboard shortcuts ---
    self.menuFile.setTitle(_translate("MainWindow", "File"))
    self.menuRecentFiles.setTitle(_translate("MainWindow", "Recent Files..."))
    self.menuSettings.setTitle(_translate("MainWindow", "Tools"))
    self.menuClean.setTitle(_translate("MainWindow", "Clean"))
    self.menuHelp.setTitle(_translate("MainWindow", "Help"))
    self.actionSettings.setText(_translate("MainWindow", "Build"))
    self.actionSettings.setShortcut(_translate("MainWindow", "F5"))
    self.actionAbout.setText(_translate("MainWindow", "About..."))
    self.actionAbout.setShortcut(_translate("MainWindow", "F1"))
    self.actionNew.setText(_translate("MainWindow", "New"))
    self.actionNew.setShortcut(_translate("MainWindow", "Ctrl+N"))
    self.actionLoad.setText(_translate("MainWindow", "Load"))
    self.actionLoad.setShortcut(_translate("MainWindow", "Ctrl+O"))
    self.actionSave.setText(_translate("MainWindow", "Save"))
    self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S"))
    self.actionSave_As.setText(_translate("MainWindow", "Save As..."))
    self.actionSave_As.setShortcut(_translate("MainWindow", "Ctrl+Shift+S"))
    self.actionExit.setText(_translate("MainWindow", "Exit"))
    self.actionExit.setShortcut(_translate("MainWindow", "Ctrl+Q"))
    self.actionClean_2.setText(_translate("MainWindow", "Clean"))
    self.actionDist_Clean.setText(_translate("MainWindow", "Dist Clean"))
    self.actionSettings_2.setText(_translate("MainWindow", "Settings"))
    self.actionSettings_2.setShortcut(_translate("MainWindow", "F10"))
    self.actionSubmit_Issue.setText(_translate("MainWindow", "Submit Issue"))
    self.actionSubmit_Issue.setShortcut(_translate("MainWindow", "F2"))
    self.actionDeploy.setText(_translate("MainWindow", "Deploy"))
    self.actionDeploy.setShortcut(_translate("MainWindow", "F6"))
    self.actionRun.setText(_translate("MainWindow", "Run"))
    self.actionRun.setShortcut(_translate("MainWindow", "F7"))
    self.actionClear_Recent_Files.setText(_translate("MainWindow", "Clear Recent Files"))
    self.actionSettings_3.setText(_translate("MainWindow", "Settings"))
    self.actionSettings_4.setText(_translate("MainWindow", "Settings"))
    self.actionSettings_4.setShortcut(_translate("MainWindow", "F10"))
    self.actionServe.setText(_translate("MainWindow", "Serve"))
|
swprojects/Buildertron
|
setup.py
|
<filename>setup.py
#
# buildertron
#
import subprocess
from setuptools import setup, find_packages, Command
from info import (__projectname__, __version__, __homepage__, __author__,
__classifiers__, __readme__, __history__, __description__,
__author_email__)
class TestCommand(Command):
    """``python setup.py test``: run flake8 style checks on the tree.

    Registered via ``cmd_classes`` below; flake8 is configured through
    the project's ``.flake8.ini`` file.
    """

    description = 'run flake8 style checks'
    user_options = []

    def initialize_options(self):
        """No options to initialize (required Command hook)."""
        pass

    def finalize_options(self):
        """No options to finalize (required Command hook)."""
        pass

    def run(self):
        """Invoke flake8 and propagate its exit status.

        The original discarded ``subprocess.call``'s return value, so the
        test command reported success even when lint checks failed.
        """
        result = subprocess.call(['flake8', '--append-config=.flake8.ini'])
        if result != 0:
            raise SystemExit(result)
# Map setup.py sub-commands to their implementing classes.
cmd_classes = {
    'test': TestCommand,
}

# Package metadata (__projectname__, __version__, ...) comes from the
# `info` module imported at the top of this file.
setup(name=__projectname__,
      version=__version__,
      description=__description__,
      # PyPI long description: README followed by the changelog.
      long_description=__readme__ + '\n\n' + __history__,
      author=__author__,
      author_email=__author_email__,
      url=__homepage__,
      license='MIT',
      packages=find_packages(exclude=['docs', 'resources', 'snap', 'tests*']),
      # Ship icon/splash images alongside the Python packages.
      package_data={'': ["icons/*.png", "splash.png", "buildertron.png"]},
      # Desktop-integration files installed outside the package tree.
      data_files=[('share/applications', ['data/Buildertron.desktop']),
                  ('share/buildertron', ['buildertron/buildertron.png'])],
      # PyQt5 wheels newer than 5.10 dropped support for Python < 3.6,
      # hence the pinned version for older interpreters.
      install_requires=[
          'pyqt5==5.10.0;python_version<"3.6"',
          'pyqt5;python_version>="3.6"'
      ],
      classifiers=__classifiers__,
      test_suite='tests',
      tests_require=[],
      cmdclass=cmd_classes,
      # GUI entry point: the `buildertron` executable.
      entry_points={'gui_scripts': ['buildertron = buildertron.buildertron:main']}
      )
|
swprojects/Buildertron
|
buildertron/dialogs/settingsdialog.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*
"""
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from forms.uisettingsdialog import Ui_SettingsDialog
from PyQt5.QtWidgets import QDialog
from functools import partial
import logging
class SettingsDialog(QDialog, Ui_SettingsDialog):
    """Application settings dialog.

    Widgets produced by the generated ``Ui_SettingsDialog`` form are
    indexed by a naming convention on their object names:

    * ``checkBox_<Key>``              -> ``self.checkBoxes[<Key>]``
    * ``lineEdit_<Key>`` / ``textEdit_<Key>`` -> ``self.uiObjects[<Key>]``
    * ``default_<Key>`` buttons restore the default command for ``<Key>``.

    ``setConfig`` populates the widgets from a config dict; ``value``
    reads them back into a dict for the parent window.
    """

    def __init__(self, parent):
        super(SettingsDialog, self).__init__(parent)
        self.parent = parent
        self.configDefaults = {}  # pristine defaults, used by the "default" buttons
        self.config = {}          # configuration currently shown in the dialog
        self.ui = Ui_SettingsDialog()
        self.ui.setupUi(self)
        self.checkBoxes = {}      # key suffix -> checkbox widget
        self.uiObjects = {}       # key suffix -> line/text edit widget
        self.collectUiObjects()
        self.open()

    def collectUiObjects(self):
        """Index the generated form widgets by their name suffix.

        Also wires every ``default_*`` button to restoreDefaultCommand
        with its key bound via functools.partial.
        """
        logging.debug(self.ui.__dict__.keys())
        for name, widget in self.ui.__dict__.items():
            if name.startswith('checkBox_'):
                self.checkBoxes[name[9:]] = widget
            elif name.startswith('lineEdit_') or name.startswith('textEdit_'):
                self.uiObjects[name[9:]] = widget
            elif name.startswith('default_'):
                widget.clicked.connect(
                    partial(self.restoreDefaultCommand, name[8:]))

    def onApply(self, button):
        """Button-box handler: push dialog state to the parent on Apply."""
        if button.text() != 'Apply':
            return
        self.parent.updateConfig(self.value())

    def restoreDefaultCommand(self, command):
        """Reset the widget for *command* to its configured default."""
        if command == 'CustomSpec':
            value = self.configDefaults.get('custom_spec', '')
            self.uiObjects[command].setText(str(value))
            return
        key = 'override_{0}_cmd'.format(command.lower())
        try:
            value = self.configDefaults.get(key, None)
            # Guard: the original passed a possible None straight to
            # setText, raising a TypeError that was silently swallowed.
            if value is not None:
                self.uiObjects[command].setText(value)
        except Exception as e:
            logging.debug(e)

    def _setCheck(self, name, checked):
        """Set checkbox *name* to fully checked (2) or unchecked (0)."""
        self.checkBoxes[name].setCheckState(2 if checked else 0)

    def updateGeneral(self):
        """Refresh the General options from self.config."""
        config = self.config
        self._setCheck('AskSaveOnClose', config.get('ask_save_on_close', True))
        self._setCheck('SplashScreen', config.get('show_splashscreen', True))
        self._setCheck('RecentFiles', config.get('keep_recent_files', True))

    def updateBlacklist(self):
        """Refresh the blacklist checkboxes and text areas from self.config."""
        config = self.config
        self._setCheck('BlacklistApp', config.get('use_app_blacklist', False))
        self._setCheck('BlacklistBuildozer',
                       config.get('use_buildozer_blacklist', False))
        self.uiObjects['BlacklistApp'].insertPlainText(
            str(config.get('app_blacklist', '')))
        self.uiObjects['BlacklistBuildozer'].insertPlainText(
            str(config.get('buildozer_blacklist', '')))

    def updateSubstitute(self):
        """Refresh the substitution checkboxes and text areas from self.config."""
        config = self.config
        self._setCheck('SubApp', config.get('use_app_substitute', False))
        self._setCheck('SubBuildozer',
                       config.get('use_buildozer_substitute', False))
        self.uiObjects['SubApp'].insertPlainText(
            str(config.get('app_substitute', '')))
        self.uiObjects['SubBuildozer'].insertPlainText(
            str(config.get('buildozer_substitute', '')))

    def updateCommands(self):
        """Refresh the command-override checkboxes and command lines."""
        config = self.config
        # Same order as the original: all checkboxes first, then all texts.
        for cmd in ('build', 'clean', 'deploy', 'distclean',
                    'run', 'serve', 'terminal'):
            self._setCheck(cmd.title(), config.get('override_' + cmd, False))
        defaults = (
            ('build', 'buildozer {target} build'),
            ('clean', 'buildozer {target} clean'),
            ('deploy', 'buildozer {target} deploy'),
            ('distclean', 'buildozer distclean'),
            ('run', 'buildozer {target} run'),
            ('serve', 'buildozer {target} serve'),
            ('terminal', 'xterm -hold -e'),
        )
        for cmd, default in defaults:
            override = config.get('override_{}_cmd'.format(cmd), default)
            self.uiObjects[cmd.title()].setText(str(override))

    def updateBuildozerSpec(self):
        """Refresh the custom-spec checkbox and text area from self.config."""
        config = self.config
        self._setCheck('CustomSpec', config.get('use_custom_spec', False))
        self.uiObjects['CustomSpec'].clear()
        self.uiObjects['CustomSpec'].insertPlainText(
            str(config.get('custom_spec', '')))

    def updateCustomTargets(self):
        """Refresh the custom-targets text area from self.config."""
        self.uiObjects['CustomTarget'].insertPlainText(
            self.config.get('custom_target', ''))

    def setDefaultConfig(self, config):
        """Record the pristine defaults used by the "default" buttons."""
        self.configDefaults.update(config)

    def setConfig(self, config):
        """Merge *config* into the dialog state and refresh every tab."""
        self.config.update(config)
        self.updateGeneral()
        self.updateCommands()
        self.updateCustomTargets()
        self.updateSubstitute()
        self.updateBlacklist()
        self.updateBuildozerSpec()

    def getCheckboxBool(self, check):
        """Return the checkbox state for *check* as a plain bool."""
        return bool(self.checkBoxes[check].checkState())

    def value(self):
        """Read the dialog widgets back into a configuration dict.

        The original assigned ``custom_spec``/``use_custom_spec`` twice;
        the duplicate (no-op) assignments have been removed.
        """
        config = {}
        config['ask_save_on_close'] = self.getCheckboxBool('AskSaveOnClose')
        config['show_splashscreen'] = self.getCheckboxBool('SplashScreen')
        config['keep_recent_files'] = self.getCheckboxBool('RecentFiles')
        for x in ('clean', 'build', 'distclean', 'run',
                  'deploy', 'serve', 'terminal'):
            config['override_{}_cmd'.format(x)] = self.uiObjects[x.title()].text()
            config['override_{}'.format(x)] = self.getCheckboxBool(x.title())
        config['custom_target'] = self.uiObjects['CustomTarget'].toPlainText()
        config['app_blacklist'] = self.uiObjects['BlacklistApp'].toPlainText()
        config['use_app_blacklist'] = self.getCheckboxBool('BlacklistApp')
        config['buildozer_blacklist'] = self.uiObjects['BlacklistBuildozer'].toPlainText()
        config['use_buildozer_blacklist'] = self.getCheckboxBool('BlacklistBuildozer')
        config['app_substitute'] = self.uiObjects['SubApp'].toPlainText()
        config['use_app_substitute'] = self.getCheckboxBool('SubApp')
        config['buildozer_substitute'] = self.uiObjects['SubBuildozer'].toPlainText()
        config['use_buildozer_substitute'] = self.getCheckboxBool('SubBuildozer')
        config['custom_spec'] = self.uiObjects['CustomSpec'].toPlainText()
        config['use_custom_spec'] = self.getCheckboxBool('CustomSpec')
        return config
|
swprojects/Buildertron
|
buildertron/spec/translate_objects.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*
"""
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Mapping from buildozer.spec option names (dotted, per spec section) to the
# flat attribute/widget names used internally by this project. Keys are the
# spec sections; values map "spec.option" -> "internal_name".
translate = {
    'buildertron': {
        'targetname': 'targetname',
    },
    'app': {
        'title': 'title',
        'package.name': 'package_name',
        'package.domain': 'package_domain',
        'source.dir': 'source_dir',
        'source.include_exts': 'source_include_exts',
        'source.include_patterns': 'source_include_patterns',
        'source.exclude_dirs': 'source_exclude_dirs',
        'source.exclude_exts': 'source_exclude_exts',
        'source.exclude_patterns': 'source_exclude_patterns',
        'version': 'version',
        'version.regex': 'version_regex',
        'version.filename': 'version_filename',
        'requirements': 'requirements',
        'requirements.source.kivy': 'requirements_source_kivy',
        'garden_requirements': 'garden_requirements',
        'presplash.filename': 'presplash_filename',
        'icon.filename': 'icon_filename',
        'orientation': 'orientation',
        'services': 'services',
        # OSX Specific
        'author': 'author',
        'osx.python_version': 'osx_python_version',
        'osx.kivy_version': 'osx_kivy_version',
        # ANDROID SPECIFIC
        'android.presplash_color': 'android_presplash_color',
        'android.permissions': 'android_permissions',
        'android.api': 'android_api',
        'android.minapi': 'android_minapi',
        'android.sdk': 'android_sdk',
        'android.ndk': 'android_ndk',
        'android.private_storage': 'android_private_storage',
        'android.ndk_path': 'android_ndk_path',
        'android.sdk_path': 'android_sdk_path',
        'android.ant_path': 'android_ant_path',
        'android.skip_update': 'android_skip_update',
        'android.entrypoint': 'android_entrypoint',
        'android.whitelist': 'android_whitelist',
        'android.blacklist': 'android_blacklist',
        # NOTE(review): the *_src options below map to the SAME internal names
        # as android.whitelist/android.blacklist above, so the two spec options
        # collapse into one attribute — confirm whether they should instead map
        # to android_whitelist_src/android_blacklist_src.
        'android.whitelist_src': 'android_whitelist',
        'android.blacklist_src': 'android_blacklist',
        'android.add_jars': 'android_add_jars',
        'android.add_src': 'android_add_src',
        'android.add_aars': 'android_add_aars',
        'android.gradle_dependencies': 'android_gradle_dependencies',
        'p4a_branch': 'p4a_branch',
        'fullscreen': 'fullscreen',
        # OUYA
        'android.ouya_category': 'android_ouya_category',
        'android.ouya_icon_filename': 'android_ouya_icon_filename',
        'android.manifest_intent_filters': 'android_manifest_intent_filters',
        'android.add_libs_armeabi': 'android_add_libs_armeabi',
        'android.add_libs_armeabi_v7a': 'android_add_libs_armeabi_v7a',
        'android.add_libs_x86': 'android_add_libs_x86',
        'android.add_libs_mips': 'android_add_libs_mips',
        'android.manifest_launch_mode': 'android_manifest_launch_mode',
        'android.wakelock': 'android_wakelock',
        'android.meta_data': 'android_meta_data',
        'android.library_references': 'android_library_references',
        'android.logcat_filters': 'android_logcat_filters',
        'android.copy_libs': 'android_copy_libs',
        # NOTE(review): "add_activites" reproduces the upstream buildozer.spec
        # option name (typo and all) — do not "fix" the spelling here.
        'android.add_activites': 'android_add_activites',
        'android.arch': 'android_arch',
        'p4a.source_dir': 'p4a_source_dir',
        'p4a.local_recipes': 'p4a_local_recipes',
        'p4a.hook': 'p4a_hook',
        'p4a.bootstrap': 'p4a_bootstrap',
        # iOS specific
        'ios.kivy_ios_dir': 'ios_kivy_ios_dir',
        'ios.codesign_debug': 'ios_codesign_debug',
        'ios.codesign_release': 'ios_codesign_release',
    },
    'buildozer': {
        'log_level': 'log_level',
        'warn_on_root': 'warn_on_root',
        'build_dir': 'build_dir',
        'bin_dir': 'bin_dir',
    },
}
|
salRoid/Dynamic-Certificate-Generator
|
scripts/genCertificate.py
|
from PIL import Image, ImageDraw, ImageFont
import pandas as pd
import os
import sys
import json
generationType = sys.argv[1]
path = '/Users/salroid/Documents/GitHub/Dynamic-Certificate-Generator/'


def _render_certificate(certificate_id, get_value):
    """Draw all configured text fields onto one certificate and save it.

    certificate_id: id used to locate certificates/<id>.jpg and
        Configs/<id>.json.
    get_value: callable (field_index, property_dict) -> text to draw; this
        lets the interactive (input()) and bulk (CSV) modes share one
        rendering path instead of duplicating it.

    The output file is named after the first field's value (spaces removed)
    plus the certificate id, and written under pictures/.
    """
    path_certificate = path + 'certificates/' + str(certificate_id) + '.jpg'
    path_config = path + 'Configs/' + str(certificate_id) + '.json'
    img = Image.open(path_certificate)
    draw = ImageDraw.Draw(img)
    with open(path_config) as json_file:
        data = json.load(json_file)
    certificate_name = ''
    for i, prop in enumerate(data['properties']):
        value = get_value(i, prop)
        if i == 0:
            # First field (presumably the recipient name) names the output
            # file — TODO confirm against the config conventions.
            certificate_name = value.replace(" ", "")
        x1 = int(prop['x1'])
        x2 = int(prop['x2'])
        y1 = int(prop['y1'])
        font = ImageFont.truetype(str(prop['fontName']), int(prop['fontSize']))
        w, h = font.getsize(value)
        # Centre the text horizontally between x1 and x2, sitting above y1.
        mid_x = ((x2 - x1) / 2) + x1
        draw.text(xy=(mid_x - w / 2, y1 - h), text='{}'.format(value),
                  fill=(0, 0, 0), font=font)
    certificate_name = certificate_name + '_' + str(certificate_id)
    img.save(path + 'pictures/{}.jpg'.format(certificate_name))


# Single or Bulk
if (generationType == "Single"):
    # Interactive mode: prompt for the id and each field value.
    certificateId = input("Enter certificate Id: ")
    _render_certificate(
        certificateId,
        lambda i, prop: input("Enter " + str(prop['name']) + ": "))
else:
    # Bulk mode: one certificate per CSV row. Column 'id' selects the
    # template/config pair, columns item1..itemN hold the field values.
    df = pd.read_csv(path + 'DataSheet/dataSheet.csv')
    for _, row in df.iterrows():
        _render_certificate(
            row['id'],
            lambda i, prop, row=row: row['item' + str(i + 1)])
|
salRoid/Dynamic-Certificate-Generator
|
scripts/getCoordinates.py
|
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def onclick(event):
    """Print the click position in both pixel and data coordinates."""
    # Clicks outside the axes arrive with xdata/ydata == None, which would
    # make the %f conversions raise TypeError — ignore those clicks.
    if event.xdata is None or event.ydata is None:
        return
    print("button=%d, x=%d, y=%d, xdata=%f, ydata=%f" % (
        event.button, event.x, event.y, event.xdata, event.ydata))
# Show the certificate image on an interactive grid so the user can click
# field corners and read off pixel coordinates for the JSON config.
certificateName = sys.argv[1]
certificateId = certificateName.split('.')[0]  # accept 'id' or 'id.jpg'
path = '/Users/salroid/Documents/GitHub/Dynamic-Certificate-Generator/'
pathCertificate = path + 'certificates/' + certificateId + '.jpg'
img = mpimg.imread(pathCertificate)
fig, dx = plt.subplots()
dx.grid(which='major', linestyle='-', linewidth='0.5', color='red')
ax = plt.imshow(img)  # NOTE(review): 'ax' is an AxesImage here, not an Axes
fig = ax.get_figure()  # rebind fig to the figure actually holding the image
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
|
salRoid/Dynamic-Certificate-Generator
|
scripts/genConfigFile.py
|
import sys
import json
# TODO Validate Plot Certificate.jpg exists if not ask to run
# TODO Validate both the arguments
certificateId = sys.argv[1]
totalTextFields = int(sys.argv[2])

# Build one empty placeholder per requested text field; the user fills in
# the coordinates and font data afterwards (see getCoordinates.py).
data = {
    'properties': [
        {
            'name': "",
            'x1': "",
            'y1': "",
            'x2': "",
            'y2': "",
            'fontSize': "",
            'fontName': "",
        }
        for _ in range(totalTextFields)
    ]
}

# create JSON object
json_object = json.dumps(data, indent=4, sort_keys=True)
# create Certificate file name .json
pathConfig = '/Users/salroid/Documents/GitHub/Dynamic-Certificate-Generator/Configs/' + certificateId + '.json'
with open(pathConfig, 'w') as outfile:
    outfile.write(json_object)
|
YW81/DAML
|
lib/functions/triplet_loss.py
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import numpy as np
import chainer.functions as F
def triplet_loss(a, p, n, alpha=1.0):
    """Triplet margin loss.

    Args:
        a (~chainer.Variable): Anchor feature vectors, one row per example.
        p (~chainer.Variable): Positive examples (same class as the anchor
            in the corresponding row).
        n (~chainer.Variable): Negative examples (different class).
        alpha (float): The margin parameter.

    Returns:
        ~chainer.Variable: Mean hinged margin violation, halved.
    """
    pos_dist = F.sum((a - p) ** 2.0, axis=1)
    neg_dist = F.sum((a - n) ** 2.0, axis=1)
    violation = pos_dist - neg_dist + alpha
    return F.average(F.relu(violation)) / 2
|
theShmoo/DSA5RegelWikiParser
|
regelwikiparser/spiders/sonderfertigkeiten_spider.py
|
import scrapy
class FightAbilities(scrapy.Spider):
    """Crawl the Ulisses rule wiki's combat special-abilities section."""
    name = "fight"
    start_urls = [
        'http://www.ulisses-regelwiki.de/index.php/sf_kampfsonderfertigkeiten.html'
    ]

    def parse(self, response):
        """Follow every navigation link to an individual ability page.

        The original also extracted the anchor title into an unused local;
        that dead extraction has been removed.
        """
        for anchor in response.css('nav.mod_navigation a'):
            link = anchor.css('a::attr(href)').extract_first()
            if link is None:
                continue
            ability = response.urljoin(link)
            yield scrapy.Request(ability, callback=self.parse_ability)

    def parse_ability(self, response):
        """Yield the raw HTML text blocks of one ability page."""
        yield {
            'text': response.css('div.ce_text').extract()
        }
|
salesforce/smartACL
|
smartACL/tools.py
|
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import itertools
import sys
try:
import netaddr
except:
try:
import sys
sys.path.insert(0, 'smartACL/third_party')
sys.path.insert(0, 'third_party')
import third_party.netaddr as netaddr
except:
raise ImportError("The netaddr module is not installed, exiting...")
import linkdef
def DEBUG(debug_v, function, *args):
    # Print a debug trace line when debug_v is truthy (Python 2 print syntax).
    # function: label for the call site; args: values to dump alongside it.
    if debug_v:
        print "[DEBUG][" + str(function) + "]", args
def cidr_to_mask(cidr):
    """Convert a CIDR prefix length (e.g. 24) to a dotted-quad netmask string."""
    mask_value = (0xffffffff << (32 - int(cidr))) & 0xffffffff
    octets = []
    for shift in (24, 16, 8, 0):
        octets.append(str((mask_value >> shift) & 0xff))
    return '.'.join(octets)
def mask_to_wild(mask):
    """Convert a dotted-quad netmask to its inverse (Cisco wildcard) form."""
    inverted = []
    for octet in mask.split('.'):
        inverted.append(str(255 - int(octet)))
    return '.'.join(inverted)
'''
Not used, but let's keep it
def get_last_IP_wild(net, wild):
# Return the last valid IP from a network with a wildcard
netS = net.split('.')
wildS = wild.split('.')
iPos = 0
resS = []
for i in xrange(0, 4):
resS.append([])
for netOctect in netS:
iPos += 1
netB = format(int(netOctect), 'b').zfill(8)
wildB = format(int(wildS[iPos - 1]), 'b').zfill(8)
for x in xrange(8):
resS[iPos - 1].append(str(int(netB[x]) | int(wildB[x])))
res = ''
for i in resS:
res = res + '.' + str(int(''.join(i), 2))
return res[1:]
'''
def calc_previous_IP(IP):
    """Return the IP address immediately preceding *IP*, with octet borrow.

    e.g. '10.0.1.0' -> '10.0.0.255'. No underflow guard: '0.0.0.0' wraps
    around to '255.255.255.255', matching the original behaviour.
    """
    octets = [int(o) for o in IP.split('.')]
    pos = len(octets) - 1
    # Borrow through trailing zero octets, turning each into 255.
    while pos >= 0 and octets[pos] == 0:
        octets[pos] = 255
        pos -= 1
    if pos >= 0:
        octets[pos] -= 1
    return '.'.join(str(o) for o in octets)
def wild_is_contiguous(wild):
    """Return True if the wildcard's set bits are contiguous.

    A wildcard is contiguous when, reading the 32 bits MSB-first, no '0'
    appears after a '1' (i.e. all zeros, then all ones).
    """
    bits = ''.join(format(int(octet), 'b').zfill(8) for octet in wild.split('.'))
    # A '0' following a '1' anywhere in the 32-bit string breaks contiguity.
    return '10' not in bits
def split_non_contiguous(ip, wild):
    """Expand IP/non-contiguous-wildcard into a list of contiguous networks.

    Returns a list of 'host/wildcard' strings (CIDR-merged via netaddr) that
    together cover exactly the addresses matched by ip/wild.
    """
    # It will create contiguous networks from the IP/wildcard (where wildcard is non contiguous)
    # Although technically it will work with contiguous wildcard, it will create in most of the
    # cases /32 networks, so it's not useful for this input.
    def _contiguous_octect(wild):
        # Check if the binary is a contiguous.
        cont = True
        first_bit = wild[0]
        if first_bit == '1' and wild.count('0') > 0:
            cont = False
        else:
            for i in xrange(7):
                if first_bit == '0':
                    if wild[i+1] == '0':
                        continue
                    else:
                        first_bit = '1'
                        continue
                # NOTE(review): 'wildB' here is the OUTER loop variable, not
                # this function's 'wild' parameter. It works only because every
                # call site passes that same 'wildB' as the argument — confirm
                # before reusing this helper elsewhere.
                elif first_bit == '1' and wildB[i+1] != '1':
                    cont = False
                    break
        return cont
    net_list = []
    num_networks = 0
    number_ones = 0
    # Number of networks is defined by numbers of 1
    # To find the mask length we need to perform a logic add between wild and network and
    # count the number of zeros starting from the right until first 1
    wildS = wild.split('.')
    ipS = ip.split('.')
    '''
    Counting number of networks and defining non contiguous octects:
    - We have the variable wild_list_contiguous_octects that is a list of the status of each octect. This state can be:
        - cont -> contiguous octect
        - non-cont -> non contiguous octect
        - full1 -> octect with 11111111
    We have to count the number of "1" per octects. If the octect is non contiguous then it's marked.
    If the octect is contiguous:
        - If it's '11111111' is marked
        - If not, we need to confirm that the all previous octects are '11111111'.
    Examples:
     Standard non contiguous:
      0.0.0.2.255 :
       - We start with the 255 -> contiguous and full 1
       - 2 -> non contiguous
       - 0 and 0 -> contiguous
      Then the number of 1 in non-contiguous is one, so 2 ^ 1 = 2 networks
     Weird non contiguous mask:
      0.0.8.63 :
       - We start with 63 (01111111) -> contiguous, but NOT full
       - 8 -> Non contiguous
       - 0 and 0 -> contiguous
      Then the number of 1 in non-contiguous is one, so 2 ^ 1 = 2 networks
     More weird non contiguous mask:
      0.0.1.63 -> This a non-contiguous mask, but each of the octects are contiguous:
       - We start with 63 (01111111) -> contiguous, but NOT full
       - 1 -> contiguous BUT, the all the previous octects are not full1 so it's marked as NON contiguous
       - 0 and 0 -> contiguous
      Then the number of 1 in non-contiguous is one, so 2 ^ 1 = 2 networks
    '''
    # Walk octets right-to-left, classifying each one and accumulating the
    # count of '1' bits found in non-contiguous octets.
    iPos = 4
    wild_list_contiguous_octects = ['cont', 'cont', 'cont', 'cont']
    for octect in reversed(wildS):
        iPos -= 1
        wildB = format(int(octect), 'b').zfill(8)
        if _contiguous_octect(wildB):
            if wildB.count('1') == 8:
                wild_list_contiguous_octects[iPos] = 'full1'
            else:
                if iPos < 3:
                    for i in range(iPos, 3):
                        wildB_temp = format(int(wildS[i]), 'b').zfill(8)
                        if wild_list_contiguous_octects[i] != 'full1':
                            wild_list_contiguous_octects[iPos] = 'non-cont'
                            break
                else:
                    wild_list_contiguous_octects[iPos] = 'cont'
        else:
            wild_list_contiguous_octects[iPos] = 'non-cont'
        if wild_list_contiguous_octects[iPos] == 'non-cont':
            number_ones += wildB.count('1')
    num_networks = 2 ** number_ones
    '''
    # Calculating mask of networks.
    Once we identify the number of networks we have, we need to find which network mask is going to be used.
    Unfortunately is not clear this information in a non contiguous wild card. For example:
        10.0.0.0 0.0.2.0 -> Can be split into 10.0.0.0/24 and 10.0.2.0/24
    but also this one:
        10.0.0.0 0.0.2.255 -> Can be split into 10.0.0.0/24 and 10.0.2.0/24
    The first case will be only posible if the octect in the IP is 0.
    We defined a "logic switch" name weird_0_switch to indentify this behaviour.
    To calculated the netmask, we need to start to check "1" starting from right to left. We take all the contiguous "1" and
    we create the netmask using 2 ^ number of ones.
    '''
    iPos = 4
    netmask = ''
    weird_0_switch = True
    for octect in reversed(wildS):
        iPos -= 1
        wildB = format(int(octect), 'b').zfill(8)
        ipB = format(int(ipS[iPos]), 'b').zfill(8)
        if wild_list_contiguous_octects[iPos] == 'cont' or wild_list_contiguous_octects[iPos] == 'full1':
            # It seems that a valid wildcard non contiguous is:
            # 172.16.17.32 -> that would match eight /24 networks
            # but this wildcard should be represented as:
            # 192.168.127.12 -> clearly now a /24 networks.
            # This change from 0 to 255 should only happen starting from the right to the left
            # and before we find any other value.
            # The switch: weird_0_switch is used to control this.
            if wildB == '00000000' and ipB == '00000000' and weird_0_switch:
                netmask = '255' + '.' + netmask
            else:
                weird_0_switch = False
                netmask = str(2 ** wildB.count('1') - 1) + '.' + netmask
        else:
            netmask = '0' + '.' + netmask
    netmask = netmask[:-1]
    # Binary list of all combinations that we would use to create the networks
    # This is how it works (with only one octect as example):
    #
    # network_addres: 00100000
    # wildcard:       00011000
    #
    # In bit position 4 and 5 any value is accepted so it will create a the following list:
    # (00,01,10,11)
    #
    # It will create the network changing the bit 4,5 (where there is a 1 in the wildcard) with the
    # values generates in the binary list
    # Creating binary list
    bin_list = ["".join(seq) for seq in itertools.product("01", repeat=number_ones)]
    ipS = ip.split('.')
    ipB = ''
    for octect in ipS:
        ipB = ipB + '.' + format(int(octect), 'b').zfill(8)
    ipB = ipB[1:]
    wildS = wild.split('.')
    wildB = ''
    for octect in wildS:
        wildB = wildB + '.' + format(int(octect), 'b').zfill(8)
    wildB = wildB[1:]
    # Materialize one 32-bit network per combination, substituting the free
    # (non-contiguous '1') bit positions with the combination's bits.
    for bin_combination in bin_list:
        new_net = []
        b = 0
        iPos = -1
        for octect in wildB.split('.'):
            iPos += 1
            for i in xrange(len(octect)):
                if octect[i] == '.':
                    continue
                elif octect[i] == '0':
                    new_net.append(ipB.split('.')[iPos][i])
                else:
                    if wild_list_contiguous_octects[iPos] == 'cont' or wild_list_contiguous_octects[iPos] == 'full1':
                        new_net.append(ipB.split('.')[iPos][i])
                    else:
                        new_net.append(bin_combination[b])
                        b += 1
        net_list.append(str(int(''.join(new_net[0:8]), 2)) + '.' +
                        str(int(''.join(new_net[8:16]), 2)) + '.' +
                        str(int(''.join(new_net[16:24]), 2)) + '.' +
                        str(int(''.join(new_net[24:32]), 2)) +
                        '/' + netmask)
    # CIDR-merge the generated networks, then convert the merged netmasks
    # back to wildcard notation for the returned strings.
    netlist_obj = []
    for net in net_list:
        if net.split('/')[1] == '0.0.0.0':
            net = net.split('/')[0] + '/' + '255.255.255.255'
        netlist_obj.append(netaddr.IPNetwork(net))
    merged_list = netaddr.cidr_merge(netlist_obj)
    inv_oct = []
    net_list = []
    for i in merged_list:
        inv_oct = []
        bits = i.netmask.bits()
        for octects in bits.split('.'):
            inv_oct.append(str(int(''.join('1' if x == '0' else '0' for x in octects), base=2)))
        wildcard = '.'.join(inv_oct)
        host = str(i.ip)
        net_list.append(host + '/' + wildcard)
    return net_list
def color(style='', color='default'):
    """Build an ANSI escape sequence for a style and a 'fg/bg' color pair.

    color='default' returns the reset sequence; otherwise *color* must be
    'foreground/background' using names from linkdef.colors, and *style* a
    name from linkdef.styles.
    """
    if color == 'default':
        return '\x1b[0m'
    fg_name, bg_name = color.split('/')[0], color.split('/')[1]
    fg = 30 + linkdef.colors.index(fg_name)
    bg = 40 + linkdef.colors.index(bg_name)
    codes = [str(linkdef.styles.index(style)), str(fg), str(bg)]
    return '\x1b[%sm' % ';'.join(codes)
|
salesforce/smartACL
|
smartACL/link.py
|
<filename>smartACL/link.py<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import sys
import os
import argparse
from linkdef import *
import link_cisco
import link_juniper
import link_pol
import link_fortigate
import tools
import glob
def link(sourceIP, destIP, files, opts):
    """
    Main execution of Link
    :param sourceIP: Source IP
    :param destIP: Destination IP
    :param files: list of files to check
    :param opts: dictionary with extra parameters
    :return: List of files with TRUE/FALSE if there is a HIT or not
    """
    if sourceIP == '0.0.0.0' and destIP == '0.0.0.0':
        return 'ERROR: Please, at least Source IP or Destination IP can not be 0.0.0.0'
    if type(files) is not list or len(files) < 1:
        return 'ERROR: No files specified.'
    # Initial values for opts
    # Fill in any missing option with its positional default below.
    list_k = ['sport', 'dport','proto', 'acltype', 'showallmatches', 'showdenyall', 'hideallowall', 'matchanyport', 'summarized', 'capircadef', 'nooutput', 'ignore-line', 'debug']
    list_v_def = ['0',  # sport
                  '0',  # dport
                  'ip', # proto
                  '',   # acltype
                  False, # showallmatches
                  False, # showdenyall
                  True, # hideallowall
                  True, # matchanyport
                  False, # summarized
                  '',   # capircadef
                  False, # nooutput
                  '',    # ignore-line
                  False] # debug
    for i,v in enumerate(list_k):
        if v not in opts:
            opts[v] = list_v_def[i]
    # NOTE(review): when unset, ignore_lines stays the empty STRING, which the
    # len() check below treats the same as an empty list.
    ignore_lines = opts['ignore-line']
    if ignore_lines != '':
        ignore_lines = ignore_lines.split(',')
    sport = opts['sport']
    dport = opts['dport']
    # Normalize port ranges so the lower bound comes first.
    if '-' in dport:
        if int(dport.split('-')[0]) > int(dport.split('-')[1]):
            dport = dport.split('-')[1] + '-' + dport.split('-')[0]
    if '-' in sport:
        if int(sport.split('-')[0]) > int(sport.split('-')[1]):
            sport = sport.split('-')[1] + '-' + sport.split('-')[0]
    files_found = {}
    parsed = False
    # Check every (source, destination) pair against every policy file.
    for sIP in sourceIP.split(','):
        for dIP in destIP.split(','):
            print '############ CHECKING FLOW ############'
            print sIP, '->', dIP, 'Dest Port: ', dport, 'Source Port: ', sport, 'Protocol:', opts['proto']
            print '############## ACL CHECK ##############'
            for filename in files:
                policy = FWPolicy('', filename, opts['debug'])
                # ACL type: explicit option wins, otherwise the file extension.
                if opts['acltype'] != '':
                    type_ext = opts['acltype']
                else:
                    type_ext = filename.split('.')[len(filename.split('.')) - 1]
                if type_ext == 'acl' or type_ext == 'ncl' or type_ext == 'fcl':
                    parsed = link_cisco.acl_parser(filename, policy, remarkasname=False, DEBUG=opts['debug'])
                elif type_ext == 'jcl':
                    parsed = link_juniper.jcl_parser(filename, policy, opts['debug'])
                elif type_ext == 'pol':
                    if opts['capircadef'] == '':
                        print 'Can\'t parse POL file without a valid Capirca Definitions Directory. Ignoring file:', filename
                        continue
                    parsed = link_pol.pol_parser(filename, opts['capircadef'], policy, opts['debug'])
                elif type_ext == 'ftg':
                    parsed = link_fortigate.for_parser(filename, policy, DEBUG=opts['debug'])
                else:
                    print 'Can\'t detect ACL type. Ignoring file:', filename
                    continue # if the file extension is not known next one
                # TODO: Link should detect ff it's a file and can't detect is is ACL/JCL, etc. it should through an error
                if parsed:
                    if not opts['summarized']:
                        sys.stdout.write('Processing file: {0:<64}'.format(filename))
                    if opts['debug']: policy.print_policy()
                    # Ignoring lines
                    # Walk the rules backwards so removals don't shift the
                    # indices still to be visited.
                    if len(ignore_lines) > 0:
                        num_rule = policy.get_rules_number()
                        while num_rule > 0:
                            rule = policy.get_rule(num_rule)
                            if not rule[0].startswith('^'): # Disabled rules
                                if rule[0] in ignore_lines:
                                    policy.remove_rule(num_rule)
                            num_rule -= 1
                    rules_found = policy.link(sIP, dIP, dport, sport, opts['proto'], rules_exclude=[], show_deny=opts['showdenyall'], hide_allow_all=opts['hideallowall'], showallmatches=opts['showallmatches'], anyport=opts['matchanyport'])
                    if len(rules_found) > 0:
                        rules_names = []
                        for i in rules_found:
                            rules_names.append(policy.get_rule(i))
                        if filename not in files_found:
                            files_found[filename] = []
                        files_found[filename] = rules_names
                        if not opts['summarized']:
                            for i in rules_found:
                                # Re-print the header for every extra match so
                                # each HIT line is self-describing.
                                if rules_found.index(i) > 0:
                                    sys.stdout.write('Processing file: {0:<64}'.format(filename))
                                print tools.color('B', 'red/black') + ' - HIT!!' + tools.color()
                                policy.print_rule(i, color=True)
                            print
                    else:
                        if not opts['summarized']:
                            print ' - no match'
                    del policy
                else:
                    print '[ERROR] Can\'t parse file:', filename
    if opts['summarized']:
        print 'Files processed:', len(files)
        print 'Files NOT matched', len(files) - len(files_found)
        if len(files_found) > 0:
            print 'Files matched:'
            for i in files_found:
                print i
        else:
            print 'NOT matched in any file'
    return files_found
"""
MAIN
"""
if __name__ == '__main__': # pragma: no cover
parser = argparse.ArgumentParser()
parser.add_argument('Source', help='Source IP/Network to check. Use 0.0.0.0 for ANY. You can specify more than one source separating them by "," (no spaces)')
parser.add_argument('Destination', help='Destination IP/Network to check. Use 0.0.0.0 for ANY. You can specify more than one destination separating them by "," (no spaces)')
parser.add_argument('File', help='File or Directory to check the IPs (you can use * )', nargs='*')
parser.add_argument('--protocol', help='Value ip/tcp/udp/icmp. IP by default.', default='ip')
parser.add_argument('--sport', help='Source port. It could be a range (use - to separate ports) (ANY by default).', default='0')
parser.add_argument('--dport', help='Destination port. It could be a range (use - to separate ports) (ANY by default).', default='0')
parser.add_argument('--match-any-range-port', dest='matchanyport', help='If a range is used for a port, then any match included in the range is shown', action='store_true')
parser.add_argument('--showdenyall', help='Show matches with ANY ANY DENY', action='store_true')
parser.add_argument('--hideallowall', help='Hide matches with ANY ANY PERMIT', action='store_true')
parser.add_argument('--showallmatches', help='Show all matches instead of stopping with the first found', action='store_true')
parser.add_argument('--acltype', help='Specifiy the ACL type: acl,ncl,jcl,pol,ftg')
parser.add_argument('--summarized', help='Show only a summary for the flow/s requested', action='store_true')
parser.add_argument('--capircadef', help='Capirca definitions directory', default='')
parser.add_argument('--ignore-line', dest='ignore_term', help='Ignore the following lines (ACL remark for Cisco or Term name for Juniper)', default='')
parser.add_argument('--nooutput', help='Hide any output (useful as module)', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--foo', help=argparse.SUPPRESS, action='store_true') # no output at all, just for testing code
args = parser.parse_args()
if args.foo:
null = open(os.devnull, 'w')
sys.stdout = null
opts = {}
sIP = args.Source
dIP = args.Destination
files = ''
for i in args.File:
if '*' in i:
# Windows platform
files = glob.glob(args.File)
if files == '':
files = args.File
opts['proto'] = args.protocol
opts['dport'] = args.dport
opts['sport'] = args.sport
opts['matchanyport'] = False
if args.matchanyport:
opts['matchanyport'] = True
opts['acltype'] = ''
if args.acltype:
if args.acltype == 'pol' and not args.capircadef:
print 'ERROR: If you want to use POL (Capirca Policies) you need to specify Capirca Definitions Directory with --capircadef'
sys.exit(-1)
opts['acltype'] = args.acltype
opts['showdenyall'] = args.showdenyall
opts['hideallowall'] = args.hideallowall
opts['showallmatches'] = args.showallmatches
opts['summarized'] = args.summarized
opts['capircadef'] = args.capircadef
opts['ignore-line'] = args.ignore_term
opts['nooutput'] = args.nooutput
opts['debug'] = args.debug
r = link(sIP, dIP, files, opts)
if type(r) is not dict:
print r
print r
sys.exit()
|
salesforce/smartACL
|
tests/test_smartShadow.py
|
<filename>tests/test_smartShadow.py
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import unittest
import sys
import os
from smartACL import linkdef
from smartACL import link_cisco
from smartACL import link_juniper
from smartACL import smartACL
class smartTest(unittest.TestCase):
    def setUp(self):
        """Load fixture paths and the expected smartShadow2 output per fixture.

        Each results_fN is a two-element list of dicts (presumably shadowed
        rules and deny-overlap rules keyed by the shadowing rule — TODO
        confirm against smartACL.smartShadow2). stdout is silenced so the
        parsers' prints don't pollute the test run.
        """
        self.file1 = 'tests/test_data/test_acl_smartShadow1'
        self.file2 = 'tests/test_data/test_acl_smartShadow2'
        self.file3 = 'tests/test_data/test_acl_smartShadow3'
        self.file4 = 'tests/test_data/test_acl_smartShadow4'
        self.file5 = 'tests/test_data/test_acl_smartShadow5'
        self.file6 = 'tests/test_data/test_acl_smartShadow6'
        self.file7 = 'tests/test_data/test_acl_smartShadow7'
        self.file8 = 'tests/test_data/test_acl_smartShadow8'
        self.file9 = 'tests/test_data/test_acl_smartShadow9'
        self.file10 = 'tests/test_data/test_acl_smartShadow10'
        self.file11 = 'tests/test_data/test_acl_smartShadow11'
        self.file12 = 'tests/test_data/test_acl_smartShadow12'
        self.results_f1 = [{'permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127': 'permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.63 eq 7080': 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.128 0.0.0.127 eq 7080': 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.64 0.0.0.63 eq 7080': 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7080'}, {}]
        self.results_f2 = [{'permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127': 'permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7080': 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.63 eq 7080\npermit tcp 10.231.69.128 0.0.0.127 10.0.0.64 0.0.0.63 eq 7080\npermit tcp 10.231.69.128 0.0.0.127 10.0.0.128 0.0.0.127 eq 7080'}, {}]
        self.results_f3 = [{}, {}]
        self.results_f4 = [{'deny tcp 10.0.0.0 0.255.255.255 10.0.0.0 0.0.255.255 eq 80': 'deny tcp 10.0.0.0 0.255.255.255 10.0.0.0 0.255.255.255 eq 80'}, {'deny tcp 10.0.0.0 0.255.255.255 10.0.0.0 0.0.255.255 eq 80': 'deny tcp 10.0.0.0 0.255.255.255 10.0.0.0 0.255.255.255 eq 80'}]
        self.results_f5 = [{}, {'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.255 eq 80': 'deny tcp 10.0.0.0 0.255.255.255 10.0.0.0 0.255.255.255 eq 80'}]
        self.results_f6 = [{'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.127 eq 80': 'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.255 eq 80'}, {}]
        self.results_f7 = [{'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.127 eq 80': 'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.255 eq 80'}, {}]
        self.results_f8 = [{'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.127 eq 80': 'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.255 eq 80'}, {'permit tcp host 10.0.0.10 host 10.10.10.10 eq 80': 'deny tcp 10.0.0.0 0.255.255.255 10.0.0.0 0.255.255.255 eq 80'}]
        self.results_f9 = [{'term testt5': 'term testt6', 'term testt2': 'term testt1'}, {'term testt6': 'term testt3'}]
        self.results_f10 = [{'term testt2a': "term testt1{1{1{['10.0.0.0/255.255.255.0', '10.0.1.0/255.255.255.0']", "term testt2b{8{1{['192.168.3.11/255.255.255.192', '10.0.1.0/255.255.255.0']": "term testt1{1{3{['172.16.58.3/255.255.255.0', '10.0.1.0/255.255.255.0']", "term testt6{17{1{['10.0.0.0/255.255.255.0', '10.0.1.0/255.255.255.192']": "term testt1{1{1{['10.0.0.0/255.255.255.0', '10.0.1.0/255.255.255.0']"}, {'term testt5': "term testt3{10{1{['10.0.0.0/255.0.0.0', '10.0.0.0/255.0.0.0']\nterm testt3{10{2{['10.0.0.0/255.0.0.0', '172.16.58.3/255.0.0.0']", "term testt6{17{1{['10.0.0.0/255.255.255.0', '10.0.1.0/255.255.255.192']": "term testt3{10{1{['10.0.0.0/255.0.0.0', '10.0.0.0/255.0.0.0']"}]
        self.results_f11 = [{'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.127 eq 80': 'permit tcp 10.0.0.0 0.0.0.255 10.0.1.0 0.0.0.255 eq 80'}, {'permit tcp 192.168.127.12 0.0.0.255 10.0.1.0 0.0.0.255 eq 80': 'deny tcp any any eq 80', 'permit tcp host 10.0.0.10 host 10.10.10.10 eq 80': 'deny tcp any any eq 80'}]
        self.results_f12 = [{'permit udp 0.0.0.0 0.0.0.0 eq 67 255.255.255.255 0.0.0.0 eq 68': 'permit udp any eq 67 255.255.255.255 0.0.0.0 eq 68'}, {}]
        # Silence parser/report prints for the duration of each test.
        null = open(os.devnull, 'w')
        self.stdout = sys.stdout
        sys.stdout = null
def test_smartShadow1(self):
    """Cisco ACL file 1: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file1)
    link_cisco.acl_parser(self.file1, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f1)
def test_smartShadow2(self):
    """Cisco ACL file 2: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file2)
    link_cisco.acl_parser(self.file2, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f2)
def test_smartShadow3(self):
    """Cisco ACL file 3: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file3)
    link_cisco.acl_parser(self.file3, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f3)
def test_smartShadow4(self):
    """Cisco ACL file 4: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file4)
    link_cisco.acl_parser(self.file4, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f4)
def test_smartShadow5(self):
    """Cisco ACL file 5: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file5)
    link_cisco.acl_parser(self.file5, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f5)
def test_smartShadow6(self):
    """Cisco ACL file 6: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file6)
    link_cisco.acl_parser(self.file6, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f6)
def test_smartShadow7(self):
    """Cisco ACL file 7: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file7)
    link_cisco.acl_parser(self.file7, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f7)
def test_smartShadow8(self):
    """Cisco ACL file 8: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file8)
    link_cisco.acl_parser(self.file8, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f8)
def test_smartShadow9(self):
    """Juniper JCL file 9: rules are IP-split before shadow detection."""
    pol = linkdef.FWPolicy('', self.file9)
    link_juniper.jcl_parser(self.file9, pol)
    pol.split_ips()
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f9)
def test_smartShadow10(self):
    """Juniper JCL file 10: rules are IP-split before shadow detection."""
    pol = linkdef.FWPolicy('', self.file10)
    link_juniper.jcl_parser(self.file10, pol)
    pol.split_ips()
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f10)
def test_smartShadow11(self):
    """Cisco ACL file 11: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file11)
    link_cisco.acl_parser(self.file11, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f11)
def test_smartShadow12(self):
    """Cisco ACL file 12: smartShadow2 must report the expected shadow map."""
    pol = linkdef.FWPolicy('', self.file12)
    link_cisco.acl_parser(self.file12, pol)
    self.assertEqual(smartACL.smartShadow2(pol), self.results_f12)
def tearDown(self):
    # Restore the real stdout saved in setUp (the tests run with stdout
    # redirected to os.devnull to silence the parsers' output).
    sys.stdout = self.stdout
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
    unittest.main()
|
salesforce/smartACL
|
smartACL/link_juniper.py
|
<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
from linkdef import *
import sys
def cidr_to_mask(cidr):
    """Convert a CIDR prefix length (e.g. '24') into a dotted-quad netmask string."""
    bits = 0xffffffff << (32 - int(cidr))
    octets = [str((bits >> shift) & 0xff) for shift in (24, 16, 8, 0)]
    return '.'.join(octets)
def parse_data_store(policy, rulenumber, attribute, data):
    """Normalise a parsed JCL value and store it on rule `rulenumber`.

    Address attributes get an explicit netmask appended (CIDR prefixes are
    expanded, bare hosts get /255.255.255.255; IPv6 entries are left as-is).
    Port attributes written as '[ a b c ]' become a comma-separated list.
    """
    if 'address' in attribute:
        items = data.split(',')
        for idx, item in enumerate(items):
            if ':' in item:
                # IPv6 address: not handled, stored untouched.
                pass
            elif '/' in item:
                pieces = item.split('/')
                items[idx] = pieces[0] + '/' + cidr_to_mask(pieces[1])
            else:
                items[idx] = item + '/255.255.255.255'
        data = ','.join(items)
    if 'port' in attribute:
        if '[' in data:
            stripped = data.replace('[', '').replace(']', '').strip()
            data = ','.join(stripped.split())
    policy.set_rule_dyn_data(rulenumber, attribute, data)
def jcl_parser(filename, policy, DEBUG=False):
    """
    Parsing Juniper ACL
    :param filename: could be a file or if it's use as module a Python list with all the ACLs
    :param policy: policy object
    :param DEBUG: DEBUG flag
    :return: True
    """
    lfilter = False       # True once inside a "family inet { ... }" filter section
    lterm = False         # True while parsing the body of a "term"
    lthen = False         # True while parsing a "then { ... }" action section
    laction_set = False   # True once accept/discard was seen for the current "then"
    filterjcl = ''
    termjcl = ''
    value = ''            # accumulated (comma-joined) value for the current data section
    rulenumber = 0
    jcl = ''
    oldline = ''
    data_section = ''     # name of the JCL attribute currently being collected
    # "pre-parsing" the file: rebuild the text so that a lone '{' is glued
    # back onto the line it belongs to.
    if type(filename) is not list:
        f = open(filename, 'r')
    else:
        f = filename
    for line in f:
        line = line.strip()
        if line == '{':  # fixing some JCL with only '{' that belongs to the previous line
            oldline = oldline + ' ' + line
        if not oldline == '':
            if jcl == '':
                jcl = oldline
            else:
                jcl = jcl + '\n' + oldline
        oldline = line
    jcl = jcl + '\n' + oldline  # Writing the last line
    for line in jcl.split('\n'):
        line = line.strip()
        if lfilter:
            # Skip comment and empty lines inside the filter.
            if line.startswith('/*') or line.startswith('**') or line.startswith('*/') or line == '':
                continue
            if DEBUG:
                print '[DEBUG]', line
            firstw = line.split()[0]
            if ';' in firstw:
                firstw = firstw[:-1]
            if firstw in jclbanwords and \
                    'fragment-offset 0' not in line:  # Exception for fragment-offset 0 -> used in ICMP and only match for the first fragmented packet
                # A banned keyword invalidates the whole term: mark its name
                # with a leading '^', blank the rule and wait for the next term.
                termjcl = '^' + policy.get_rule_name(rulenumber)
                policy.set_rule_name(rulenumber, termjcl)
                policy.set_empty_rule(rulenumber)
                lterm = False
                value = ''
                data_section = ''
            if '{' in line:
                # Checking last char is '{'
                if line[-1:] != '{':
                    print '[ERROR] "{" is not last character'
                    continue
                if line.startswith('filter'):
                    filterjcl = line.split('{')[0].strip()
                    policy.set_name(filterjcl)
                    value = ''
                    rulenumber = 0
                elif line.startswith('term'):
                    termjcl = line.split('{')[0].strip()
                    rulenumber = policy.new_empty_rule(False)
                    policy.set_rule_name(rulenumber, termjcl)
                    value = ''
                elif line.startswith('inactive'):
                    # If the term line is inactive we still add it, but empty —
                    # same handling as a regular 'term' line...
                    termjcl = '^' + line.split('{')[0].strip()
                    rulenumber = policy.new_empty_rule(False)
                    policy.set_rule_name(rulenumber, termjcl)
                    # ...and then we remove (blank) the rule.
                    policy.set_empty_rule(rulenumber)
                    lterm = False
                    value = ''
                    data_section = ''
                elif line.startswith('then') and lterm:
                    lthen = True
                    laction_set = False
            elif '}' in line and lterm:
                # Closing brace: flush the collected data section (if any).
                if not data_section == '':
                    parse_data_store(policy, rulenumber, jcldict[data_section], value)
                if lthen and not laction_set:  # Some rules could have a then section but not 'accept/discard', these lines can't be processed
                    policy.set_empty_rule(rulenumber)
                value = ''
                data_section = ''
                lthen = False
                laction_set = False
            if firstw in jclwords and lterm:
                if '{' in line:
                    data_section = firstw
                else:
                    # The value is directly there. There are not {}
                    data_section = firstw
                    # jcldict maps the Juniper keywords onto FWRule attribute names.
                    parse_data_store(policy, rulenumber, jcldict[data_section], line.split(' ', 1)[1][:-1])
                    value = ''
                    data_section = ''
            elif not '{' in line and not '}' in line and lterm:
                if lthen:
                    if firstw == 'accept':
                        laction_set = True
                        policy.set_rule_action(rulenumber, True)
                    elif firstw == 'discard':
                        laction_set = True
                        policy.set_rule_action(rulenumber, False)
                else:
                    # Juniper allow to add "inactive" in any place of the rule, to have only part of it inactive
                    if value == '' and not line.startswith('inactive'):
                        value = line.split(';')[0]  # Removing ";"
                    elif not line.startswith('inactive'):
                        value = value + ',' + line.split(';')[0]
            # Flag term-body parsing only after the 'term ... {' line itself
            # has been fully processed above.
            if '{' in line and line.startswith('term'):
                lterm = True
        # Entering a 'family inet {' section enables filter parsing.
        if '{' in line and line.startswith('family inet'):
            lfilter = True
    return True
"""
MAIN
"""
if __name__ == "__main__":
policy = FWPolicy('', sys.argv[1])
jcl_parser(sys.argv[1], policy)
policy.print_policy()
print '-----------------'
print 'SPLITTING POLICY'
print '-----------------'
policy.split_ips()
policy.print_policy()
|
salesforce/smartACL
|
tests/test_Link.py
|
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import unittest
import os
import sys
import glob
from smartACL import link
class smartTest(unittest.TestCase):
    """End-to-end tests for link.link() across the sample ACL/JCL/NCL/FTG files."""

    def setUp(self):
        self.files_acl = './tests/test_data/test_acl_link.acl'
        self.files_ncl = './tests/test_data/test_acl_link.ncl'
        self.files_jcl = './tests/test_data/test_acl_link.jcl'
        self.files_ftg = './tests/test_data/test_acl_link.ftg'
        self.files_all = './tests/test_data/test_acl_link*'
        # Each input is a (source IP, destination IP) pair.
        self.input1 = ['8.8.8.8', '9.9.9.9']
        self.input2 = ['1.2.3.5', '10.0.0.1']
        self.input3 = ['10.0.0.1', '10.0.0.2']
        self.input4 = ['10.0.1.129', '10.0.1.1']
        self.input5 = ['172.16.58.3', '10.0.0.2']
        self.input6 = ['172.16.58.3', '10.0.0.3']
        self.results_acl_basic = {}
        self.results_ncl_basic = {}
        self.results_jcl_basic = {}
        self.results_all = {'./tests/test_data/test_acl_link.acl': [['permit tcp host 1.2.3.5 host 10.0.0.1 eq 80', '1.2.3.5/255.255.255.255', '10.0.0.1/255.255.255.255', '80', '0', 'tcp', True, False, '', '', False, False, ''], ['permit tcp host 1.2.3.5 gt 7000 any', '1.2.3.5/255.255.255.255', '0.0.0.0/0.0.0.0', '0', '7001-65535', 'tcp', True, False, '', '', False, False, '']],
                            './tests/test_data/test_acl_link.ncl': [['permit tcp host 1.2.3.5 gt 7000 any', '1.2.3.5/0.0.0.0', '0.0.0.0/255.255.255.255', '0', '7001-65535', 'tcp', True, True, '', '', False, False, ''], ['permit tcp host 1.2.3.5 lt 8000 any', '1.2.3.5/0.0.0.0', '0.0.0.0/255.255.255.255', '0', '0-7999', 'tcp', True, True, '', '', False, False, '']]}
        self.results_ftg_basic1 = {'./tests/test_data/test_acl_link.ftg': [['', '10.0.0.1/255.255.255.255', '10.0.0.2/255.255.255.255', '80,443', '', 'tcp', True, False, 'port1', 'any', False, False, '(Rule: 1) ']]}
        self.results_ignore_line = {'./tests/test_data/test_acl_link.acl': [['permit ip 10.0.0.0/8 any eq http', '10.0.0.0/255.0.0.0', '0.0.0.0/0.0.0.0', '0', '0', 'ip', True, False, '', '', False, False, ''], ['permit 10.0.0.1 172.16.58.3', '10.0.0.1/11.0.0.1', '0.0.0.0/0.0.0.0', '0', '0', 'ip', True, False, '', '', False, False, '']]}
        self.results_negated1 = {'./tests/test_data/test_acl_link.ftg': [['', '172.16.31.10/255.255.255.0', '10.0.0.0/255.255.255.0', '100-200', '50-80', 'tcp', True, False, 'any', 'any', False, False, '(Rule: 5) ']]}
        self.results_negated2 = {'./tests/test_data/test_acl_link.ftg': [['', '172.16.31.10/255.255.255.0', '10.0.0.2/255.255.255.255', '1-65535', '', 'tcp', True, False, 'any', 'any', False, True, '(Rule: 3) ']]}
        self.maxDiff = None

    def _silent_link(self, src, dst, files, options):
        """Run link.link() with stdout suppressed.

        Fixes two defects of the original inline pattern: the os.devnull file
        object was never closed (resource leak), and sys.stdout was only
        restored after the assertion, so a failing test left stdout redirected.
        """
        null = open(os.devnull, 'w')
        saved_stdout = sys.stdout
        sys.stdout = null
        try:
            return link.link(src, dst, files, options)
        finally:
            sys.stdout = saved_stdout
            null.close()

    def test_Link_acl_basic(self):
        self.assertEqual(link.link(self.input1[0], self.input1[1], glob.glob(self.files_acl), {}), self.results_acl_basic)

    def test_Link_ncl_basic(self):
        self.assertEqual(link.link(self.input1[0], self.input1[1], glob.glob(self.files_ncl), {}), self.results_ncl_basic)

    def test_Link_jcl_basic(self):
        self.assertEqual(link.link(self.input1[0], self.input1[1], glob.glob(self.files_jcl), {}), self.results_jcl_basic)

    def test_Link_ftg_basic(self):
        options = {'dport': '443', 'proto': 'tcp'}
        self.assertEqual(link.link(self.input3[0], self.input3[1], glob.glob(self.files_ftg), options), self.results_ftg_basic1)

    def test_Link_all(self):
        options = {'dport': '80', 'proto': 'tcp', 'showallmatches': True}
        self.assertEqual(self._silent_link(self.input2[0], self.input2[1], sorted(glob.glob(self.files_all)), options), self.results_all)

    def test_Link_ignore_line(self):
        options = {'dport': '443', 'proto': 'tcp', 'showallmatches': True, 'ignore-line': 'term testt6'}
        self.assertEqual(self._silent_link(self.input4[0], self.input4[1], sorted(glob.glob(self.files_all)), options), self.results_ignore_line)

    def test_Link_negated1(self):
        options = {'dport': '101', 'proto': 'tcp'}
        self.assertEqual(self._silent_link(self.input5[0], self.input5[1], sorted(glob.glob(self.files_all)), options), self.results_negated1)

    def test_Link_negated2(self):
        options = {'dport': '101', 'proto': 'tcp'}
        self.assertEqual(self._silent_link(self.input6[0], self.input6[1], sorted(glob.glob(self.files_all)), options), self.results_negated2)
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
    unittest.main()
|
salesforce/smartACL
|
smartACL/linkdef.py
|
<reponame>salesforce/smartACL<filename>smartACL/linkdef.py
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
try:
import netaddr
except:
try:
import sys
sys.path.insert(0, 'smartACL/third_party')
sys.path.insert(0, 'third_party')
import third_party.netaddr as netaddr
except:
raise ImportError("The netaddr module is not installed, exiting...")
from tools import wild_is_contiguous
from tools import split_non_contiguous
from tools import color as tools_color
from tools import DEBUG as tools_debug
from tools import cidr_to_mask
from tools import mask_to_wild
# Ports Numbers: service-name -> port string, used when expanding named ports.
port_number = {'ftpdata': '20',  # In Cisco is ftp-data but '-' is used a separator for ranges
               'ftp': '21',
               'bootps': '67',
               'bootpc': '68',
               'http': '80',
               'ntp': '123',
               'ldap': '389',
               'https': '443'}
# Specific Juniper words to check ACL: maps JCL keywords to FWRule attribute names.
jcldict = {'source-address': 'source_address',
           'destination-address': 'destination_address',
           'destination-port': 'destination_port',
           'source-port': 'source_port',
           'protocol': 'protocol'}
# JCL keywords that open a data section inside a term.
jclwords = ['source-address', 'destination-address', 'destination-port', 'source-port', 'protocol', 'action']
# Words 'banned' for any Juniper ACL. If the word is found the ACL term is ignored.
jclbanwords = ['tcp-established', 'source-prefix-list', 'destination-prefix-list', 'next-header', 'fragment-offset']
# NOTE(review): colors/styles are presumably the palettes accepted by tools.color — confirm against tools.py.
colors = ['black', 'red', 'green', 'yellow', 'blue', 'purple', 'cyan', 'white']
styles = ['D', 'B', 'S', 'I', 'U', 'D2', 'D3', 'IN']
# NOTE(review): upper bound related to splitting non-contiguous wildcards — usage not visible here, confirm.
NONCONT_LIMIT = 256
class FWRule (object):
DEBUG = False
# Source/Destination address would allow the following formats (could be a list separated with ,:
# - X.X.X.X/Y.Y.Y.Y -> IP / net mask
# - X.X.X.X_Y.Y.Y.Y -> IP / Wildcard
def __init__(self):
self.rulenumber = 0 # Rule number
self.name = '' # Rule name
self.source_address = '' # Source address (as it was read from the ACL)
self.destination_address = '' # Destination address (as it was read from the ACL)
self.destination_port = '' # Destination port (can include operators)
self.source_port = '' # Source port (can include operators)
self.protocol = '' # Protocol
self.permit = False # Permit (Accept)
self.wildcard = False # If it's a rule with a valid wildcard (usually a Cisco ACL)
self.source_name = '' # Name for source (useful in case of Pol files)
self.dest_name = '' # Name for destination (useful in case of Pol files)
self.source_negated = False # Importing firewall rules it's possible to negate source (so everything "but")
self.dest_negated = False # Importing firewall rules it's possible to negate source (so everything "but")
self.comment = '' # Comment for the rule
#################################
# GET Methods #
#################################
def get_rule_action(self):
return self.permit
def get_rule(self):
rule=[]
rule.append(self.name) # 0
rule.append(self.source_address) # 1
rule.append(self.destination_address) # 2
rule.append(self.destination_port) # 3
rule.append(self.source_port) # 4
rule.append(self.protocol) # 5
rule.append(self.permit) # 6
rule.append(self.wildcard) # 7
rule.append(self.source_name) # 8
rule.append(self.dest_name) # 9
rule.append(self.source_negated) # 10
rule.append(self.dest_negated) # 11
rule.append(self.comment) # 12
return rule
def get_rule_name(self):
return self.name
def get_rule_number(self):
return self.rulenumber
def get_contiguous(self):
"""
Return the number of possible networks matching a non contiguous wildcard for source and destination
Return 0 is Source and Destination has a contiguous wildcard
:return: Integer (check description)
"""
def __number_nets(wild):
# The number of matches in a wildcard is defined by the number of 1
x = 0
wildS = wild.split('.')
for octect in wildS:
wildB = format(int(octect), 'b').zfill(8)
x += wildB.count('1')
return x
net_number = 0
if self.wildcard:
if self.source_address != 'any' and self.source_address != '0.0.0.0/255.255.255.255':
if not wild_is_contiguous(self.source_address.split('/')[1]):
net_number = __number_nets(self.source_address.split('/')[1])
if self.destination_address != 'any' and self.destination_address != '0.0.0.0/255.255.255.255':
if not wild_is_contiguous(self.destination_address.split('/')[1]):
if net_number == 0:
net_number = __number_nets(self.destination_address.split('/')[1])
else:
net_number = net_number * __number_nets(self.destination_address.split('/')[1])
return net_number
#################################
# SET Methods #
#################################
def set_rule_data(self, sAddress='', dAddress='', dPort='', sPort='', protocol='', ACCEPT=False, wildcard=False, name='-1', rulenumber=-1, source_name='', dest_name='', source_negated=False, dest_negated=False):
if rulenumber >= 0:
self.rulenumber = rulenumber
if name != '-1':
self.name = name
self.source_address = sAddress
self.destination_address = dAddress
self.destination_port = dPort
self.source_port = sPort
self.protocol = protocol
self.permit = ACCEPT
self.wildcard = wildcard
self.source_name = source_name
self.dest_name = dest_name
self.source_negated = source_negated
self.dest_negated = dest_negated
def set_name(self, name):
self.name = name
def set_comment(self, comment):
self.comment = comment
def set_rulenumber(self, rulenumber):
self.rulenumber = rulenumber
def set_dyn_data(self, attribute, data):
setattr(self, attribute, data)
def set_action(self, accept):
self.permit = accept
def set_empty(self):
self.set_rule_data(sAddress='',dAddress='',dPort='',sPort='',protocol='', ACCEPT=False, wildcard=False, source_name='' ,dest_name='')
def set_debug(self):
self.DEBUG = True
def set_source(self, address):
self.source_address = address
def set_destination(self, address):
self.destination_address = address
def set_source_port(self, port):
self.source_port = port
def set_destination_port(self, port):
self.destination_port = port
#################################
# Internal Methods #
#################################
def _checkIPWild(self, net, wild, IP):
"""
Check the IP is included in a network/wildcard
:param net: network address to be checked
:param wild: wildcard for the network
:param IP: IP to discover if it's a match
:return: TRUE/FALSE
"""
if IP == '0.0.0.0':
return True
netS = net.split('.')
ipS = IP.split('.')
wildS = wild.split('.')
iPos = 0
for netOctect in netS:
iPos += 1
netB = format(int(netOctect), 'b').zfill(8)
ipB = format(int(ipS[iPos - 1]), 'b').zfill(8)
wildB = format(int(wildS[iPos - 1]), 'b').zfill(8)
for x in xrange(8):
if wildB[x] == '0':
xor = (int(netB[x]) ^ int(ipB[x]))
if xor:
break
if xor:
break
return xor == 0
def _checkNetWild(self, net, wild, net_check):
"""
Check if net_check is included in a network/wildcard
:param net: network address
:param mask: mask address
:param net_check: network to find if it's included.
:return:
"""
if '/' not in net_check:
return False
if net == '0.0.0.0':
return True
if not wild_is_contiguous(wild):
net_list = split_non_contiguous(net, wild)
else:
net_list = [net + '/' + wild]
if not wild_is_contiguous(net_check.split('/')[1]):
net_check_list = split_non_contiguous(net_check.split('/')[0], net_check.split('/')[1])
else:
net_check_list = [net_check]
'''
Now we should have two lists without any non-contiguous IP included. Time to go through both of them
checking that all networks to be "checked" are included
'''
net_checked = []
for net1 in net_list:
try:
if net1.split('/')[1] == '0.0.0.0':
# When using wildcard 0.0.0.0 is 255.255.255.255, but Netaddr use the 0.0.0.0 ask mask for ALL, so this needs to be changed
net1 = net1.split('/')[0] + '/' + '255.255.255.255'
net_object_ori = netaddr.IPNetwork(net1)
except:
return None
if len(net_check_list) == 0:
break
net_check_temp = net_check_list[:]
for net2 in net_check_temp:
try:
net_object_des = netaddr.IPNetwork(net2)
except:
return None
if net2 in net_checked:
continue
if net_object_des in net_object_ori:
net_checked.append(net2)
net_check_list.remove(net2)
return len(net_check_list) == 0
def _checkIPMask(self, net, mask, IP):
"""
Check if the IP is included in a network/network mask
:param net: network address to be checked
:param mask: network mask for the network
:param IP: IP to discover if it's a match
:return: TRUE/FALSE
"""
if IP == '0.0.0.0' or mask == '0.0.0.0':
return True
netS = net.split('.')
ipS = IP.split('.')
maskS = mask.split('.')
iPos = 0
eq = False
for netOctect in netS:
iPos += 1
netB = format(int(netOctect), 'b').zfill(8)
ipB = format(int(ipS[iPos - 1]), 'b').zfill(8)
maskB = format(int(maskS[iPos - 1]), 'b').zfill(8)
for x in xrange(8):
if maskB[x] == '1':
eq = (int(netB[x]) == int(ipB[x]))
if not eq:
break
if not eq:
break
return eq
def _checkNetMask(self, net, mask, net_check):
"""
Check if net_check is included in a network/mask
:param net: network address
:param mask: mask address
:param net_check: network to find if it's included
:return:
"""
if net_check == '0.0.0.0/0':
return False
if net == '0.0.0.0' and mask == '0':
return True
try:
net_object_ori = netaddr.IPNetwork(net + '/' + mask)
net_object_dest = netaddr.IPNetwork(net_check)
except:
return False
return net_object_dest in net_object_ori
def _checkPort(self, DP, Port, anyport, strict_search):
"""
Check if "Port" is matched or not for a Source or Destination port of the rule (DP switch)
:param DP: TRUE -> Destination Port, FALSE -> Source Port
:param Port: Port to check
:param anyport: switch to know if with any port matched is enough (usually with a port range)
:return: TRUE/FALSE
"""
lport = False
if DP:
list_port = self.destination_port
else:
list_port = self.source_port
if self.DEBUG:
tools_debug(self.DEBUG, '_checkPort', DP, Port, list_port, anyport, strict_search)
# Port with value 70000 means ANY
if not strict_search and (Port == '0' or list_port == '0'):
lport = True
elif strict_search and (Port == '0' and list_port == '0'):
lport = True
else:
for tport in list_port.split(','):
if '-' in Port:
Port1 = Port.split('-')[0]
Port2 = Port.split('-')[1]
# We are checking a port range
if '-' not in tport:
if anyport:
lport = int(Port1) <= int(tport) <= int(Port2)
# If not anyport is checked, this won't match, so no "else".
else:
if anyport:
# dPort is bigger or dPort1 is inside the rule range or dPort2 is inside the rule range
lport = int(Port1) <= int(tport.split('-')[0]) and int(tport.split('-')[1]) <= int(Port2) or \
int(tport.split('-')[0]) <= int(Port1) <= int(tport.split('-')[1]) or \
int(tport.split('-')[0]) <= int(Port2) <= int(tport.split('-')[1])
else:
lport = int(tport.split('-')[0]) <= int(Port1) and int(Port2) <= int(tport.split('-')[1])
else:
if '-' in tport:
lport = int(tport.split('-')[0]) <= int(Port) <= int(tport.split('-')[1])
else:
lport = int(tport) == int(Port)
if lport:
break
return lport
#################################
# Various Methods #
#################################
def print_rule(self, color=False):
"""
Print a rule
:return: None
"""
if self.DEBUG:
print '[DEBUG][print_rule] Self rule: ', \
'rulenumber:', self.rulenumber, ',', \
'name:', self.name, ',',\
'src:', self.source_address, ',',\
'dst:', self.destination_address, ',',\
'dport:', self.destination_port, ',',\
'sport:', self.source_port, ',',\
'proto:', self.protocol, ',',\
'wildcard:', self.wildcard, ',',\
'permit:', self.permit
else:
print 'Rule number:', self.rulenumber
print 'Rule name:', self.name
print 'Source Address:', self.source_address
if self.source_name != '':
print 'Source Name:', self.source_name
print 'Destination Address' + (' (NEGATED)' if self.dest_negated else '') + ':', self.destination_address
if self.dest_name != '':
print 'Destination Name:', self.dest_name
print 'Destination Port:', self.destination_port
print 'Source Port:', self.source_port
print 'Protocol:', self.protocol
print 'Wildcard:', self.wildcard
if self.comment != '':
print 'Comment:', self.comment
if self.permit:
if color:
print 'Action:', tools_color('B', 'green/black') + 'PERMIT' + tools_color()
else:
print 'Action: PERMIT'
else:
if color:
print 'Action:', tools_color('B', 'red/black') + 'DENY' + tools_color()
else:
print 'Action: DENY'
def check_ip_acl(self, sIP, dIP, dPort, sPort, proto, show_deny_any, hide_allow_all, anyport, strict_search, is_0_any):
"""
Check a flow in a rule
:param sIP: Source IP
:param dIP: Destination IP
:param dPort: Destination Port
:param sPort: Source Port
:param proto: Protocol
:param show_deny_any: Switch to SHOW a match in a DENY ALL ALL
:param hide_allow_all: Switch to HIDE any PERMIT ALL ALL
:param anyport: switch to match any port (usually for port ranges)
:param strict_search: True/False (check explanation in link)
:param is_0_any: FALSE when sIP/dIP is 0.0.0.0 but is the HOST 0.0.0.0/255.255.255.255 (or 0.0.0.0/0.0.0.0 with wildcards)
:return: Integer (rule number matched)
"""
def _any_in_source():
return ('any' in self.source_address) or \
(self.wildcard and self.source_address == '0.0.0.0/255.255.255.255') or \
(not self.wildcard and self.source_address == '0.0.0.0/0.0.0.0')
def _any_in_dest():
return ('any' in self.destination_address) or \
(self.wildcard and self.destination_address == '0.0.0.0/255.255.255.255') or \
(not self.wildcard and self.destination_address == '0.0.0.0/0.0.0.0')
def _check_ip(origin_ip, ip_to_check, wildcard):
match = False
for ip in origin_ip.split(','):
if ':' in ip:
# IPv6
pass
else:
netT = ip.split('/')[0]
filtT = ip.split('/')[1]
if '/' in ip_to_check:
if '.' not in ip_to_check.split('/')[1]:
ip_to_check = ip_to_check.split('/')[0] + '/' + cidr_to_mask(ip_to_check.split('/')[1])
if wildcard:
ip_to_check = ip_to_check.split('/')[0] + '/' + mask_to_wild(ip_to_check.split('/')[1])
match = self._checkNetWild(netT, filtT, ip_to_check)
else:
match = self._checkNetMask(netT, filtT, ip_to_check)
else:
if wildcard:
match = self._checkIPWild(netT, filtT, ip_to_check)
else:
match = self._checkIPMask(netT, filtT, ip_to_check)
if match:
break
return match
if self.DEBUG:
print
tools_debug(self.DEBUG, 'check_ip_acl',
'sIP:', sIP,
'dIP:', dIP,
'dPort:', dPort,
'sPort:', sPort,
'proto:', proto,
'show_deny_any:', show_deny_any,
'hide_allow_all:', hide_allow_all,
'anyport:', anyport,
'strict_search:', strict_search,
'is_0_any:', is_0_any)
self.print_rule()
match = False
checked = False
if not show_deny_any and not self.permit:
if _any_in_source() and _any_in_dest():
return 0
if hide_allow_all and self.permit:
if _any_in_source() and _any_in_dest():
return 0
if self.source_address == '' and self.destination_address == '':
return 0
if self.protocol == 'ip' or self.protocol == proto or (proto == 'ip' and not strict_search):
# Link doesn't allow to check from the command line
# Source IP = 0.0.0.0 and Destination IP = 0.0.0.0
# so if both are 0.0.0.0 it means that it's being used as module
# In this case, the only possible matches are also any any in source and destination
if sIP == '0.0.0.0' and dIP == '0.0.0.0':
if _any_in_source() and _any_in_dest():
# If source/dest are any, then check ports
match = True
else:
return 0
else:
# Verifying that if strict_search = TRUE then when we have an ANY we are searching for 0.0.0.0
if strict_search and is_0_any:
if sIP == '0.0.0.0' and not _any_in_source():
return 0
if dIP == '0.0.0.0' and not _any_in_dest():
return 0
if _any_in_source() and _any_in_dest():
match = True
checked = True
if not match and _any_in_source():
checked = True
if dIP == '0.0.0.0':
if is_0_any:
match = True
else:
match = (self.wildcard and self.destination_address == '0.0.0.0/0.0.0.0') or \
(not self.wildcard and self.destination_address == '0.0.0.0/255.255.255.255')
else:
match = _check_ip(self.destination_address, dIP, self.wildcard)
if self.dest_negated:
match = not match
if not checked and _any_in_dest():
checked = True
if sIP == '0.0.0.0':
if is_0_any:
match = True
else:
match = (self.wildcard and self.source_address == '0.0.0.0/0.0.0.0') or \
(not self.wildcard and self.source_address == '0.0.0.0/255.255.255.255')
else:
match = _check_ip(self.source_address, sIP, self.wildcard)
if self.source_negated:
match = not match
if not checked:
match = _check_ip(self.source_address, sIP, self.wildcard)
if self.source_negated:
match = not match
if match:
match = _check_ip(self.destination_address, dIP, self.wildcard)
if self.dest_negated:
match = not match
if match:
if self.protocol == 'icmp' or self.protocol == 'ip':
return self.rulenumber
if self.protocol == 'vrrp' or self.protocol == '112':
return self.rulenumber
# If a dPort or sPort is specified in the command line, then it makes sense to check the ports when
# the protocol is TCP/UDP
if (dPort != '0' or sPort != '0') and (self.protocol != 'udp' and self.protocol != 'tcp'):
return 0
result = True
for tport in dPort.split(','):
result = result and self._checkPort(True, tport, anyport, strict_search)
if result: # If still TRUE
for tport in sPort.split(','):
result = result and self._checkPort(False, tport, anyport, strict_search)
if result:
return self.rulenumber
return 0
def has_non_contiguous(self):
"""
Check if there is any wildcard non contiguous in the rule
:return: TRUE/FALSE
"""
if self.wildcard:
if self.source_address == 'any':
if self.destination_address == 'any':
return False
else:
return not wild_is_contiguous(self.destination_address.split('/')[1])
elif self.destination_address == 'any':
return not wild_is_contiguous(self.source_address.split('/')[1])
return not (wild_is_contiguous(self.source_address.split('/')[1]) or
wild_is_contiguous(self.destination_address.split('/')[1]))
return False
def compare(self, rule_d):
"""
Compare two rules
:param rule_d: Rule to be compared
:return: 0 -> If the rule is not equal
1 -> If the rule (syntactically) is the same
2 -> If even the name of the rule is the same
"""
if self.DEBUG:
tools_debug(self.DEBUG, 'rule.compare', 'entering')
data_rule = rule_d.get_rule()
result = 0
if self.source_address == data_rule[1] and \
self.destination_address == data_rule[2] and \
self.destination_port == data_rule[3] and \
self.source_port == data_rule[4] and \
self.protocol == data_rule[5] and \
self.permit == data_rule[6] and \
self.wildcard == data_rule[7]:
result = 1
if self.name == data_rule[0]:
result = 2
if self.DEBUG:
tools_debug(self.DEBUG, 'rule.compare', 'result', result)
return result
class FWPolicy (object):
"""
Policy class to store ACL and FW Policies.
Self.rules will contain a list of FWRules with each rule
Rules can be split in several ways:
- splitting by non-continuos wildcards:
This method will create several rules with contiguous wildcards instead of non-contiguous
The name of the rule will be exactly the same than the original rule
- splitting by IP:
This method will create several rules with only one IP in source and destination.
The name of the rules will be changed to:
- <original rule name>{<original rule number>{<counter>{[<source_ip>,<destination_ip>]
"""
DEBUG=False
def __init__(self, name='', filename='', DEBUG=False):
    # name: policy/filter name; filename: source ACL file; DEBUG: verbose tracing flag.
    self.name = name
    self.filename = filename
    self.rules = ['<empty>']  # List of fwrules; the '<empty>' sentinel means "no rules yet"
    self.DEBUG = DEBUG
#################################
# GET Methods #
#################################
def get_rule_action(self, rulenumber):
    """Return the permit flag of rule `rulenumber` (1-based); None when out of range."""
    if rulenumber <= len(self.rules):
        return self.rules[rulenumber - 1].get_rule_action()
def get_rules_number(self):
    """Return how many rules the policy holds (0 when only the '<empty>' sentinel is present)."""
    # The original counted the list with a manual O(n) loop; len() is equivalent.
    if self.rules[0] == '<empty>':
        return 0
    return len(self.rules)
def get_rule(self, rulenumber):
    """Return rule `rulenumber` (1-based) as a plain list; None when out of range."""
    if rulenumber <= len(self.rules):
        return self.rules[rulenumber - 1].get_rule()
def get_rule_name(self, rulenumber):
    """Return the name of rule `rulenumber` (1-based); None when out of range."""
    if rulenumber <= len(self.rules):
        return self.rules[rulenumber - 1].get_rule_name()
def get_rules(self):
    # Return the internal rule list itself (callers may mutate it).
    return self.rules
def get_policy_filename(self):
    # File the policy was parsed from.
    return self.filename
#################################
# SET Methods #
#################################
def set_rule_dyn_data(self, rulenumber, attribute, value):
    """Set attribute `attribute` on rule `rulenumber` (1-based); no-op when out of range."""
    if rulenumber <= len(self.rules):
        self.rules[rulenumber - 1].set_dyn_data(attribute, value)
def set_name(self, name):
    # Name of the whole policy (e.g. the JCL filter name).
    self.name = name
def set_empty_rule(self, rulenumber):
    # Blank all fields of rule `rulenumber` (1-based) without removing it from the list.
    self.rules[rulenumber-1].set_empty()
def set_rule_name(self, rulenumber, name):
    # Rename rule `rulenumber` (1-based).
    self.rules[rulenumber - 1].set_name(name)
def set_rule_comment(self, rulenumber, comment):
self.rules[rulenumber - 1].set_comment(comment)
def set_rule_action(self, rulenumber, accept):
self.rules[rulenumber-1].set_action(accept)
def set_all_rules(self, rules_list):
if len(rules_list) > 0:
self.rules = rules_list
else:
self.rules = ['<empty>']
#################################
# Various Methods #
#################################
def new_rule(self, source_address, dest_address, dPort, sPort, protocol, ACCEPT, wildcard, source_name='', dest_name='', source_negated=False, dest_negated=False):
"""
Creates a new rule
:param source_address:
:param dest_address:
:param dPort:
:param sPort:
:param protocol:
:param ACCEPT:
:param wildcard:
:param source_name:
:param dest_name:
:param source_negated
:param dest_negated
:return: Rule number
"""
fwrule = FWRule()
if self.DEBUG:
fwrule.set_debug()
fwrule.set_rule_data(sAddress=source_address,
dAddress=dest_address,
dPort=dPort,
sPort=sPort,
protocol=protocol,
ACCEPT=ACCEPT,
wildcard=wildcard,
source_name=source_name,
dest_name=dest_name,
source_negated=source_negated,
dest_negated=dest_negated)
if self.rules[0] == '<empty>':
self.rules = [fwrule]
else:
self.rules.append(fwrule)
fwrule.set_rulenumber(len(self.rules))
return len(self.rules)
def new_empty_rule(self, wildcard):
"""
Creates a new EMPTY rule
:param wildcard: TRUE/FALSE if the rule has/hasn't a Wildcard
:return: Rule number
"""
fwrule = FWRule()
if self.DEBUG:
fwrule.set_debug()
fwrule.set_rule_data('0.0.0.0/0.0.0.0', '0.0.0.0/0.0.0.0', '0', '0', 'ip', False, wildcard)
if self.rules[0] == '<empty>':
self.rules = [fwrule]
else:
self.rules.append(fwrule)
fwrule.set_rulenumber(len(self.rules))
return len(self.rules)
def link(self, sIP, dIP, dPort, sPort, proto, rules_exclude=[], show_deny=False, hide_allow_all=True, showallmatches=False, ignore_dsmo=False, anyport=False, strict_search=False, is_0_any=True):
"""
Check a flow against the policy
:param sIP: Source IP
:param dIP: Destination IP
:param dPort: Destination Port
:param sPort: Source Port
:param proto: Protocol
:param rules_exclude: Allow to exclude rules from being checked
:param show_deny: Will show if a DENY is matched
:param hide_allow_all: HIDE rules PERMIT ANY ANY
:param showallmatches: will show all rules in the policy that match the flow (not just the first one)
:param ignore_dsmo: Will ignore a match if the rule has a non-contiguous wildcard
:param anyport: anyport matched will match the rule (usually with port ranges)
:param strict_search: It makes a the search of the rules STRICT, so when we look for ANY in Source/Dest (0.0.0.0/0),
only ANY will match (we are not looking for any value but for the 0.0.0.0/0.0.0.0 value). Also
it applies to protocols: if we are looking for IP protocol, only IP protocol will match, but
if we are looking for TCP, TCP and IP will match.
if Source IP or Destination IP is 0.0.0.0 and we want to match them with rules with ANY (or equivalent) in the rule
:param is_0_any: FALSE when sIP/dIP is 0.0.0.0 but is the HOST 0.0.0.0/255.255.255.255 (or 0.0.0.0/0.0.0.0 with wildcards)
:return: list of rules found
"""
rulef = 0
rules_found = []
for rule in self.rules:
if self.DEBUG:
if self.rules[0] == '<empty>':
print '<empty>'
if not self.rules[0] == '<empty>':
rulef = rule.check_ip_acl(sIP, dIP, dPort, sPort, proto, show_deny, hide_allow_all, anyport, strict_search, is_0_any)
if self.DEBUG:
tools_debug(self.DEBUG, 'link', 'Result check_ip_acl:', rulef)
if rulef > 0:
if rules_exclude and rulef in rules_exclude:
continue
if rule.get_contiguous() > 0 and ignore_dsmo:
continue
rules_found.append(rulef)
if not showallmatches:
break
return rules_found
def split_non_contiguous_wild(self):
"""
It will try to split a rule with a non contiguous wildcard into several networks
It's not going to split any rule in more than <NONCONT_LIMIT> networks
:return: None
"""
end = False
pos_list = -1
non_c = False
if type(self.rules[0]) != str: # To avoid errors with "empty" policies
while not end:
pos_list += 1
if pos_list >= len(self.rules):
break
contig_num = self.rules[pos_list].get_contiguous()
if 0 < contig_num <= NONCONT_LIMIT:
# This is a limit in the number of "new rules" that can be created
original_rule = self.rules[pos_list].get_rule()
original_rule_number = pos_list+1
if original_rule[1] != 'any' and original_rule[1] != '0.0.0.0/255.255.255.255':
if not wild_is_contiguous(original_rule[1].split('/')[1]):
netlistS = split_non_contiguous(original_rule[1].split('/')[0], original_rule[1].split('/')[1])
if len(netlistS) > 0:
non_c = True
else:
# It's possible that we can't split the source (because it will create more than 128)
# but still it's possible to split the destination
netlistS = [original_rule[1]]
else:
netlistS = [original_rule[1]]
else:
netlistS = ['0.0.0.0/255.255.255.255']
if original_rule[2] != 'any' and original_rule[2] != '0.0.0.0/255.255.255.255':
if not wild_is_contiguous(original_rule[2].split('/')[1]):
netlistD = split_non_contiguous(original_rule[2].split('/')[0], original_rule[2].split('/')[1])
if len(netlistD) > 0:
non_c = True
else:
netlistD = [original_rule[2]]
else:
netlistD = [original_rule[2]]
else:
netlistD = ['0.0.0.0/255.255.255.255']
# At this point we could have two different list with Source IPs and Dest IP, that together they
# can't reach NONCONT_LIMIT networks. We just combine then.
if non_c:
source_dest_list = []
for a in netlistS:
for b in netlistD:
source_dest_list.append([a,b])
inew_number = original_rule_number
for new_net in source_dest_list:
if source_dest_list.index(new_net) == 0:
self.rules[original_rule_number-1].set_source(new_net[0])
self.rules[original_rule_number-1].set_destination(new_net[1])
else:
inew_number += 1
# The first entry we already have it
newrule = FWRule()
newrule.set_rule_data(sAddress=new_net[0],
dAddress=new_net[1],
dPort=original_rule[3],
sPort=original_rule[4],
protocol=original_rule[5],
ACCEPT=original_rule[6],
wildcard=True,
name=original_rule[0],
rulenumber=inew_number)
if self.DEBUG:
newrule.set_debug()
self.rules.insert(inew_number-1, newrule)
pos_list = inew_number-1 # Lists starts in 0!
if self.get_rules_number() == pos_list or pos_list >= 50000:
# 50000 seems to be a good safe value to break the while in case of any issue. I would not expect to split in more than 50k rules
break
self.renum_policy()
def renum_policy(self):
"""
Renumerate a policy
:return: None
"""
cont=0
for rule in self.rules:
if type(rule) == str:
break
cont += 1
rule.set_rulenumber(cont)
def check_if_any_non_contiguous(self):
"""
Check if there is any non contiguous mask in the policy
:return: TRUE/FALSE
"""
if type(self.rules) != str:
for rule in self.rules:
if rule.has_non_contiguous():
return True
return False
def split_ips(self):
"""
Some ACLs (like Juniper) allow to have multiple IPs in Source and Destination and/or multple ports. This function
split all these rules in rules with only one source and only one destination, adding some extra information
in the name to identify them
The name of the rules will be changed to:
- <original rule name>{<original rule number>{<counter>{[<source_ip>,<destination_ip>]
:return: True
"""
if self.DEBUG:
tools_debug(self.DEBUG, 'split_ips', 'Entering split_ips')
cont_rules = 0
for rule in self.rules:
cont_rules += 1
if self.DEBUG:
if self.rules[0] == '<empty>':
print '<empty>'
else:
rule.print_rule()
if not self.rules[0] == '<empty>':
rule_data = rule.get_rule()
if rule_data[0].startswith('^'): # Special rule, usually empty, we don't need to check it
continue
if ',' not in rule_data[1] and ',' not in rule_data[2] and ',' not in rule_data[3] and ',' not in rule_data[4]:
continue
if ',' in rule_data[1]:
ips_list = rule_data[1].split(',')
else:
ips_list = [rule_data[1]]
if ',' in rule_data[2]:
ipd_list = rule_data[2].split(',')
else:
ipd_list = [rule_data[2]]
if ',' in rule_data[3]:
dport_list = rule_data[3].split(',')
else:
dport_list = [rule_data[3]]
if ',' in rule_data[4]:
sport_list = rule_data[4].split(',')
else:
sport_list = [rule_data[4]]
rule_number = cont_rules
num_split = 0
for ips in ips_list:
for ipd in ipd_list:
for s_port in sport_list:
for d_port in dport_list:
num_split += 1
rule_name = rule_data[0] + '{' + str(cont_rules) + '{' + str(num_split) + '{' + str([ips,ipd])
if rule_number == cont_rules:
'''
We need to split the current rule into multiple rules, so instead of remove the current one
and add all the split, we change the current one with the data of the first "new rule"
'''
rule.set_source(ips)
rule.set_destination(ipd)
rule.set_source_port(s_port)
rule.set_destination_port(d_port)
rule.set_name(rule_name)
else:
newrule = FWRule()
newrule.set_rule_data(sAddress=ips,
dAddress=ipd,
dPort=d_port,
sPort=s_port,
protocol=rule_data[5],
ACCEPT=rule_data[6],
wildcard=False,
name=rule_name,
rulenumber=rule_number,
source_name=rule_data[8],
dest_name=rule_data[9],
source_negated=rule_data[10],
dest_negated=rule_data[11])
if self.DEBUG:
newrule.set_debug()
self.rules.insert(rule_number - 1, newrule)
rule_number += 1
self.renum_policy()
return True
def get_number_split_rules(self, rule_info):
"""
Given a rule_name of a split rule by IP, will return the number of "children" matching that "child".
Example:
rule_info = term testt1{5{2{['10.0.0.0/255.255.255.0', '10.0.1.0/255.255.255.0']
This rule, it's a compound one, with name: "term testt1" and number: "5", and this is the child '5{2'
Will return the number of rules matching "term testt1{5"
:param rule_info: Rule name of a child rule split by IP
:return: Number of rules matching the info of the child
"""
rule_data = rule_info.split('{')
if len(rule_data) < 2:
return -1
if self.rules[0] == '<empty>':
return -1
rule_query = rule_data[0] + '{' + rule_data[1]
i = 0
for rule in self.rules:
if rule_query in rule.get_rule()[0]:
i += 1
return i
def remove_rule(self, rule_number):
if self.DEBUG:
tools_debug(self.DEBUG, 'find_rule', 'Entering remove_rule', rule_number)
if self.rules[0] == '<empty>':
return False
for rule in self.rules:
if rule.get_rule_number() == rule_number:
self.rules.remove(rule)
break
if len(self.rules) == 0:
self.rules = ['<empty>']
return True
def last_deny(self):
"""
Check if the last rule is DENY ANY ANY
:return: TRUE/FALSE
"""
if self.DEBUG:
tools_debug(self.DEBUG, 'last_deny', 'Entering')
if self.rules[0] == '<empty>':
return False
last = self.rules[-1].get_rule()
if not last[6]: # DENY
if last[7]: # Wildcard
return last[1] == '0.0.0.0/255.255.255.255' and last[2] == '0.0.0.0/255.255.255.255'
else:
return last[1] == '0.0.0.0/0.0.0.0' and last[2] == '0.0.0.0/0.0.0.0'
return False
def remove_shadowed_rules(self):
"""
Check for a "basic" shadowed. It's only going to remove those rules that are fully shadowed.
This method requires that the rules were split before
:return: Number of removed rules (None in case of any error)
"""
num_rules_removed = 0
list_rules_removed = {}
if self.DEBUG:
tools_debug(self.DEBUG, 'remove_shadowed_rules', 'Entering"')
if self.rules[0] == '<empty>':
return list_rules_removed
num_rule = len(self.rules)
while num_rule > 0:
rule = self.rules[num_rule-1].get_rule()
# This method requires that the rules were split before
if ',' in rule[1] or ',' in rule[2]:
return None
if rule[6]: # Only for Permit rules
if rule[7]: # If it's a wildcard we need to change it
# Source
ip = rule[1].split('/')[0]
wild = rule[1].split('/')[1]
mask = mask_to_wild(wild) # This function is "bidirectional"
rule[1] = ip + '/' + mask
# Destination
ip = rule[2].split('/')[0]
wild = rule[2].split('/')[1]
mask = mask_to_wild(wild) # This function is "bidirectional"
rule[2] = ip + '/' + mask
check = self.link(rule[1], rule[2], rule[3], rule[4], rule[5], rules_exclude=[num_rule], show_deny=True, hide_allow_all=False, strict_search=True, is_0_any=True, anyport=True)
if len(check) > 0:
t_rule = self.rules[check[0]-1].get_rule()
if t_rule[6]:
if self.DEBUG:
tools_debug(self.DEBUG, 'remove_shadowed_rules', 'Removing rule', num_rule)
list_rules_removed[rule[0]] = self.rules[check[0]-1].get_rule()[0]
self.remove_rule(num_rule)
self.renum_policy()
num_rules_removed += 1
else:
if self.DEBUG:
tools_debug(self.DEBUG, 'remove_shadowed_rules', 'Matched with DENY. NOT removing', num_rule)
num_rule -= 1
return list_rules_removed
#################################
# Output Methods #
#################################
def print_policy(self):
"""
Print policy
:return:
"""
print 'Policy:', self.name
for rule in self.rules:
print
if type(rule) == str:
print rule
else:
rule.print_rule()
def print_rule(self, rulenumber, color=False):
"""
Print a specific rule
:param rulenumber: Number of the rule to print
:param color: Switch to print colors
:return:
"""
if len(self.rules) >= rulenumber:
self.rules[rulenumber-1].print_rule(color=color)
|
salesforce/smartACL
|
smartACL/link_cisco.py
|
<reponame>salesforce/smartACL
#!/usr/bin/env python
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import sys
from linkdef import *
def cidr_to_mask(cidr):
    """Convert a CIDR prefix length (e.g. '24') into a dotted-quad netmask string."""
    mask_bits = (0xffffffff << (32 - int(cidr))) & 0xffffffff
    octets = []
    for shift in (24, 16, 8, 0):
        octets.append(str((mask_bits >> shift) & 0xff))
    return '.'.join(octets)
def acl_parser(filename, policy, remarkasname=False, DEBUG=False):
    """
    Parsing Cisco ACL
    :param filename: could be a file or if it's use as module a Python list with all the ACLs
    :param policy: policy object
    :param remarkasname: use remarks comments as rule names
    :param DEBUG: DEBUG flag
    :return: True
    """
    rule_name = ''
    if type(filename) is not list:
        f = open(filename, 'r')
    else:
        f = filename
    # Usually Cisco ACL is using wildcard, but it's possible to find a mask with prefix. It's not possible to have an ACL
    # with wildcard and without, so we need to find if there the ACL is using wildcard or not
    acl_wild = True
    # First pass: detect prefix-style masks ('/') vs wildcard masks (start with 0).
    for line in f:
        line = line.strip()
        if line.startswith('permit') or line.startswith('deny'):
            if '/' in line:
                acl_wild = False
                break
            # There are much better ways to check this, but this will work in a fast way
            line_s = line.split()
            if len(line_s) > 4:
                if line_s[3].startswith('0') and line_s[3] != '0.0.0.0':
                    # We find one wildcard, we don't need to continue reading the rest of the file
                    break
    # Rewind for the real parsing pass (lists are re-iterable as-is).
    if type(f) is not list:
        f.seek(0)
    for line in f:
        iPos = 0
        line = line.strip()
        if DEBUG:
            print '\n[DEBUG] Cisco ACL Parser:', line
        # Lines with a reference to any object using '{<name>}' are ignored
        if (line.startswith('permit') or line.startswith('deny')) and not 'established' in line and not '{' in line:
            sPortACL = '0'
            dPortACL = '0'
            aclsp = line.split()
            lPermit = aclsp[iPos] == 'permit'
            iPos += 1
            # Protocol
            protocolACL = aclsp[iPos]
            if protocolACL not in ['tcp', 'udp', 'icmp', 'igmp', 'pim', 'ip', 'ospf']:
                try:
                    # Numeric protocol numbers are mapped to their names.
                    int(protocolACL)
                    if protocolACL == '6':
                        protocolACL = 'tcp'
                    elif protocolACL == '17':
                        protocolACL = 'udp'
                    elif protocolACL == '1':
                        protocolACL = 'icmp'
                except ValueError:
                    # Not a number: the token is actually part of the source field.
                    if protocolACL == 'host':
                        protocolACL = 'ip'
                        iPos -= 1  # Exceptional case when the ACL is just "permit host X.X.X.X "
                    elif '.' in protocolACL:
                        protocolACL = 'ip'
                        iPos -= 1  # Exceptional case when the ACL is just "permit X.X.X.X W.W.W.W"
                    elif 'any' in protocolACL:
                        protocolACL = 'ip' # Exceptional case when ACL is just "deny any log"
                    else:
                        print '[ERROR] Error processing file: ', filename
                        print '[ERROR] Can\'t parse the line: ', line
                        print '[ERROR] Error with:', protocolACL
                        print 'Press Enter to continue...'
                        raw_input()
                        continue
            iPos += 1
            try:
                # Source/Mask
                if aclsp[iPos] == 'any':
                    sourceIPACL = 'any'
                    iPos += 1
                elif aclsp[iPos] == 'log':
                    sourceIPACL = 'any'
                    iPos += 1
                else:
                    if aclsp[iPos] == 'host':
                        if not acl_wild:
                            # NOTE(review): '2192.168.127.12' looks like a corrupted
                            # host netmask — should almost certainly be
                            # '255.255.255.255' (cf. the destination branch below).
                            # Confirm against upstream before changing.
                            sourceIPACL = aclsp[iPos+1] + '/2192.168.127.12'
                        else:
                            sourceIPACL = aclsp[iPos+1] + '/0.0.0.0'
                        iPos += 2
                    else:
                        if '/' in aclsp[iPos]:
                            sourceIPACL = aclsp[iPos].split('/')[0] + '/' + cidr_to_mask(aclsp[iPos].split('/')[1])
                            iPos += 1
                        else:
                            if len(aclsp) == iPos + 1: # ACL only with permit IP
                                if not acl_wild:
                                    sourceIPACL = aclsp[iPos] + '/255.255.255.255'
                                else:
                                    sourceIPACL = aclsp[iPos] + '/0.0.0.0'
                            else:
                                sourceIPACL = aclsp[iPos] + '/' + aclsp[iPos + 1]
                                iPos += 2
                # Normalize 'any' to an explicit network/mask in the ACL's mask style.
                if sourceIPACL == 'any' and acl_wild:
                    sourceIPACL = '0.0.0.0/255.255.255.255'
                elif sourceIPACL == 'any' and not acl_wild:
                    sourceIPACL = '0.0.0.0/0.0.0.0'
                # It's possible than the ACL would be "permit host X.X.X.X"
                if len(aclsp) <= iPos:
                    if acl_wild:
                        destIPACL = '0.0.0.0/255.255.255.255'
                    else:
                        destIPACL = '0.0.0.0/0.0.0.0'
                else:
                    # Source Operator (eq/neq/lt/gt/range translated to a port or port range)
                    if protocolACL == 'tcp' or protocolACL == 'udp':
                        if aclsp[iPos] == 'eq':
                            sPortACL = aclsp[iPos+1]
                            iPos += 2
                        elif aclsp[iPos] == 'neq' or aclsp[iPos] == 'lt' or aclsp[iPos] == 'gt':
                            if aclsp[iPos] == 'gt':
                                sPortACL = str(int(aclsp[iPos+1]) + 1) + '-65535'
                                iPos += 2
                            elif aclsp[iPos] == 'lt':
                                sPortACL = '0-' + str(int(aclsp[iPos+1]) - 1)
                                iPos += 2
                            elif aclsp[iPos] == 'neq':
                                sPortACL = '0-' + str(int(aclsp[iPos+1]) - 1) + ',' + str(int(aclsp[iPos+1]) + 1) + '-65535'
                                iPos += 2
                        elif aclsp[iPos] == 'range':
                            sPortACL = aclsp[iPos+1] + "-" + aclsp[iPos+2]
                            iPos += 3
                        if '-' in sPortACL:
                            # ftp-data has a '-' and now is late to change '-' as divisor for ranges
                            if 'ftp-data' in sPortACL:
                                sPortACL = sPortACL.replace('ftp-data', 'ftpdata')
                            # Resolve named ports (port_number map) on each range end.
                            t1 = sPortACL.split('-')[0]
                            t2 = sPortACL.split('-')[1]
                            if t1 in port_number:
                                t1 = port_number[t1]
                                sPortACL = t1 + '-' + t2
                            if t2 in port_number:
                                t2 = port_number[t2]
                                sPortACL = t1 + '-' + t2
                        else:
                            if sPortACL in port_number:
                                sPortACL = port_number[sPortACL]
                    # Destination/Mask
                    if aclsp[iPos] == 'any':
                        destIPACL = 'any'
                        iPos += 1
                    else:
                        if aclsp[iPos] == 'host':
                            if not acl_wild: # Checked while checking source
                                destIPACL = aclsp[iPos + 1] + '/255.255.255.255'
                            else:
                                destIPACL = aclsp[iPos+1] + '/0.0.0.0'
                            iPos += 2
                        else:
                            if '/' in aclsp[iPos]:
                                destIPACL = aclsp[iPos].split('/')[0] + '/' + cidr_to_mask(aclsp[iPos].split('/')[1])
                                iPos += 1
                            else:
                                destIPACL = aclsp[iPos] + '/' + aclsp[iPos + 1]
                                iPos += 2
                    if destIPACL == 'any' and acl_wild:
                        destIPACL = '0.0.0.0/255.255.255.255'
                    elif destIPACL == 'any' and not acl_wild:
                        destIPACL = '0.0.0.0/0.0.0.0'
                    # Dest Operator (same translation as the source operator above)
                    if protocolACL == 'tcp' or protocolACL == 'udp':
                        if len(aclsp) > iPos:
                            if aclsp[iPos] == 'eq':
                                dPortACL = aclsp[iPos+1]
                                iPos += 2
                            elif aclsp[iPos] == 'neq' or aclsp[iPos] == 'lt' or aclsp[iPos] == 'gt':
                                if aclsp[iPos] == 'gt':
                                    dPortACL = str(int(aclsp[iPos + 1]) + 1) + '-65535'
                                    iPos += 2
                                elif aclsp[iPos] == 'lt':
                                    dPortACL = '0-' + str(int(aclsp[iPos + 1]) - 1)
                                    iPos += 2
                                elif aclsp[iPos] == 'neq':
                                    dPortACL = '0-' + str(int(aclsp[iPos + 1]) - 1) + ',' + str(int(aclsp[iPos + 1]) + 1) + '-65535'
                                    iPos += 2
                            elif aclsp[iPos] == 'range':
                                dPortACL = aclsp[iPos+1] + "-" + aclsp[iPos+2]
                                iPos += 3
                            if '-' in dPortACL:
                                if 'ftp-data' in dPortACL:
                                    dPortACL = dPortACL.replace('ftp-data', 'ftpdata')
                                t1 = dPortACL.split('-')[0]
                                t2 = dPortACL.split('-')[1]
                                if t1 in port_number:
                                    t1 = port_number[t1]
                                    dPortACL = t1 + '-' + t2
                                if t2 in port_number:
                                    t2 = port_number[t2]
                                    dPortACL = t1 + '-' + t2
                            else:
                                if dPortACL in port_number:
                                    dPortACL = port_number[dPortACL]
            except Exception, error_message:
                # Best-effort parser: report the bad line, wait for operator, keep going.
                print '[ERROR] Error processing file: ', filename
                print '[ERROR] Can\'t parse the line: ', line
                print '[ERROR] ERROR MESSAGE:', error_message
                print 'Press Enter to continue...'
                raw_input()
                continue
            r = policy.new_rule(sourceIPACL,
                                destIPACL,
                                dPortACL,
                                sPortACL,
                                protocolACL,
                                lPermit,
                                acl_wild)
            # Rule name: the raw ACL line, or the last remark when remarkasname is set.
            if type(filename) is list or rule_name == '':
                policy.set_rule_name(r, line)
            else:
                policy.set_rule_name(r, rule_name)
        elif line.startswith('remark'):
            if DEBUG:
                print '\n[DEBUG]', line
            if remarkasname:
                rule_name = line
    if type(filename) is not list:
        f.close()
    return True
"""
MAIN
"""
if __name__ == "__main__":
d=False
policy = FWPolicy(sys.argv[1],sys.argv[1], DEBUG=d)
acl_parser(sys.argv[1], policy)
policy.remove_shadowed_rules()
policy.print_policy()
|
salesforce/smartACL
|
smartACL/dsmo_split.py
|
<reponame>salesforce/smartACL<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import argparse
import linkdef
import tools
try:
import ipaddr
except:
try:
import third_party.ipaddr as ipaddr
except:
raise ImportError("The ipaddr module is not installed, exiting...")
# CLI: split a network written with a non-contiguous (DSMO) wildcard mask
# into the equivalent list of contiguous networks.
parser = argparse.ArgumentParser()
parser.add_argument('network', help='Network with DSMO mask in the format X.X.X.X/Y.Y.Y.Y')
parser.add_argument('--host', help='Display the split as HOST (/32)', action='store_true')
args = parser.parse_args()
dsmo_net = args.network
# 'A.B.C.D/E.F.G.H' yields 2 slash-parts and 7 dot-parts ('D/E' counts once).
if len(dsmo_net.split('/')) != 2 or len(dsmo_net.split('.')) != 7:
    print 'ERROR: Format of network not valid. It should be in the format X.X.X.X/Y.Y.Y.Y'
    quit()
only_host = args.host
# Split IP/wildcard into contiguous sub-networks (project helper in tools).
net_split = tools.split_non_contiguous(dsmo_net.split('/')[0], dsmo_net.split('/')[1])
for net in net_split:
    ip = net.split('/')[0]
    # mask_to_wild converts between wildcard and netmask (bidirectional helper).
    netmask = tools.mask_to_wild(net.split('/')[1])
    if only_host:
        if '/0.0.0.0' in net:
            # 0.0.0.0 in wildcard is host but in also means ANY is 'standard' way
            net = net.split('/')[0] + '/255.255.255.255'
        # Enumerate every address of the network as an individual host line.
        ip = ipaddr.IPv4Network(net)
        for host in list(ip):
            ip_host = str(host)
            print ip_host
    else:
        print ip + '/' + netmask
|
salesforce/smartACL
|
smartACL/link_pol.py
|
#!/usr/bin/env python
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import sys
import os
import copy
from linkdef import *
def cidr_to_mask(cidr):
    """Return the dotted-decimal netmask for a CIDR prefix length *cidr*."""
    prefix = int(cidr)
    mask_value = (0xffffffff << (32 - prefix)) & 0xffffffff
    return '.'.join(str((mask_value >> shift) & 0xff) for shift in (24, 16, 8, 0))
# Default Capirca definition file names looked up in the definitions directory.
NETWORK_DEF = 'NETWORK.net'
SERVICE_DEF = 'SERVICES.svc'
# Template for one parsed 'term': deep-copied per rule before filling in.
TOKENS = {'source-address': [],
          'destination-address': [],
          'protocol': [],
          'source-port': [],
          'destination-port': [],
          'comment': '',
          'action': ''}
# Terms containing any of these tokens are skipped entirely.
INVALID_TOKENS = ['source-prefix']
def pol_parser(filename, definition_dir, policy, DEBUG=False):
    """
    Parsing Capirca POL files
    :param filename: file with the policy
    :param definition_dir: NETWORK.net and SERVICES.svc directory
    :param policy: policy object
    :param DEBUG: DEBUG flag
    :return: True (False if the definitions could not be loaded)
    """
    def pol_parse_definitions(filename):
        # Parse a Capirca definitions file (NETWORK.net / SERVICES.svc) into
        # {name: [values]}; continuation lines belong to the last '=' name.
        definitions = {}
        inside = False
        f = open(filename, 'r')
        for line in f:
            line = line.strip()
            if line.startswith('#') or line == '':
                continue
            if '=' in line:
                inside = True
                line_split = line.split('=')
                top = line_split[0].strip()
                if line_split[1].strip() == '':
                    definitions[top] = []
                else:
                    t = line_split[1].strip()
                    # Drop trailing inline comments.
                    if '#' in t:
                        t = t.split('#')[0].strip()
                    definitions[top] = [t]
            elif '=' not in line and inside:
                t = line
                if '#' in t:
                    t = t.split('#')[0].strip()
                definitions[top].append(t)
            else:
                print '[ERROR] Parsing policy file:', filename, 'line', line
                raw_input()
        return definitions
    def process_rule(rule, net_def, ser_def):
        # Resolve symbolic names in a parsed term to concrete IPs/ports, in place.
        def _get_ip(t, res):
            # Recursively expand nested network-definition names into *res*.
            for i in t:
                if i in net_def:
                    i = net_def[i]
                    _get_ip(i, res)
                else:
                    res.append(i)
        rule['source-name'] = ','.join(rule['source-address'])
        rule['destination-name'] = ','.join(rule['destination-address'])
        # Source IP
        t = []
        _get_ip(rule['source-address'], t)
        t2 = []
        if t == []:
            t2 = ['0.0.0.0/0.0.0.0']
        else:
            for i in t:
                if '/' in i:
                    t2.append(i.split('/')[0] + '/' + cidr_to_mask(i.split('/')[1]))
                else:
                    # NOTE(review): '25192.168.3.11' looks like a corrupted host
                    # netmask — should almost certainly be '255.255.255.255'.
                    # Confirm against upstream before changing.
                    t2.append(i + '/25192.168.3.11')
        rule['source-address'] = ','.join(t2)
        # Destination IP
        t = []
        _get_ip(rule['destination-address'], t)
        t2 = []
        if t == []:
            t2 = ['0.0.0.0/0.0.0.0']
        else:
            for i in t:
                if '/' in i:
                    t2.append(i.split('/')[0] + '/' + cidr_to_mask(i.split('/')[1]))
                else:
                    # NOTE(review): same suspicious corrupted netmask as above.
                    t2.append(i + '/25192.168.3.11')
        rule['destination-address'] = ','.join(t2)
        # Source Port
        t = []
        for i in rule['source-port']:
            if i in ser_def:
                # Service entries look like 'port/proto'; keep only the port.
                for i2 in ser_def[i]:
                    t.append(i2.split('/')[0])
            else:
                print '[ERROR] Parsing policy file:', filename, 'line', line
                raw_input()
                continue
        if t == []:
            t = ['0']
        rule['source-port'] = ','.join(t)
        # Destination Port
        t = []
        for i in rule['destination-port']:
            if i in ser_def:
                for i2 in ser_def[i]:
                    t.append(i2.split('/')[0])
            else:
                print '[ERROR] Parsing policy file:', filename, 'line', line
                raw_input()
                continue
        if t == []:
            t = ['0']
        rule['destination-port'] = ','.join(t)
    ###### MAIN #####
    # Load the definitions: either the default names or policy-specific
    # '<polname>.NETWORK.net' / '<polname>.SERVICES.svc' variants.
    net_def = ''
    ser_def = ''
    for file in os.listdir(definition_dir):
        if file == NETWORK_DEF or file == filename.split('/')[-1] + '.' + NETWORK_DEF:
            net_def = pol_parse_definitions(definition_dir + '/' + file)
        elif file == SERVICE_DEF or file == filename.split('/')[-1]+ '.' + SERVICE_DEF:
            ser_def = pol_parse_definitions(definition_dir + '/' + file)
    if net_def == '' or ser_def == '':
        print '[ERROR] Network and/or Services definitions couldn\'t be loaded'
        raw_input()
        return False
    inside_term = False
    invalid_rule = False
    header = False
    f = open(filename, 'r')
    new_rule = copy.deepcopy(TOKENS)
    # Pre-parsing the file to "fix" small format issues:
    # a closing '}' sharing a line with other content is split into its own line.
    data_file = []
    for line in f:
        line = line.strip()
        if line.startswith('}'):
            if line.split('}')[1].strip() != '':
                data_file.append('}')
                line = line[1:]
        data_file.append(line)
    for line in data_file:
        line = line.strip()
        if DEBUG:
            print '\n[DEBUG] POL ACL Parser:', line
        # Skip the whole 'header { ... }' block.
        if line.startswith('header'):
            header = True
            continue
        if header:
            if line.startswith('}'):
                header = False
            continue
        if line.startswith('#') or line == '':
            continue
        if line.startswith('term'):
            if inside_term:
                print '[ERROR] Parsing policy file:', file, 'line', line
                raw_input()
                continue
            inside_term = True
            rule_name = line.split('{')[0].split(' ')[1].strip()
            # Start a fresh rule accumulator for this term.
            del new_rule
            new_rule = copy.deepcopy(TOKENS)
            continue
        elif line.startswith('}'):
            # End of a term: resolve names and emit one rule per protocol.
            inside_term = False
            if DEBUG:
                print '\n[DEBUG] POL ACL Parser. NEW_RULE:', new_rule
            process_rule(new_rule, net_def, ser_def)
            if DEBUG:
                print '\n[DEBUG] POL ACL Parser. NEW_RULE PROCESSED:', new_rule
            if not invalid_rule:
                if len(new_rule['protocol']) == 0:
                    new_rule['protocol'] = ['ip']
                for i in new_rule['protocol']:
                    r = policy.new_rule(new_rule['source-address'], new_rule['destination-address'], new_rule['destination-port'], new_rule['source-port'], i, new_rule['action'] == 'accept', False, new_rule['source-name'], new_rule['destination-name'])
                    policy.set_rule_name(r, rule_name)
                    policy.set_rule_comment(r, new_rule['comment'])
            invalid_rule = False
            continue
        # Inside a term: 'token :: value [value ...]' lines.
        token = line.split('::')[0].strip()
        if token in INVALID_TOKENS:
            invalid_rule = True
        elif token in TOKENS:
            if token == 'comment' or token == 'action':
                new_rule[token] = line.split('::')[1].strip()
            else:
                v = line.split('::')[1].strip()
                for i in v.split(' '):
                    if i.strip() == '':
                        continue
                    new_rule[token].append(i)
    return True
if __name__ == "__main__":
policy = FWPolicy(sys.argv[1], sys.argv[1])
pol_parser(sys.argv[1], sys.argv[2], policy, True)
policy.print_policy()
policy.split_ips()
print '\n\n\n\nPOLICY SPLIT!\n\n\n\n'
policy.print_policy()
|
salesforce/smartACL
|
tests/test_smartCompare.py
|
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import unittest
import sys
import os
from smartACL import linkdef
from smartACL import link_cisco
from smartACL import link_juniper
from smartACL import smartACL
class smartTest(unittest.TestCase):
    def setUp(self):
        # Fixture ACL/policy files used by the smartCompare tests.
        self.filet1 = 'tests/test_data/test_acl_smartCompare1'
        self.filet2 = 'tests/test_data/test_acl_smartCompare2'
        self.filet2a = 'tests/test_data/test_acl_smartCompare2a'
        self.filet3 = 'tests/test_data/test_acl_smartCompare3'
        self.filet4 = 'tests/test_data/test_acl_smartCompare4'
        self.filet5 = 'tests/test_data/test_acl_smartCompare5'
        self.filet6 = 'tests/test_data/test_acl_smartCompare6'
        self.filet7 = 'tests/test_data/test_acl_smartCompare7'
        self.filet8 = 'tests/test_data/test_acl_smartCompare8'
        self.filet9 = 'tests/test_data/test_acl_smartCompare9'
        self.filet10 = 'tests/test_data/test_acl_smartCompare10'
        self.filet11 = 'tests/test_data/test_acl_smartCompare11'
        self.filet12 = 'tests/test_data/test_acl_smartCompare12'
        self.filet13 = 'tests/test_data/test_acl_smartCompare13'
        self.filet14 = 'tests/test_data/test_acl_smartCompare14'
        self.filet15 = 'tests/test_data/test_acl_smartCompare15'
        self.filet16 = 'tests/test_data/test_acl_smartCompare16'
        self.filet17 = 'tests/test_data/test_acl_smartCompare17'
        self.filet18 = 'tests/test_data/test_acl_smartCompare18'
        self.filet19 = 'tests/test_data/test_acl_smartCompare19'
        # Expected smartCompare2 outputs (rule lines / term names) per file pair.
        self.results_t1_t2 = ['permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.128 0.0.0.127 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.64 0.0.0.63 eq 7080']
        self.results_t1_t2a = ['permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.128 0.0.0.127 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.64 0.0.0.63 eq 7080']
        self.results_t2a_t2a = ['deny tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127 eq 22', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7080']
        self.results_t3_t4 = ['permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7081']
        self.results_t5_t6 = ['permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.128 0.0.0.127 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.64 0.0.0.63 eq 7080']
        self.results_t7_t8 = ['permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.64 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.128 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.192 0.0.0.63 eq 7080']
        self.results_t8_t7 = ['permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.254 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.1 0.0.0.254 eq 7080']
        self.results_t7_t7 = ['permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.64 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.128 0.0.0.63 eq 7080', 'permit tcp 10.231.69.128 0.0.0.127 10.0.0.192 0.0.0.63 eq 7080']
        '''
        The T9 - T9 comparison is an interesting case. T9 has two shadowed rules inside, so when we try to compare it with itself,
        the two shadowed rules are shown like NOT matched. That is completely TRUE. Although it could seem to be inconsistent,
        indeed these two lines will be never matched, so in this case, smartCompare is working fine.
        The same would apply to T10 - T10 and T9 - T10
        '''
        self.results_t9_t9 = ['term testt1', 'term testt2', 'term testt3', 'term testt4']
        self.results_t10_t10 = ['term testt1', "term testt2{2{1{['10.0.0.192/255.255.255.192', '10.0.1.0/255.255.255.128']", "term testt2{2{2{['10.0.0.192/255.255.255.192', '10.0.1.128/255.255.255.192']", 'term testt3', 'term testt4']
        self.results_t9_t10 = ['term testt3', 'term testt4']
        self.results_t11_t12 = ['term testt2', 'term testt3', 'term testt5']
        self.results_t11_t12_is = ['term testt3', 'term testt5']
        self.results_t13_t13 = ['permit udp 0.0.0.0 0.0.0.0 eq 67 255.255.255.255 0.0.0.0 eq 68', 'permit udp any eq 68 255.255.255.255 0.0.0.0 eq 67', 'permit udp 192.168.1.0 0.0.0.63 eq 68 any eq 68', 'permit udp 192.168.1.192 0.0.0.63 eq 68 any eq 68']
        self.results_t14_t15 = ['permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127']
        self.results_t15_t14 = []
        self.results_t16_t17 = []
        self.results_t17_t16 = []
        self.results_t18_t19 = ['term testt1', 'term testt2']
        self.results_t19_t18 = ["term testt1{1{1{['10.0.0.0/255.255.255.0', '10.0.1.0/255.255.255.0']", "term testt1{1{2{['10.0.0.0/255.255.255.0', '10.0.1.0/255.255.255.0']"]
        # Silence the policy-printing output during tests; keep the original
        # stdout around so it could be restored.
        null = open(os.devnull, 'w')
        self.stdout = sys.stdout
        sys.stdout = null
        self.longMessage = True
def test_smartCompare_t1_t2(self):
policy1 = linkdef.FWPolicy('', self.filet1, False)
link_cisco.acl_parser(self.filet1, policy1, False)
policy2 = linkdef.FWPolicy('', self.filet2, False)
link_cisco.acl_parser(self.filet2, policy2, False)
policy1.split_ips()
policy2.split_ips()
smartacl_result = smartACL.smartCompare2(policy1, policy2, verbose=False,only_different=False,outprint=False,ignore_lines='',ignoredeny=False, ignoreshadowed=False, DEBUG=False)
self.assertEqual(smartacl_result, self.results_t1_t2, 'Normal Test')
smartacl_result = smartACL.smartCompare2(policy1, policy2, verbose=False,only_different=False,outprint=False,ignore_lines='',ignoredeny=False, ignoreshadowed=True, DEBUG=False)
self.assertEqual(smartacl_result, self.results_t1_t2, 'Ignoring Shadowed Rules')
def test_smartCompare_t1_t2a(self):
    """t1 vs t2a (Cisco): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet1, False)
    link_cisco.acl_parser(self.filet1, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet2a, False)
    link_cisco.acl_parser(self.filet2a, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t1_t2a, label)
def test_smartCompare_t2a_t2a(self):
    """t2a vs itself (Cisco): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet2a, False)
    link_cisco.acl_parser(self.filet2a, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet2a, False)
    link_cisco.acl_parser(self.filet2a, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t2a_t2a, label)
def test_smartCompare_t3_t4(self):
    """t3 vs t4 (Cisco): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet3, False)
    link_cisco.acl_parser(self.filet3, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet4, False)
    link_cisco.acl_parser(self.filet4, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t3_t4, label)
def test_smartCompare_t5_t6(self):
    """t5 vs t6 (Cisco): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet5, False)
    link_cisco.acl_parser(self.filet5, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet6, False)
    link_cisco.acl_parser(self.filet6, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t5_t6, label)
def test_smartCompare_t7_t7(self):
    """t7 vs itself (Cisco): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet7, False)
    link_cisco.acl_parser(self.filet7, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet7, False)
    link_cisco.acl_parser(self.filet7, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t7_t7, label)
def test_smartCompare_t7_t8(self):
    """t7 vs t8 (Cisco): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet7, False)
    link_cisco.acl_parser(self.filet7, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet8, False)
    link_cisco.acl_parser(self.filet8, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t7_t8, label)
def test_smartCompare_t8_t7(self):
    """t8 vs t7 (Cisco, reverse direction): same expectations under both shadow modes."""
    pol_a = linkdef.FWPolicy('', self.filet8, False)
    link_cisco.acl_parser(self.filet8, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet7, False)
    link_cisco.acl_parser(self.filet7, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t8_t7, label)
def test_smartCompare_t9_t9(self):
    """t9 vs itself (Juniper): exact match normally; order-insensitive match when
    shadowed rules are ignored (a shadowed rule is removed, reordering the output).
    """
    policy1 = linkdef.FWPolicy('', self.filet9, False)
    link_juniper.jcl_parser(self.filet9, policy1, False)
    policy2 = linkdef.FWPolicy('', self.filet9, False)
    link_juniper.jcl_parser(self.filet9, policy2, False)
    policy1.split_ips()
    policy2.split_ips()
    smartacl_result = smartACL.smartCompare2(policy1, policy2, verbose=False, only_different=False,
                                             outprint=False, ignore_lines='', ignoredeny=False,
                                             ignoreshadowed=False, DEBUG=False)
    self.assertEqual(smartacl_result, self.results_t9_t9, 'Normal Test')
    smartacl_result = smartACL.smartCompare2(policy1, policy2, verbose=False, only_different=False,
                                             outprint=False, ignore_lines='', ignoredeny=False,
                                             ignoreshadowed=True, DEBUG=False)
    # Because the shadowed rule is removed, both lists need to be order-insensitive.
    # BUG FIX: list.sort() returns None, so the original
    # assertEqual(result.sort(), expected.sort()) compared None == None and
    # always passed. Compare sorted *copies* instead so the test can fail.
    self.assertEqual(sorted(smartacl_result), sorted(self.results_t9_t9),
                     'Ignoring Shadowed Rules')
def test_smartCompare_t10_t10(self):
    """t10 vs itself (Juniper): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet10, False)
    link_juniper.jcl_parser(self.filet10, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet10, False)
    link_juniper.jcl_parser(self.filet10, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t10_t10, label)
def test_smartCompare_t9_t10(self):
    """t9 vs t10 (Juniper): expected diff is the same with and without shadowed-rule filtering."""
    pol_a = linkdef.FWPolicy('', self.filet9, False)
    link_juniper.jcl_parser(self.filet9, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet10, False)
    link_juniper.jcl_parser(self.filet10, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t9_t10, label)
def test_smartCompare_t11_t12(self):
    """t11 vs t12 (Juniper), normal mode only; the ignoreshadowed variant is a
    separate test because its expected output differs."""
    pol_a = linkdef.FWPolicy('', self.filet11, False)
    link_juniper.jcl_parser(self.filet11, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet12, False)
    link_juniper.jcl_parser(self.filet12, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                     only_different=False, outprint=False,
                                     ignore_lines='', ignoredeny=False,
                                     ignoreshadowed=False, DEBUG=False)
    self.assertEqual(outcome, self.results_t11_t12, 'Normal Test')
'''
This is a very special case that it's better to have it separated because the results are different with/without "ignoreshadow" option.
Explanation (simplified):
- ACL1:
- Rule1
- Rule2 -> Shadowed by Rule1
- ACL2:
- Rule2
With ignoreshadow FALSE:
- The Rule1 is NOT in ACL2
- The Rule2 is in ACL2
- The output shows that Rule1 is missing
With ignoreshadow TRUE:
- The Rule2 is removed because it's shadowed by Rule1
- The Rule1 is NOT in ACL2
- The output shows Rule1 and Rule2 are missing (Rule1 logically, but also all shadowed rules like Rule2)
'''
def test_smartCompare_t11_t12_ignoreshadowed(self):
    """t11 vs t12 (Juniper) with ignoreshadowed=True; expects the dedicated
    results_t11_t12_is fixture (see the explanatory note above this test)."""
    pol_a = linkdef.FWPolicy('', self.filet11, False)
    link_juniper.jcl_parser(self.filet11, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet12, False)
    link_juniper.jcl_parser(self.filet12, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                     only_different=False, outprint=False,
                                     ignore_lines='', ignoredeny=False,
                                     ignoreshadowed=True, DEBUG=False)
    self.assertEqual(outcome, self.results_t11_t12_is, 'Ignoring Shadowed Rules')
def test_smartCompare_t13_t13(self):
    """t13 vs itself (Cisco, no IP splitting): same result under both shadow modes."""
    pol_a = linkdef.FWPolicy('', self.filet13, False)
    link_cisco.acl_parser(self.filet13, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet13, False)
    link_cisco.acl_parser(self.filet13, pol_b, False)
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t13_t13, label)
def test_smartCompare_t14_t15(self):
    """t14 vs t15 (Cisco, no IP splitting): same result under both shadow modes."""
    pol_a = linkdef.FWPolicy('', self.filet14, False)
    link_cisco.acl_parser(self.filet14, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet15, False)
    link_cisco.acl_parser(self.filet15, pol_b, False)
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t14_t15, label)
def test_smartCompare_t15_t14(self):
    """t15 vs t14 (Cisco, reverse direction): same result under both shadow modes."""
    pol_a = linkdef.FWPolicy('', self.filet15, False)
    link_cisco.acl_parser(self.filet15, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet14, False)
    link_cisco.acl_parser(self.filet14, pol_b, False)
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t15_t14, label)
def test_smartCompare_t16_t17(self):
    """t16 vs t17 (Cisco, no IP splitting): same result under both shadow modes."""
    pol_a = linkdef.FWPolicy('', self.filet16, False)
    link_cisco.acl_parser(self.filet16, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet17, False)
    link_cisco.acl_parser(self.filet17, pol_b, False)
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t16_t17, label)
def test_smartCompare_t17_t16(self):
    """t17 vs t16 (Cisco, reverse direction): same result under both shadow modes."""
    pol_a = linkdef.FWPolicy('', self.filet17, False)
    link_cisco.acl_parser(self.filet17, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet16, False)
    link_cisco.acl_parser(self.filet16, pol_b, False)
    for skip_shadowed, label in ((False, 'Normal Test'), (True, 'Ignoring Shadowed Rules')):
        outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                         only_different=False, outprint=False,
                                         ignore_lines='', ignoredeny=False,
                                         ignoreshadowed=skip_shadowed, DEBUG=False)
        self.assertEqual(outcome, self.results_t17_t16, label)
def test_smartCompare_t18_t19(self):
    """t18 vs t19 (Juniper), normal mode only."""
    pol_a = linkdef.FWPolicy('', self.filet18, False)
    link_juniper.jcl_parser(self.filet18, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet19, False)
    link_juniper.jcl_parser(self.filet19, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                     only_different=False, outprint=False,
                                     ignore_lines='', ignoredeny=False,
                                     ignoreshadowed=False, DEBUG=False)
    self.assertEqual(outcome, self.results_t18_t19, 'Normal Test')
def test_smartCompare_t19_t18(self):
    """t19 vs t18 (Juniper, reverse direction), normal mode only."""
    pol_a = linkdef.FWPolicy('', self.filet19, False)
    link_juniper.jcl_parser(self.filet19, pol_a, False)
    pol_b = linkdef.FWPolicy('', self.filet18, False)
    link_juniper.jcl_parser(self.filet18, pol_b, False)
    pol_a.split_ips()
    pol_b.split_ips()
    outcome = smartACL.smartCompare2(pol_a, pol_b, verbose=False,
                                     only_different=False, outprint=False,
                                     ignore_lines='', ignoredeny=False,
                                     ignoreshadowed=False, DEBUG=False)
    self.assertEqual(outcome, self.results_t19_t18, 'Normal Test')
def tearDown(self):
    # Restore the real stdout that setUp redirected to os.devnull.
    sys.stdout = self.stdout
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
salesforce/smartACL
|
smartACL/link_fortigate.py
|
<reponame>salesforce/smartACL<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import sys
from linkdef import *
try:
import netaddr
except:
try:
import sys
sys.path.insert(0, 'smartACL/third_party')
sys.path.insert(0, 'third_party')
import third_party.netaddr as netaddr
except:
raise ImportError("The netaddr module is not installed, exiting...")
###### ADDRESS OBJECTS
# Section-header strings exactly as they appear in a Fortigate config dump;
# for_parser() matches whole lines against these to switch parsing state.
ADDRESS_OBJ = 'config firewall address'
# type iprange
#   start-ip -> ip
#   end-ip -> ip
#
# type ipmask
#   subnet -> X.X.X.X Y.Y.Y.Y (net / mask)
MULTICAST_OBJ = 'config firewall multicast-address'
# type multicastrange
#   start-ip -> ip
#   end-ip -> ip
ADDRESS6_OBJ = 'config firewall address6'
# type ipprefix
#   ip6 -> fdff:ffff::/120
ADDGRP_OBJ = 'config firewall addrgrp'
# member -> list separated by space of other objects
SERVICE_OBJ = 'config firewall service custom'
SERVICEGRP_OBJ ='config firewall service group'
IPPOOL_OBJ = 'config firewall ippool'
FIREWALL_VIP = 'config firewall vip'
# VIPs are SPECIAL Objects -> they embed protocol and PORT; we need to use this for the rule
FIREWALL_POLICY = 'config firewall policy'
# Module-level symbol tables populated by for_parser() and consumed by
# get_ips() / get_service(); each maps object name -> {property: value}.
addr_dict = {}    # address / address6 / multicast / address-group objects
serv_dict = {}    # service and service-group objects
ippool_dict = {}  # IP pool objects (parsed but not otherwise referenced here)
fwvip_dict = {}   # VIP objects, resolved via their 'extip' property
def get_ips(name):
    """Resolve a Fortigate address-object name (or a space-separated list of
    names) into a list of 'ip/netmask' strings.

    Looks the name up in the module-level addr_dict (addresses, ranges and
    address groups, resolved recursively) and fwvip_dict (VIPs, resolved to
    their 'extip'). Returns None when the name is in neither table.
    """
    def expand_ips(start_ip, end_ip):
        # Collapse an inclusive start..end IP range into the minimal set of
        # CIDR blocks, rendered as 'ip/netmask' strings.
        list_ip = []
        ips = netaddr.cidr_merge(list(netaddr.iter_iprange(start_ip, end_ip)))
        for i in ips:
            ip = str(i.ip) + '/' + str(i.netmask)
            list_ip.append(ip)
        return list_ip
    ip_list = []
    if len(name.split()) > 1:
        # Several object names on one line: resolve each recursively and keep
        # each resolution as a single space-joined string entry.
        # NOTE(review): if a nested lookup returns None, ' '.join(None) raises
        # TypeError — confirm missing objects cannot occur in this branch.
        for i in name.split():
            ip_t = ' '.join(get_ips(i)).strip()
            ip_list.append(ip_t)
    else:
        if name not in addr_dict and name not in fwvip_dict:
            print 'ERROR: Object:', name, 'found in policy but NOT found in objects.'
            return None
        if name in addr_dict:
            value = addr_dict[name]
            if 'member' in value:
                # It's an Address Group
                members = value['member'].split()
                for member in members:
                    if member not in addr_dict:
                        print 'ERROR: Object:', member,'referenced in Address Group object:', name, 'but not found.'
                        continue
                    ip_t = ' '.join(get_ips(member)).strip()
                    ip_list.append(ip_t)
            #
            # The original idea was to check:
            #  - first, if "member" was included, so the object should be a addrgrp
            #  - second, use "type" to identify the object, but unfortunately this is not possible because
            #    Fortigates allows to have different object with the same name, but from the Policy there is
            #    no information which object should be used first. We assume that it should be first Address
            #    type
            #
            #
            elif 'subnet' in value:
                # ipmask object: 'subnet' holds "net mask"; join with '/'.
                ip_list = [value['subnet'].split()[0] + '/' + value['subnet'].split()[1]]
            elif 'multicastrange' in value or 'iprange' in value:
                # NOTE(review): this membership test checks dict *keys*, but
                # for_parser stores the object type as the value of the 'type'
                # property — verify whether value.get('type') was intended here.
                ip_list = expand_ips(value['start-ip'], value['end-ip'])
            else:
                print 'ERROR: Object:', name, 'can not be identified'
        else:
            # VIP object: use its external IP.
            ip_list = [fwvip_dict[name]['extip']]
    return ip_list
def get_service(name):
service_list = []
if len(name.split()) > 1:
for i in name.split():
service_list = service_list + get_service(i)
else:
if name not in serv_dict:
print 'ERROR: Object:', name, 'found in policy but NOT found in objects.'
return None
value = serv_dict[name]
if 'member' in value:
members = value['member'].split()
for member in members:
if member not in serv_dict:
print 'ERROR: Object:', member, 'referenced in Service Group object:', name, 'but not found.'
continue
service_list = service_list + get_service(member)
else:
if 'protocol' in value and value['protocol'] == 'ICMP':
service_list.append('icmp')
elif 'protocol' in value and value['protocol'] == 'IP':
service_list.append('ip')
else:
if 'tcp-portrange' in value:
for i in value['tcp-portrange'].split():
service_list.append(i + '/tcp')
if 'udp-portrange' in value:
for i in value['udp-portrange'].split():
service_list.append(i + '/udp')
return service_list
def for_parser(filename, policy, DEBUG=False):
    """Parse a Fortigate configuration into `policy`.

    :param filename: path to a config file, or an already-read list of lines
    :param policy: FWPolicy instance to populate via new_rule()
    :param DEBUG: echo every input line when True
    :return: True on success, None if an unsupported 'service-negate' is found

    Side effects: fills the module-level addr_dict / serv_dict / ippool_dict /
    fwvip_dict tables, which get_ips() and get_service() read.
    """
    if type(filename) is not list:
        f = open(filename, 'r')
    else:
        f = filename
    # One flag per config section; exactly one is True while inside a section.
    config_addr = False
    config_serv = False
    config_ippool = False
    config_fwvip = False
    config_policy = False
    policy.set_name('Fortigate Policy')
    for line in f:
        line = line.strip()
        if DEBUG:
            print line
        # Enter a section when the line matches one of the known headers.
        if line in [ADDRESS_OBJ, MULTICAST_OBJ, ADDRESS6_OBJ, ADDGRP_OBJ]:
            config_addr = True
        elif line in [SERVICE_OBJ, SERVICEGRP_OBJ]:
            config_serv = True
        elif line in [IPPOOL_OBJ]:
            config_ippool = True
        elif line in [FIREWALL_VIP]:
            config_fwvip = True
        elif line in [FIREWALL_POLICY]:
            config_policy = True
        # 'end' closes whatever section we were in.
        if line.startswith('end'):
            config_addr = False
            config_serv = False
            config_ippool = False
            config_fwvip = False
            config_policy = False
        in_config = config_addr or config_serv or config_ippool or config_fwvip
        # 'edit "<name>"' opens a new object inside an object section.
        if in_config and line.startswith('edit'):
            obj_name = line.split(' ')[1].strip('"')
            if config_addr:
                # Unforunately Fortigate allows duplicated object names;
                # keep the first definition seen.
                if obj_name not in addr_dict:
                    addr_dict[obj_name] = {}
            elif config_serv:
                serv_dict[obj_name] = {}
            elif config_ippool:
                ippool_dict[obj_name] = {}
            elif config_fwvip:
                fwvip_dict[obj_name] = {}
        # 'set <prop> <value...>' stores a property on the current object.
        if in_config and line.startswith('set'):
            prop = line.split(' ')[1].strip('"')
            value = ' '.join(line.split(' ')[2:])
            value = value.replace('"', '').replace("'", '')
            if config_addr:
                addr_dict[obj_name][prop] = value
            elif config_serv:
                serv_dict[obj_name][prop] = value
            elif config_ippool:
                ippool_dict[obj_name][prop] = value
            elif config_fwvip:
                fwvip_dict[obj_name][prop] = value
        if config_policy:
            if line.startswith('edit'):
                # New policy rule: reset all per-rule accumulators.
                rule_number = line.split(' ')[1]
                status = True
                source_neg = False
                dest_neg = False
                service_neg = False
                source_name = ''
                dest_name = ''
                source = ''
                dest = ''
                sport = ''
                dport = ''
                protocol = ''
                service = ''
                comments = ''
                rule_name = ''
            elif line.startswith('next'):
                # Create the rule (only if not disabled via 'set status').
                # NOTE(review): `action` is only bound when a 'set action' line
                # was seen for this rule — confirm Fortigate always emits one,
                # otherwise new_rule() raises NameError here.
                if status:
                    source = ','.join(source)
                    dest = ','.join(dest)
                    comments = '(Rule: ' + rule_number + ') ' + comments
                    dport_udp = ''
                    dport_tcp = ''
                    # In Fortigate services with source port use ":" inside the port
                    # these "services" are processed creating one by one rule
                    t_source_tcp_port = []
                    t_source_udp_port = []
                    # Let's start with IP and ICMP rules
                    t_service = list(service)
                    for i in service:
                        if ':' in i:
                            # Source-port service: defer to the per-entry loops below.
                            if 'tcp' in i:
                                t_source_tcp_port.append(i)
                            else:
                                t_source_udp_port.append(i)
                            t_service.remove(i)
                            continue
                        if i == 'ip':
                            dport = '0'
                            sport = '0'
                            protocol = 'ip'
                            t_service.remove(i)
                        elif 'icmp' in i:
                            dport = '0'
                            sport = '0'
                            protocol = 'icmp'
                            t_service.remove(i)
                        else:
                            continue
                        # Reached only for 'ip' / icmp entries: one rule each.
                        rule = policy.new_rule(source,
                                               dest,
                                               dport,
                                               sport,
                                               protocol,
                                               action,
                                               wildcard=False,
                                               source_name=source_name,
                                               dest_name=dest_name,
                                               source_negated=source_neg,
                                               dest_negated=dest_neg)
                        policy.set_name(rule_name)
                        policy.set_rule_comment(rule, comments)
                    # Continue creating TCP and/or UDP rules: collect all
                    # destination ports into one comma-separated rule per protocol.
                    for i in t_service:
                        if 'udp' in i:
                            dport_udp = dport_udp + ',' + i.split('/')[0]
                            continue
                        elif 'tcp' in i:
                            dport_tcp = dport_tcp + ',' + i.split('/')[0]
                            continue
                    if dport_udp != '':
                        sport = ''
                        # [1:] drops the leading comma accumulated above.
                        rule = policy.new_rule(source,
                                               dest,
                                               dport_udp[1:],
                                               sport,
                                               'udp',
                                               action,
                                               wildcard=False,
                                               source_name=source_name,
                                               dest_name=dest_name,
                                               source_negated=source_neg,
                                               dest_negated=dest_neg)
                        policy.set_name(rule_name)
                        policy.set_rule_comment(rule, comments)
                    if dport_tcp != '':
                        sport = ''
                        rule = policy.new_rule(source,
                                               dest,
                                               dport_tcp[1:],
                                               sport,
                                               'tcp',
                                               action,
                                               wildcard=False,
                                               source_name=source_name,
                                               dest_name=dest_name,
                                               source_negated=source_neg,
                                               dest_negated=dest_neg)
                        policy.set_name(rule_name)
                        policy.set_rule_comment(rule, comments)
                    # Finally rules with source port ("dport:sport/proto" form).
                    for i in t_source_udp_port:
                        sport = i.split('/')[0].split(':')[1]
                        dport = i.split('/')[0].split(':')[0]
                        protocol = 'udp'
                        rule = policy.new_rule(source,
                                               dest,
                                               dport,
                                               sport,
                                               protocol,
                                               action,
                                               wildcard=False,
                                               source_name=source_name,
                                               dest_name=dest_name,
                                               source_negated=source_neg,
                                               dest_negated=dest_neg)
                        policy.set_name(rule_name)
                        policy.set_rule_comment(rule, comments)
                    for i in t_source_tcp_port:
                        sport = i.split('/')[0].split(':')[1]
                        dport = i.split('/')[0].split(':')[0]
                        protocol = 'tcp'
                        rule = policy.new_rule(source,
                                               dest,
                                               dport,
                                               sport,
                                               protocol,
                                               action,
                                               wildcard=False,
                                               source_name=source_name,
                                               dest_name=dest_name,
                                               source_negated=source_neg,
                                               dest_negated=dest_neg)
                        policy.set_name(rule_name)
                        policy.set_rule_comment(rule, comments)
            else:
                # Any other line inside the policy section: treat as
                # 'set <prop> <value...>'. (The section-header line itself also
                # lands here but parses to prop='firewall', which matches no
                # branch and is harmless.)
                prop = line.split(' ')[1]
                value = ' '.join(line.split(' ')[2:])
                value = value.replace('"', '').replace("'", '')
                if prop == 'srcintf':
                    source_name = value
                elif prop == 'dstintf':
                    dest_name = value
                elif prop == 'srcaddr':
                    source = get_ips(value)
                elif prop == 'dstaddr':
                    dest = get_ips(value)
                elif prop == 'service':
                    service = get_service(value)
                elif prop == 'comments':
                    comments = value
                elif prop == 'label':
                    rule_name = value
                elif prop == 'action':
                    if 'accept' in value:
                        action = True
                    else:
                        action = False
                elif prop == 'status':
                    if 'enable' in value:
                        status = True
                    else:
                        status = False
                elif prop == 'srcaddr-negate':
                    source_neg = True
                elif prop == 'dstaddr-negate':
                    dest_neg = True
                elif prop == 'service-negate':
                    # Unsupported feature: abort parsing immediately.
                    service_neg = True
                    print 'ERROR: Service-negate found. This is not supported by Link'
                    return None
    return True
if __name__ == "__main__":
    # Ad-hoc smoke test: parse the Fortigate config given as argv[1] and
    # dump the resulting policy to stdout.
    d=False
    policy = FWPolicy(sys.argv[1],sys.argv[1], DEBUG=d)
    for_parser(sys.argv[1], policy)
    policy.print_policy()
|
salesforce/smartACL
|
tests/test_smartLog.py
|
<filename>tests/test_smartLog.py
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import unittest
import sys
import os
from smartACL import smartACL
class smartTest(unittest.TestCase):
    """Regression tests for smartACL.smartLog over canned .diff fixtures."""

    def setUp(self):
        # Fixture files and the exact smartLog() output expected for each.
        self.filetacl_basic = 'tests/test_data/test_acl_basic.diff'
        self.filetacl_basic2 = 'tests/test_data/test_acl_basic2.diff'
        self.filetacl_basic4 = 'tests/test_data/test_acl_basic4.diff'
        self.filetacl_non = 'tests/test_data/test_acl_non.diff'
        self.filetacl_non_and_con = 'tests/test_data/test_acl_non_and_con.diff'
        self.filetacl_splitted = 'tests/test_data/test_acl_splitted.diff'
        self.results_filetacl_basic = [{'tests/test_data/test_acl_basic.diff': []}, [1, 0, 0, 1, 1, 0, 0, 0, [], 0]]
        self.results_filetacl_basic2 = [{'tests/test_data/test_acl_basic2.diff': []}, [2, 0, 0, 1, 0, 1, 0, 0, [], 0]]
        self.results_filetacl_basic4 = [{'tests/test_data/test_acl_basic4.diff': ['permit tcp 10.0.0.0 0.0.0.255 192.168.3.11 0.0.0.255 eq 80']}, [2, 0, 0, 2, 1, 0, 0, 0, [], 0]]
        self.results_filetacl_non = [{'tests/test_data/test_acl_non.diff': ['permit tcp 10.0.0.0 0.0.0.255 172.16.31.10 8.64.4.0 eq 80']}, [2, 0, 0, 1, 0, 0, 0, 0, [], 0]]
        self.results_filetacl_non_and_con = [{'tests/test_data/test_acl_non_and_con.diff': []}, [1, 0, 0, 2, 0, 2, 0, 0, [], 0]]
        self.results_filetacl_splitted = [{'tests/test_data/test_acl_splitted.diff': ['permit tcp 10.231.69.128 0.0.0.127 10.0.0.0 0.0.0.255 eq 7080']}, [2, 0, 0, 1, 0, 0, 0, 0, [], 0]]
        # Silence smartLog's console output for the duration of each test.
        self._null = open(os.devnull, 'w')
        self.stdout = sys.stdout
        sys.stdout = self._null

    def _check(self, path, expected):
        # Run smartLog on one fixture and compare against its canned result.
        self.assertEqual(smartACL.smartLog(path), expected)

    def test_smartLog_acl_basic(self):
        self._check(self.filetacl_basic, self.results_filetacl_basic)

    def test_smartLog_acl_basic2(self):
        self._check(self.filetacl_basic2, self.results_filetacl_basic2)

    def test_smartLog_acl_basic4(self):
        self._check(self.filetacl_basic4, self.results_filetacl_basic4)

    def test_smartLog_acl_non(self):
        self._check(self.filetacl_non, self.results_filetacl_non)

    def test_smartLog_acl_non_and_con(self):
        self._check(self.filetacl_non_and_con, self.results_filetacl_non_and_con)

    def test_smartLog_acl_splitted(self):
        self._check(self.filetacl_splitted, self.results_filetacl_splitted)

    def tearDown(self):
        # Restore the stdout redirected in setUp.
        sys.stdout = self.stdout
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
salesforce/smartACL
|
smartACL/smartACL.py
|
<filename>smartACL/smartACL.py<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import sys
import os
import glob
import argparse
from copy import deepcopy
import link_cisco # This import needs to be before linkdef
import link_juniper
import link_pol
import tools # This import needs to be before linkdef
try:
import ipaddr
except:
try:
import third_party.ipaddr as ipaddr
except:
raise ImportError("The ipaddr module is not installed, exiting...")
from linkdef import *
def smartCheck(policy_del, policy_add,
list_rules_match_add=None,
matched_rules_extended=None,
subpolicy=False,
print_add_matches=False,
print_progress=False,
DEBUG=False):
"""
smartCheck will compare two policies trying to find if we can remove from the first policy rules that they are already included in the second one.
:param policy_del: Rules that usually we want to check if we can delete
:param policy_add: Rules that usually we want to check if they "cover" rules to be deleted
:param list_rules_match_add: (OUTPUT Parameter) list of all rules matching the "del" rule. Usually this parameter is internal and it is only needed for non-contiguous wildcard and split networks
:param matched_rules_extended: (OUTPUT Parameter) dictionary with rules can be removed and their matches
:param subpolicy: True when checking a subpolicy created from a parent one. For example when a big network needs to be matched by split networks
:param print_add_matches: Switch to see which rules match the one to be removed
:param print_progress: Swith for a very verbose mode
:param DEBUG: debug flag
:return: List of rules matched
"""
def _is_any(wild, ip):
if wild:
return ip == '0.0.0.0/255.255.255.255'
else:
return ip == '0.0.0.0/0.0.0.0'
def _is_ip_equal(ip1, ip2):
if ip1 == ip2:
return True
try:
ipa1 = ipaddr.IPv4Network(ip1)
ipa2 = ipaddr.IPv4Network(ip2)
except:
return False
return ipa1 == ipa2
tools.DEBUG(DEBUG, 'smartCheck', 'Entering smartCheck. subpolicy:', subpolicy)
# Sentinel values
if list_rules_match_add is None:
list_rules_match_add = []
if matched_rules_extended is None:
matched_rules_extended = {}
# It reduces complexity if policy_add is always split for non-cont
if not subpolicy:
policy_add.split_non_contiguous_wild()
rules_to_remove = []
rules_not_matched = []
a = policy_del.get_rules_number()
for x in range(1, a+1):
if print_progress and not subpolicy:
print 'Processing rule:', x, 'out of', a
rule_found = False
non_contigouos_found = False
fake_any = False
is_0_any = True
ruled = policy_del.get_rule(x)
tools.DEBUG(DEBUG, 'smartCheck', 'get_rule:', ruled)
if ruled[0] in rules_not_matched:
continue
# Usually the last rule it's a DENY IP ANY ANY LOG, if this is the case, we skip it
if x == a:
if _is_any(ruled[7], ruled[1]) and _is_any(ruled[7], ruled[2]) and not ruled[6]:
continue
if ruled[0].startswith('^'): # Especial rule, usually empty or inactive
list_rules_match_add.append(ruled[0])
rule_found = True
# if DSMO rule was split and one of the networks didn't match. NOT continue
if subpolicy:
if ruled[0].startswith('split dsmo rule'):
if ruled[0].split('}')[1] in rules_not_matched:
continue
wildcard = ruled[7]
# The wildcard 0.0.0.0 (host wildcard) is not working with ipaddress library as wildcard, only as netmask
# the same happens with 255.255.255.255 so:
# 0.0.0.0 wildcard -> 255.255.255.255 netmask
# 255.255.255.255 wilcard -> 0.0.0.0 netmask
if wildcard and (ruled[1] == '0.0.0.0/0.0.0.0' or ruled[2] == '0.0.0.0/0.0.0.0'):
# Link use 0.0.0.0 usually as ANY. This is the specific case when the host 0.0.0.0/32 is being request to be checked
is_0_any = False
if not rule_found and (ruled[1] == '' or ruled[2] == ''): # Empty rule
continue
sIP = ruled[1]
if wildcard and '/0.0.0.0' in sIP:
sIP = sIP.split('/')[0] + '/255.255.255.255'
elif wildcard and sIP == '0.0.0.0/255.255.255.255':
sIP = sIP.split('/')[0] + '/0'
else:
if wildcard:
non_contigouos_found = not tools.wild_is_contiguous(sIP.split('/')[1])
dIP = ruled[2]
if wildcard == True and '/0.0.0.0' in dIP:
dIP = dIP.split('/')[0] + '/255.255.255.255'
elif wildcard == True and dIP == '0.0.0.0/255.255.255.255':
dIP = dIP.split('/')[0] + '/0'
else:
if wildcard:
non_contigouos_found = non_contigouos_found or not tools.wild_is_contiguous(dIP.split('/')[1])
dPort = ruled[3]
sPort = ruled[4]
proto = ruled[5]
action = ruled[6]
# If the rule that we are checking has non-cont wild, we need to split it and check again
if non_contigouos_found:
# If this is a subpolicy (policy_temp) and we find a non-cont, we can't continue checking
# in subpolicies are not allowed non-cont
if subpolicy:
while len(list_rules_match_add) > 0:
list_rules_match_add.pop()
return []
policy_del_temp = FWPolicy('del-temp', 'temp', DEBUG)
policy_del_temp.new_rule(ruled[1], ruled[2], ruled[3], ruled[4], ruled[5], ruled[6], ruled[7])
policy_del_temp.split_non_contiguous_wild()
tools.DEBUG(DEBUG, 'smartCheck', 'Non contiguous found. Splitting policy.', ruled)
if policy_del_temp.check_if_any_non_contiguous():
tools.DEBUG(DEBUG, 'smartCheck', 'Non contiguous found after policy splitting. Can not continue.', ruled)
if subpolicy:
while len(list_rules_match_add) > 0:
list_rules_match_add.pop()
return []
else:
if ruled[0] in rules_to_remove:
rules_to_remove.remove(ruled[0])
rules_not_matched.append(ruled[0])
while len(list_rules_match_add) > 0:
list_rules_match_add.pop()
else:
# When a rule is split, all the new rules will have the same name of the parent rule
# While this is the expected behaviour, it creates a problem, because we need to match
# the whole new temp policy, to know that the parent rule is fully covered
# To do that, we need to rename the temp-policy with different names
policy_temp_len = policy_del_temp.get_rules_number()
for icont in range(1, policy_temp_len + 1):
policy_del_temp.set_rule_name(icont, 'split dsmo rule - ' + str(icont) + ' }' + ruled[0])
tempPolicy_rules_matched = smartCheck(policy_del_temp, policy_add, list_rules_match_add, matched_rules_extended, subpolicy=True, print_add_matches=False, DEBUG=DEBUG)
if len(tempPolicy_rules_matched) == policy_temp_len:
rule_found = True
else:
rules_not_matched.append(ruled[0])
if ruled[0] in rules_to_remove:
rules_to_remove.remove(ruled[0])
elif not rule_found:
if dPort != '0' and sPort != '0':
tools.DEBUG(DEBUG, 'smartCheck', 'request check flow:', sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto)
check1 = policy_add.link(sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto, show_deny=True, hide_allow_all=False, strict_search=True, is_0_any=is_0_any)
tools.DEBUG(DEBUG, 'smartCheck', 'requested check flow answer', check1)
else:
'''
When dport or sport are 0, we want to check ALL ports, so ALL ports should be allowed. When LINK sees a 0
in port (destination or source) is going to match with ANY rule that matches source/destination IPs, because 0 in port means ANY rule.
In this case, we need something different, 0 it doesn't mean any, it means ALL. So, we have to perform a STRICT SEARCH:
- Check the first rule matched
- If we hit a DENY, clearly ALL ports are NOT allowed.
- If we hit any other rule, we need to verify if would need to catch any DENY that could hit (NO STRICT SEARCH)
'''
tools.DEBUG(DEBUG, 'smartCheck', 'dport or sport is 0. Checking ALL')
tools.DEBUG(DEBUG, 'smartCheck', 'request check flow:', sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto)
check1 = policy_add.link(sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto, show_deny=True, hide_allow_all=False, strict_search=True, is_0_any=is_0_any)
tools.DEBUG(DEBUG, 'smartCheck', 'dport/sport != 0 first requested check flow answer', check1)
if len(check1) > 0:
fake_any = False
'''
There is a candidate rule for ALL ports, now it's time to check if there is ANY DENY above that could affect
DENY HUNT Example:
ACL1:
permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127
ACL2:
deny tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127 eq 22
permit tcp 10.230.0.0 0.0.0.127 10.240.0.0 0.0.0.127
The line1 in ACL1 will match with line2 in ACL2. After that, we need to check if a DENY rule is matching also before the permit
'''
rule_matched = policy_add.get_rule(check1[0])
if rule_matched[6] or True: # Permit = TRUE
tools.DEBUG(DEBUG, 'smartCheck', 'request check flow (DENY HUNT):', sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto)
check2 = policy_add.link(sIP.split('/')[0], dIP.split('/')[0], dPort, sPort, proto, show_deny=True, hide_allow_all=False, strict_search=False, is_0_any=is_0_any)
tools.DEBUG(DEBUG, 'smartCheck', 'dport/sport != 0 second requested check flow answer (DENY HUNT)', check2)
if len(check2) > 0:
for i in check2:
rule_matched2 = policy_add.get_rule(i)
# If there was a match in the 'DENY HUNT' we need to be sure that:
# - if the original rule is an ACCEPT, the HUNT is for a DENY
# - if the original rule is a DENY, the HUNT is for an ACCEPT
if ((rule_matched[6] and not rule_matched2[6]) or
(not rule_matched[6] and rule_matched2[6])):
tools.DEBUG(DEBUG, 'smartCheck', 'Matched DENY. FAKE ANY')
# We found a rule matching with a DENY ABOVE the ANY, so, the ANY is "fake"
fake_any = True
break
matching_action = False
if len(check1) > 0 and not fake_any:
rule_matched = policy_add.get_rule(check1[0])
matching_action = action == rule_matched[6]
if matching_action:
# Action
if action != rule_matched[6]:
continue
# Adding every matching
t = rule_matched[0]
if t not in list_rules_match_add:
list_rules_match_add.append(t)
tools.DEBUG(DEBUG, 'smartCheck', 'Matching rule', check1, 'subpolicy', subpolicy, 'list_rules_match_add:', list_rules_match_add)
# Check is the src/dst that we are looking for is exactly the same we found
if not (_is_ip_equal(ruled[1], rule_matched[1]) and _is_ip_equal(ruled[2], rule_matched[2])):
# When smartCheck is called, all non-contiguous wildcards are split, so if at this point
# we found a match, either in the del_policy or in the add_policy, the rule can't be marked as
# rule match.
if wildcard:
if not _is_any(rule_matched[7], rule_matched[1]):
non_contigouos_found = not tools.wild_is_contiguous(rule_matched[1].split('/')[1])
if not _is_any(rule_matched[7], rule_matched[2]):
non_contigouos_found = non_contigouos_found or not tools.wild_is_contiguous(rule_matched[2].split('/')[1])
# If there is a non-cont wild in the matched rule, we need to go to the next rule, we can't do more
if non_contigouos_found:
t = rule_matched[0]
if t in list_rules_match_add:
list_rules_match_add.remove(t)
continue
tools.DEBUG(DEBUG, 'smartCheck', 'working with IPs', sIP, dIP, rule_matched[1], rule_matched[2])
n1s = ipaddr.IPv4Network(sIP)
n1d = ipaddr.IPv4Network(dIP)
if _is_any(rule_matched[7], rule_matched[1]):
n2s = ipaddr.IPv4Network('0.0.0.0/0')
else:
if '/0.0.0.0' in rule_matched[1]: # This will happen only with wildcard = True
# The wildcard 0.0.0.0 (host wildcard) is not working with ipaddress library
n2s = ipaddr.IPv4Network(rule_matched[1].split('/')[0] + '/255.255.255.255')
else:
n2s = ipaddr.IPv4Network(rule_matched[1])
if _is_any(rule_matched[7], rule_matched[2]):
n2d = ipaddr.IPv4Network('0.0.0.0/0')
else:
if '/0.0.0.0' in rule_matched[2]:
# The wildcard 0.0.0.0 (host wildcard) is not working with ipaddress library
n2d = ipaddr.IPv4Network(rule_matched[2].split('/')[0] + '/25172.16.31.10')
else:
n2d = ipaddr.IPv4Network(rule_matched[2])
if n1s.compare_networks(n2s) < 0 and n1s != ipaddr.IPv4Network('0.0.0.0/0'):
new_sources = list(n1s.address_exclude(n2s))
else:
new_sources = [n1s]
if n1d.compare_networks(n2d) < 0 and n1d != ipaddr.IPv4Network('0.0.0.0/0'):
new_dest = list(n1d.address_exclude(n2d))
else:
new_dest = [n1d]
tools.DEBUG(DEBUG, 'smartCheck', 'working with IPs (2)', sIP, dIP, rule_matched[1], rule_matched[2], new_sources, new_dest)
if new_sources[0] != n1s or new_dest[0] != n1d:
tools.DEBUG(DEBUG, 'smartCheck', 'Creating new policy with smaller network')
tools.DEBUG(DEBUG, 'smartCheck', 'Sources:', new_sources, 'A:', new_sources[0], type(new_sources[0]), 'B:', n1s, type(n1s))
tools.DEBUG(DEBUG, 'smartCheck', 'Dest', new_dest)
policy_del_temp = FWPolicy('del-temp', 'temp', DEBUG)
for new_s in new_sources:
for new_d in new_dest:
irule_number = policy_del_temp.new_rule(str(new_s.with_hostmask) if wildcard else str(new_s.with_netmask),
str(new_d.with_hostmask) if wildcard else str(new_d.with_netmask),
ruled[3], ruled[4], ruled[5], ruled[6], ruled[7])
# Check comment for non-contiguous rule to understand the naming of the rules
policy_del_temp.set_rule_name(irule_number, 'split rule -' + str(irule_number))
# We are going to check a subpolicy, so from a rule partially matched, we create a subpolicy
# with all networks not mached. And start again, if in this case all of them are matched
# then the original rule can be removed
tempPolicy_rules_matched = smartCheck(policy_del_temp, policy_add, list_rules_match_add, matched_rules_extended, subpolicy=True, print_add_matches=False, DEBUG=DEBUG)
if len(tempPolicy_rules_matched) == policy_del_temp.get_rules_number():
rule_found = True
else:
tools.DEBUG(DEBUG, 'smartCheck', tempPolicy_rules_matched, '!=', policy_del_temp.get_rules_number(), 'Not all policy temp matched. Parent rule not mached.')
while len(list_rules_match_add) > 0:
list_rules_match_add.pop()
if ruled[0].startswith('split dsmo rule'):
rules_not_matched.append(ruled[0].split('}')[1])
else:
rules_not_matched.append(ruled[0])
if ruled[0] in rules_to_remove:
rules_to_remove.remove(ruled[0])
else:
# If there isn't any smaller network to check the whole rule is a match
rule_found = True
else:
rule_found = True
else: # if not matching_action
tools.DEBUG(DEBUG, 'smartCheck', 'Not matching ACTION')
if subpolicy:
tools.DEBUG(DEBUG, 'smartCheck', 'One rule not matched in subpolicy')
# list_rules_match_add can't be cleared with assigning an empty value because
# is a mutable object, so assigning an empty value will create a new object instead
# of removin the old one
while len(list_rules_match_add) > 0:
list_rules_match_add.pop()
return []
# If we find a line that is not matched we need to be sure that line is not part of a bigger rule
# for example a DSMO line
while len(list_rules_match_add) > 0:
list_rules_match_add.pop()
rules_not_matched.append(ruled[0])
if ruled[0] in rules_to_remove:
rules_to_remove.remove(ruled[0])
if rule_found and len(list_rules_match_add) > 0:
if ruled[0] not in rules_to_remove:
rules_to_remove.append(ruled[0])
if not subpolicy:
if print_add_matches:
print 'RULE MATCHED!'
print 'Rule to be removed:', ruled[0]
print 'Rules to be added: ', '\n\t\t '.join(list_rules_match_add)
print '-------------------------------'
matched_rules_extended[ruled[0]] = '\n'.join(list_rules_match_add)
while len(list_rules_match_add) > 0:
list_rules_match_add.pop()
return rules_to_remove
def smartLog(log_file, check_fakes=True, print_add_matches=False, print_removed_rules='flow', outprint=True, acldir='', verbose=False, ignore_acl_with_remark='', DEBUG=False):
    """
    Check a diff output file for Cisco ACL to check the removed ACL. This method will work only with Cisco ACL
    :param log_file: diff file
    :param check_fakes: Switch to check lines syntactically equal
    :param print_add_matches: Switch to see every rule matched
    :param print_removed_rules: Switch to see every rule removed ('rack' groups by ACL, 'flow' groups by rule)
    :param outprint: If we want to print the output (so it can be use as module)
    :param acldir: optional directory holding the full ACL files referenced by the diff
    :param verbose: Verbose switch
    :param ignore_acl_with_remark: ACLs containing this remark will be ignored
    :param DEBUG: Debug switch
    :return: List of lists with all data
    """
    # Counters for the summary printed at the end.
    num_adds_comments = 0
    num_del_comments = 0
    num_blank_lines_add = 0
    num_blank_lines_del = 0
    num_adds = 0
    num_dels = 0
    inumline = 0
    num_fake_del = 0
    num_non_impact_del = 0
    # Per-ACL maps: ACL name -> list of added/removed 'permit' lines.
    add_lines = {}
    del_lines = {}
    using_dir = False
    # Until an 'Index' header is seen, lines are attributed to the diff file itself.
    acl = log_file
    add_lines[acl] = []
    del_lines[acl] = []
    num_ignored_rules = 0
    matched_ignored_acl = []
    if DEBUG:
        tools.DEBUG(DEBUG, 'smartLog', 'Entering smartLog')
    if acldir is not None and acldir != '':
        using_dir = True
    with open(log_file) as file:
        for line in file:
            inumline += 1
            line_s = line.split()
            if line.startswith('Index'):
                # A new ACL begins, so we check if the previous one should be ignored
                acl = line_s[1].split('/')[-1]
                add_lines[acl] = []
                del_lines[acl] = []
                if using_dir:
                    if not add_lines[acl]:
                        if acl in os.listdir(acldir):
                            # In directory mode the "add" side is the path to the full ACL file.
                            add_lines[acl] = acldir + '/' + acl
                        else:
                            # ACL not found. Raise error.
                            print '[ERROR] Can\'t find the ACL:', acl, 'in directory', acldir
                            if __name__ == "__main__":
                                print 'Press Enter to continue...'
                                raw_input()
            elif line.startswith('+'):
                if not line.startswith('+++'):
                    num_adds += 1
                    if 'remark' in line_s or '+remark' in line_s:
                        if ignore_acl_with_remark != '' and ignore_acl_with_remark in line:
                            matched_ignored_acl.append([line.strip(), acl, inumline])
                        else:
                            num_adds_comments += 1
                    elif len(line_s) == 1:
                        num_blank_lines_add += 1
                    elif 'permit' in line:
                        add_lines[acl].append(line[1:].strip())
            if line.startswith('-'):
                if not line.startswith('---'):
                    num_dels += 1
                    if 'remark' in line_s or '-remark' in line_s:
                        if ignore_acl_with_remark != '' and ignore_acl_with_remark in line:
                            matched_ignored_acl.append([line.strip(), acl, inumline])
                        else:
                            num_del_comments += 1
                    elif len(line_s) == 1:
                        num_blank_lines_del += 1
                    elif 'permit' in line:
                        del_lines[acl].append(line[1:].strip())
        # NOTE(review): redundant under the 'with' statement, which already closes the file.
        file.close()
    # If an ACL was marked as "ignored" we need to remove it
    if len(matched_ignored_acl) > 0:
        for i in matched_ignored_acl:
            if i[1] in del_lines:
                num_ignored_rules += len(del_lines[i[1]])
                del del_lines[i[1]]
    equal = False
    # Checking if there is any equal line (exact same line)
    # (a line both removed and re-added is a "fake" delete / reorder)
    if check_fakes and not using_dir:
        for acl, del_li in del_lines.iteritems():
            # Iterate over a copy so entries can be removed from the live list.
            del_lines_acl = del_li[:]
            for lined in del_lines_acl:
                lined = lined.strip()
                for linea in add_lines[acl]:
                    linea = linea.strip()
                    if lined == linea:
                        equal = True
                        break
                if equal:
                    equal = False
                    num_fake_del += 1
                    del_lines[acl].remove(lined)
                    #add_lines[acl].remove(lined) We don't need to remove from the add
    # For each ACL, ask smartCheck which removed rules are still covered by the added ones.
    for acl_name, acls in del_lines.iteritems():
        if len(del_lines[acl_name]) > 0:
            policy_del = FWPolicy('del', acl_name, DEBUG)
            link_cisco.acl_parser(acls, policy_del, DEBUG=DEBUG)
            policy_add = FWPolicy('add', acl_name, DEBUG)
            link_cisco.acl_parser(add_lines[acl_name], policy_add, DEBUG=DEBUG)
            rules_to_remove = smartCheck(policy_del, policy_add, print_add_matches=print_add_matches, print_progress=verbose, DEBUG=DEBUG)
            for i in rules_to_remove:
                num_non_impact_del += 1
                if DEBUG:
                    print '[DEBUG]', del_lines[acl_name]
                    print '[DEBUG][smartLog]Rules to remove:', i
                del_lines[acl_name].remove(i)
    num_dels_real = 0
    for i, v in del_lines.iteritems():
        num_dels_real += len(v)
    # --------------- Printing output ---------------
    if outprint:
        print "Number of lines to be added (+): ", num_adds
        print "-------------------------------------------"
        print "Number of remarks:", num_adds_comments
        print "Number of blank lines:", num_blank_lines_add
        print
        print
        print "Number of lines to be removed (-): ", num_dels
        print "-------------------------------------------------"
        if len(matched_ignored_acl) > 0:
            print
            print "THERE ARE IGNORED ACLS!!!!!"
            for i in matched_ignored_acl:
                print "ACL:", i[1], "REMARK:", i[0], " Diff file line:", i[2]
            print
            print "Total IGNORED REMOVED LINES inside ACLs:", num_ignored_rules
            print
        print "Number of lines reordered:", num_fake_del
        print "Number of lines shadowed:", num_non_impact_del
        print "Number of remarks removed:", num_del_comments
        print "Number of blank lines removed:", num_blank_lines_del
        print
        i = 0
        for acl, rule in del_lines.iteritems():
            i += len(rule)
        print "Number of FLOWS REALLY removed:", i
        if print_removed_rules == 'rack':
            # Group the really-removed lines by ACL.
            for acl, rule in del_lines.iteritems():
                if len(rule) > 0:
                    print 'ACL:', acl
                    for i in rule:
                        print '- ' + i + (' ------ WIDE ACE REMOVED! ------' if 'any' in i else ' ')
        elif print_removed_rules == 'flow':
            # We need to change the list
            # (invert the mapping: rule -> list of ACLs it is removed from)
            rule_list = {}
            for acl, rule in del_lines.iteritems():
                if len(rule) > 0:
                    for i in rule:
                        if i not in rule_list:
                            rule_list[i] = [acl]
                        else:
                            if acl not in rule_list[i]:
                                rule_list[i].append(acl)
            for rule, acl in rule_list.iteritems():
                # Usually wide opened rules that are removed (ANY -> IP, IP -> ANY) are "dangerous". They are shown the first ones
                print 'ACE:', rule, '------ WIDE ACE REMOVED! ------' if 'any' in rule else ' '
                for i in acl:
                    print '-', i
        print
    return [del_lines, [num_adds, num_adds_comments, num_blank_lines_add,
                        num_dels, num_fake_del, num_non_impact_del, num_del_comments, num_blank_lines_del,
                        matched_ignored_acl, num_ignored_rules]]
def smartShadow2(policy, print_add_matches=False, outprint=True, verbose=False, DEBUG=False):
    """
    Check if inside an ACL there are any rules shadowed
    :param policy: Policy or file to check
    :param print_add_matches: Switch to see all matches
    :param outprint: If we want to print the output (so it can be use as module)
    :param verbose: Verbose switch
    :param DEBUG: DEBUG switch
    :return: List with data for shadowed rules
    """
    def join_matched_rules(policy, rule_list):
        """
        Internal function to join Juniper split rules into the real term.
        :param policy: initial policy
        :param rule_list: list of rules that we want to check to join
        :return:
        """
        # Counting matched child rules
        # Split-rule names look like 'term{child-id{index...'; group children by parent.
        full_rule = {}
        for i, j in rule_list.iteritems():
            if '{' in i:
                trule = i.split('{')
                if trule[0] + '{' + trule[1] in full_rule:
                    full_rule[trule[0] + '{' + trule[1]].append(trule[2])
                else:
                    full_rule[trule[0] + '{' + trule[1]] = [trule[2]]
        # Checking if total number of child rules is equal to number of child matches and if TRUE, then remove it
        for i, j in full_rule.iteritems():
            if len(j) == policy.get_number_split_rules(i):
                t_rules_matched = {}
                # for i2, j2 in rule_list.iteritems():
                # To avoid problems with testing we need to sort rule_list here (it's a dictionary so no order)
                for i2 in sorted(rule_list, key=rule_list.__getitem__):
                    j2 = rule_list[i2]
                    if i in i2:
                        # Child of a fully-matched parent: collapse under the parent name.
                        if i.split('{')[0] in t_rules_matched:
                            t_rules_matched[i.split('{')[0]] = t_rules_matched[i.split('{')[0]] + '\n' + j2
                        else:
                            t_rules_matched[i.split('{')[0]] = j2
                    else:
                        t_rules_matched[i2] = j2
                rule_list = dict(t_rules_matched)
        return rule_list

    if policy.get_rules_number() == 0:
        return [0, {}, 0, {}]
    if DEBUG:
        tools.DEBUG(DEBUG, 'smartShadow', 'Inside smartShadow')
    add_lines = []
    del_lines = []
    rules_to_remove_list = []
    rules_matched = {}
    # Checking if there are any duplicated rule
    rules = policy.get_rules()
    rules_t = rules[:]
    for rule in rules:
        rule_data = rule.get_rule()
        rules_t.remove(rule)
        rule_data_t = None
        for rule_t in rules_t:
            if rule_t.compare(rule) > 0:
                rule_data_t = rule_t.get_rule()
                break
        if rule_data_t is None:
            # Unique rule: keep it for the pairwise shadowing checks below.
            add_lines.append(rule)
            del_lines.append(rule)
        else:
            # Exact duplicate: record it directly as shadowed.
            rules_matched[rule_data[0]] = rule_data_t[0]
    if DEBUG:
        tools.DEBUG(DEBUG, 'smartShadow', 'Rules already matched', rules_matched)
    '''
    There are two different kind of shadow:
    - Two rules with the same action allowing the same flow, so the first will always take precedence over the second one
    - One rule with DENY and other rule BELOW allowing the same traffic (and exactly the same)
    The first type of shadowing is checked here
    '''
    total_rules = len(del_lines)
    if outprint:
        print 'Checking duplicated shadowing...'
        print 'Number of rules to process:', total_rules
    for inum, acl in enumerate(del_lines):
        # One-rule policy vs. the rest of the ACL.
        policy_del = FWPolicy('del', policy, DEBUG)
        policy_del.set_all_rules([acl])
        #link_cisco.acl_parser([acl], policy_del, DEBUG)
        temp_add = add_lines[:]
        temp_add.remove(acl)
        # To avoid "double match" with a shadowed rule, the ones that they are already identified as shadowed shouldn't be included again.
        if len(rules_to_remove_list) > 0:
            for i in rules_to_remove_list:
                for i2 in temp_add:
                    data = i2.get_rule()
                    if data[0] == i:
                        temp_add.remove(i2)
                        break
        policy_add = FWPolicy('add', policy, DEBUG)
        policy_add.set_all_rules(temp_add)
        if DEBUG:
            tools.DEBUG(DEBUG, 'smartShadow', 'processing rule:', inum, ' ', acl.get_rule())
            tools.DEBUG(DEBUG, 'smartShadow', 'DEL POLICY')
            policy_del.print_policy()
            tools.DEBUG(DEBUG, 'smartShadow', 'ADD POLICY')
            policy_add.print_policy()
        if outprint:
            sys.stdout.write('\r Processing rule {} of {}     '.format(inum + 1, total_rules))
        if verbose and outprint:
            print 'ACL: ', acl.get_rule()
        rules_to_remove = smartCheck(policy_del, policy_add, matched_rules_extended=rules_matched, print_add_matches=False, DEBUG=DEBUG)
        tools.DEBUG(DEBUG, 'smartShadow', 'rules_to_remove', rules_to_remove)
        if len(rules_to_remove) > 0:
            if verbose and outprint:
                print 'Rule shadowed:', rules_to_remove
            for i in rules_to_remove:
                rules_to_remove_list.append(i)
        del policy_del
        del policy_add
    '''
    Now we have to check the second type of shadowing
    '''
    if outprint:
        print 'Checking DENY shadowing...'
    rules_matched2 = {}
    rules_to_remove_list2 = []
    temp_del = del_lines[:]
    for inum, acl in enumerate(del_lines):
        data_acl = acl.get_rule()
        if data_acl[6]:  # Action: PERMIT
            temp_del.remove(acl)
            continue
        acl.set_action(True)  # We change the Action for the Check
        policy_del = FWPolicy('del', policy, DEBUG)
        policy_del.set_all_rules([acl])
        temp_add = temp_del[:]  # temp_add will have the rest of the rules below the DENY
        temp_add.remove(acl)
        # To avoid "double match" with a shadowed rule, the ones that they are already identified as shadowed shouldn't be included again.
        if len(rules_to_remove_list2) > 0:
            for i in rules_to_remove_list2:
                for i2 in temp_add:
                    data = i2.get_rule()
                    if data[0] == i:
                        temp_add.remove(i2)
                        break
        policy_add = FWPolicy('add', policy, DEBUG)
        policy_add.set_all_rules(temp_add)
        if DEBUG:
            tools.DEBUG(DEBUG, 'smartShadow', 'processing rule:', inum, ' ', acl.get_rule())
            tools.DEBUG(DEBUG, 'smartShadow', 'DEL POLICY')
            policy_del.print_policy()
            tools.DEBUG(DEBUG, 'smartShadow', 'ADD POLICY')
            policy_add.print_policy()
        if outprint:
            sys.stdout.write('\r Processing DENY rule {}'.format(inum + 1))
        if verbose and outprint:
            print 'ACL: ', acl.get_rule()
        # Important: we swap policy_add with policy_del in the call of smartCheck. We want to check the policy against
        # the deny rule, not in the other way around. So, it's like the "new" policy would be the DENY rule
        rules_to_remove2 = smartCheck(policy_add, policy_del, matched_rules_extended=rules_matched2, print_add_matches=False, DEBUG=DEBUG)
        if len(rules_to_remove2) > 0:
            if verbose and outprint:
                print 'Rule shadowed:', rules_to_remove2
            for i in rules_to_remove2:
                rules_to_remove_list2.append(i)
        tools.DEBUG(DEBUG, 'smartShadow', 'rules_to_remove2', rules_to_remove2)
        tools.DEBUG(DEBUG, 'smartShadow', 'rules_to_remove_list2', rules_to_remove_list2)
        del policy_del
        del policy_add
    '''
    After both checks where done, we have a list of matched rules than can be removed in rules_matched and rules_matched2. But, we need to review these lists to see
    if all "child-rules" of a rule were matched, so then, we only print the full rule. This is usually the case for JCL ACLs, where the lines
    were split to have only one source and only one destination.
    '''
    rules_matched = join_matched_rules(policy, rules_matched)
    rules_matched2 = join_matched_rules(policy, rules_matched2)
    tools.DEBUG(DEBUG, 'smartShadow', 'rules_matched', rules_matched)
    tools.DEBUG(DEBUG, 'smartShadow', 'rules_matched2', rules_matched2)
    if outprint:
        sys.stdout.flush()
        print
        print '----------- Summary -----------'
        print 'List of rules that can be removed (same permit or deny flow shadowed): (', len(rules_matched), ')'
        print
        for i, j in rules_matched.iteritems():
            if '{' in i:
                trule = i.split('{')
                tIPs = trule[3].replace("'", "").replace(' ', '')
                print ' Compound Rule:'
                print ' '*4, trule[0], 'Source IP:', tIPs.split('[')[1].split(',')[0], 'Destination IP:', tIPs.split(',')[1].split(']')[0]
                print ' Partially matched',
            else:
                print ' Rule:'
                print ' '*4, i
                print ' Fully matched',
            if '{' in j:
                trule = j.split('{')
                tIPs = trule[3].replace("'", "")
                print 'within compound rule:'
                print ' '*4, trule[0], 'Source IP:', tIPs.split('[')[1].split(',')[0], 'Destination IP:', tIPs.split(',')[1].split(']')[0]
                print '------'
            else:
                print 'with rule/s:'
                print '\n'.join([' '*5 + l for l in j.split('\n')])
                print '------'
        print
        print 'List of rules that can be removed (DENY shadowing): (', len(rules_matched2), ')'
        print
        for i, j in rules_matched2.iteritems():
            if '{' in i:
                trule = i.split('{')
                tIPs = trule[3].replace("'", "").replace(' ', '')
                print ' Compound Rule:'
                print ' '*4, trule[0], 'Source IP:', tIPs.split('[')[1].split(',')[0], 'Destination IP:', tIPs.split(',')[1].split(']')[0]
                print ' Partially matched',
            else:
                print ' Rule:'
                print ' '*4, i
                print ' Fully matched',
            if '{' in j:
                print 'within compound rule:'
                rule_lines = j.split('\n')
                for j2 in rule_lines:
                    trule = j2.split('{')
                    tIPs = trule[3].replace("'", "")
                    print ' '*4, trule[0], 'Source IP:', tIPs.split('[')[1].split(',')[0], 'Destination IP:', tIPs.split(',')[1].split(']')[0]
                print '------'
            else:
                print 'with rule/s:'
                print '\n'.join([' ' * 5 + l for l in j.split('\n')])
                print '------'
        print '-------------------'
    return [rules_matched, rules_matched2]
def smartCompare2(p_policy1, p_policy2, verbose=False, only_different=False, outprint=True, ignore_lines='', ignoredeny=False, ignoreshadowed=False, DEBUG=False):
"""
Compare two ACLs to verify that they have the same flows
:param p_policy1: First file to compare
:param p_policy2: Second file to compare with
:param verbose: Verbose switch
:param only_different: Show only ACLs that they are different
:param outprint: False to avoid any output
:param DEBUG: DEBUG switch
:return: List with all data from the ACLs comparison
"""
if DEBUG:
tools.DEBUG(DEBUG, 'smartCompare', 'Inside smartCompare')
policy1 = deepcopy(p_policy1)
policy2 = deepcopy(p_policy2)
rules1 = list(policy1.get_rules())
rules2 = list(policy2.get_rules())
inumline_old = len(rules1)
inumline_new = len(rules2)
# Checking if we have a rules to ignore
number_ignored_old = 0
number_ignored_new = 0
list_ignored_rules = []
if ignore_lines != '' or ignoredeny:
ignore_lines = ignore_lines.split(',')
num_rule = policy1.get_rules_number()
while num_rule > 0:
rule = policy1.get_rule(num_rule)
if not rule[0].startswith('^'): # Disabled rules
if rule[0] in ignore_lines or (ignoredeny and not rule[6]):
tools.DEBUG(DEBUG, 'smartCompare', 'Removing ignore rule Policy1', rule[0])
policy1.remove_rule(num_rule)
number_ignored_old += 1
if rule[0] not in list_ignored_rules:
list_ignored_rules.append(rule[0])
num_rule -= 1
num_rule = policy2.get_rules_number()
while num_rule > 0:
rule = policy2.get_rule(num_rule)
if not rule[0].startswith('^'): # Disabled rules
if rule[0] in ignore_lines or (ignoredeny and not rule[6]):
tools.DEBUG(DEBUG, 'smartCompare', 'Removing ignore rule Policy2', rule[0])
policy2.remove_rule(num_rule)
number_ignored_new += 1
if rule[0] not in list_ignored_rules:
list_ignored_rules.append(rule[0])
num_rule -= 1
# We need to split any possible "multi IP" rule
# smartCompare requires one IP per line
policy1.split_ips()
policy2.split_ips()
# Checking if the last rule is the type DENY ANY ANY. If it is, we removed to avoid false positives
last_deny_old = 0
if policy1.last_deny():
last_deny_old = 1
policy1.remove_rule(policy1.get_rules_number())
last_deny_new = 0
if policy2.last_deny():
last_deny_new = 1
policy2.remove_rule(policy2.get_rules_number())
if rules1[0] == '<empty>':
rules_to_remove = []
elif rules2[0] == '<empty>':
rules_to_remove = []
else:
# We should check if every rule we want to compare is really allowed or not. If a rule is not already allowed in the "old" policy
# Why do we need to check against the second policy?
rules_shadowed = {}
if ignoreshadowed:
rules_shadowed = policy1.remove_shadowed_rules()
rules_to_remove = smartCheck(policy1, policy2, print_progress=verbose, print_add_matches=verbose, DEBUG=DEBUG)
tools.DEBUG(DEBUG, 'smartCompare', 'rules_to_remove', rules_to_remove)
# Removing any duplicated rule
rules_to_remove_t = []
for i in rules_to_remove:
if i not in rules_to_remove_t:
rules_to_remove_t.append(i)
if ignoreshadowed:
'''
For the shadowed rules we need to check if the rule that "shadowed" the one removed,
it's included in the "rules_to_remove":
- If it was included -> the one shadowed will be also included
- If it was NOT included -> the one shadowed neither
'''
for r1, r2 in rules_shadowed.iteritems():
if r2 in rules_to_remove and r1 not in rules_to_remove_t:
rules_to_remove_t.append(i)
rules_to_remove = rules_to_remove_t[:]
if outprint:
if not only_different or (only_different and (inumline_old - len(rules_to_remove) > 0)):
if ignoredeny:
print '------------------------------------------------------------------'
print 'YOU ARE IGNORING DENY RULES IN COMPARISON. RESULTS COULD BE WRONG!'
print '------------------------------------------------------------------'
print 'The following rules were ignored during the comparison:'
for i in list_ignored_rules:
print i
print '------ SmartCompare ------'
print 'Number of rules in old policy (without remarks):', inumline_old
print 'Number of rules in new policy (without remarks):', inumline_new
if ignoreshadowed:
print
print 'Number of shadowed rules IGNORED in old policy:', len(rules_shadowed)
print 'The following rules were ignored during the comparison:'
for i in rules_shadowed:
print i
iold = policy1.get_rules_number()
last_rule_printed = ''
output_to_print = []
rules_fully_matched = []
for i in xrange(1, iold+1):
rule_old = policy1.get_rule(i)
if rule_old[0] not in rules_to_remove and rule_old[0] != last_rule_printed:
if rule_old[0].split('{')[0] in rules_fully_matched:
rules_fully_matched.remove(rule_old[0].split('{')[0])
if '{' in rule_old[0]:
output_to_print.append('Compound rule: ' + rule_old[0].split('{')[0] + ' '*4 + 'Source IP: ' + rule_old[1] + ' Destination IP: ' + rule_old[2] + ' Source Port: ' + rule_old[4] + ' Destination Port: ' + rule_old[3])
else:
output_to_print.append(rule_old[0])
last_rule_printed = rule_old[0]
else:
# If the rule was split ('{' in name) then we only need to add it once
# we checked with the 1 line of the split rule
if '{' in rule_old[0]:
if rule_old[0].split('{')[2] == '1':
rules_fully_matched.append(rule_old[0].split('{')[0])
else:
rules_fully_matched.append(rule_old[0].split('{')[0])
print
print 'Number of rules shadowed in the new policy:', len(rules_fully_matched) + (last_deny_old + number_ignored_old)
print
print 'Number of rules NOT fully matched in the new policy:', inumline_old - (len(rules_fully_matched) + (last_deny_old + number_ignored_old))
print 'Rules not fully matched from OLD policy:'
for i in output_to_print:
print i
return rules_to_remove
"""
MAIN
"""
if __name__ == "__main__":
def help_message(part=''):
if part == 'smartlog':
print 'usage: smartACL.py', '--smartlog', '--diff-file <DIFF_FILE>', '[-r]', '[-p]', '[-f]'
elif part == 'smartshadow':
print 'usage: smartACL.py', '--smartshadow', '--acl-old <ACL-FILE>'
elif part == 'smartcompare':
print 'usage: smartACL.py', '--smartcompare', '--acl-old <ACL-FILE>', '--acl-new <ACL-FILE>'
else:
print 'usage: smartACL.py', '[--smartcompare|--smartshadow|--smartlog]', '[--diff-file DIFF_FILE]', '[-h]', \
'[--diff-file Diff File]', '[--acl-old ACL_FILE1]', '[--acl-new ACL_FILE2]', '[-r]', '[-p]', '[-f]', '[-d]'
print
if part == '':
print 'Mandatory arguments'
print '--smartcompare Execute smartCompare module'
print '--smartshadow Execute smartShadow module'
print '--smartlog Execute smartLog module'
print
print 'Optional arguments'
if part == '' or part == 'smartlog':
print '--diff-file Diff file'
if part == '' or part == 'smartcompare':
print '--acl-old Old ACL file or directory to compare'
elif part == 'smartshadow':
print '--acl-old ACL file'
if part == '' or part == 'smartcompare':
print '--acl-new New ACL file or directory to compare'
print '-s, --show-only-different When comparing directories will show an output only with different files'
print '-il, --ignore-line Ignore the following lines (ACL remark for Cisco or Term name for Juniper)'
print '-is, --ignore-shadowed smartCompare will perform a BASIC rule shadowing lookup and discard any found rule for the comparison'
print '--ignoredeny Ignore DENY rules. (DANGEROUS, CAN\'T SHOW FAKE RESULTS)'
print '--capirca-dir Directory containing NETWORK.net and SERVICES.svc '
if part == '' or part == 'smartlog':
if part == 'smartlog':
print '-il, --ignore-line Ignore ACL with the following remark'
print '-r, --print-removed-rules-by-file Print all rules by file that they are really going to be removed'
print '-p, --print-add-matches Print ADD matches for DEL lines'
print '-n, --no-check-fakes NO check for twin rules (exactly the same - and + in the diff file)'
print '-a, --acl-dir Directory with ALL ACLs to compare diff file'
print '--remarkasname Will use "remarks" as name of the rule for Cisco ACLs'
print '--acltype Specifiy the ACL type: acl,ncl,jcl'
print '-v, --verbose Verbose output'
print '-d, --debug'
print '-h, --help This message'
    # Drive the selected module over one or two sets of ACL files.
    #
    # dir1/dir2: lists of file paths (dir2 is ignored for smartshadow).
    # op: 'smartshadow' runs smartShadow2 on each parseable file of dir1;
    # anything else pairs each dir1 file with the same-named file in dir2
    # (or the single dir2 file) and runs smartCompare2 on the pair.
    # ACL type is taken from the global `acltype` when set, otherwise from
    # the file extension (acl/ncl/fcl = Cisco, jcl = Juniper, pol = Capirca).
    def run(dir1, dir2, op):
        for file1 in dir1:
            file_found = False
            if os.path.isdir(file1): continue
            if acltype != '':
                type_ext1 = acltype
                type_ext2 = acltype
            else:
                type_ext1 = file1.split('.')[len(file1.split('.')) - 1]
                if type_ext1 not in ['acl', 'ncl', 'fcl', 'jcl', 'pol']:
                    print "Can't detect ACL type. Ignoring file:", file1
                    continue # if the file extension is not known next one
            # Base name of file1, used to match the peer file in dir2.
            if '/' in file1:
                file_name1 = file1.split('/')[len(file1.split('/')) - 1]
            else:
                file_name1 = file1
            if op != 'smartcompare':
                # smartshadow: parse and analyse each file on its own.
                print '\nProcessing file: ', file1
                parsed = False
                policy1 = FWPolicy('', file1, debug)
                if type_ext1 in ['acl', 'ncl', 'fcl']:
                    parsed = link_cisco.acl_parser(file1, policy1, remarkasname=args.remarkasname, DEBUG=debug)
                elif type_ext1 == 'jcl':
                    parsed = link_juniper.jcl_parser(file1, policy1, debug)
                if parsed:
                    policy1.split_ips()
                if parsed:
                    smartShadow2(policy1, print_add_matches=args.printadd, verbose=args.verbose, DEBUG=args.debug)
                else:
                    print "ERROR: Can't parse files. File:", file1
                    break
            else:
                # smartcompare: find the matching file in dir2 and compare.
                for file2 in dir2:
                    if os.path.isdir(file2): continue
                    if acltype == '':
                        type_ext2 = file2.split('.')[len(file2.split('.')) - 1]
                        if type_ext2 not in ['acl', 'ncl', 'fcl', 'jcl', 'pol']:
                            print "Can't detect ACL type. Ignoring file:", file2
                            continue # if the file extension is not known next one
                    if '/' in file2:
                        file_name2 = file2.split('/')[len(file2.split('/')) - 1]
                    else:
                        file_name2 = file2
                    # If the directory is only one file, it doesn't make sense to match the names. We just use the file.
                    if file_name1 == file_name2 or len(dir2) == 1:
                        file_found = True
                        print '\nProcessing file: ', file1
                        parsed = False
                        policy1 = FWPolicy('', file1, debug)
                        if type_ext1 in ['acl', 'ncl', 'fcl']:
                            parsed = link_cisco.acl_parser(file1, policy1, remarkasname=args.remarkasname, DEBUG=debug)
                        elif type_ext1 == 'jcl':
                            parsed = link_juniper.jcl_parser(file1, policy1, debug)
                        elif type_ext1 == 'pol':
                            # Capirca policies need the external definitions directory.
                            if args.capircadir is None:
                                print "Capirca Policies needs the parameter --capirca-dir"
                                continue
                            parsed = link_pol.pol_parser(file1, args.capircadir, policy1, debug)
                        policy2 = FWPolicy('', file2, debug)
                        if type_ext2 in ['acl', 'ncl', 'fcl']:
                            parsed = parsed and link_cisco.acl_parser(file2, policy2, remarkasname=args.remarkasname, DEBUG=debug)
                        elif type_ext2 == 'jcl':
                            parsed = parsed and link_juniper.jcl_parser(file2, policy2, debug)
                        elif type_ext2 == 'pol':
                            if args.capircadir is None:
                                print "Capirca Policies needs the parameter --capirca-dir"
                                continue
                            parsed = parsed and link_pol.pol_parser(file2, args.capircadir, policy2, debug)
                        if parsed:
                            smartCompare2(policy1, policy2, verbose=args.verbose, only_different=args.show_different, ignore_lines=args.ignore_term, ignoredeny=args.ignoredeny, ignoreshadowed=args.ignoreshadowed, DEBUG=args.debug)
                        else:
                            print "ERROR: Can't parse files. File1:", file1, "File2:", file2
                            break
            # Report dir1 files that had no counterpart in dir2.
            if not file_found:
                if type(dir1) is list and len(dir1) > 1:
                    tdir1 = '/'.join(dir1[0].split('/')[0:-1])
                else:
                    tdir1 = dir1
                if type(dir2) is list and len(dir2) > 1:
                    tdir2 = '/'.join(dir2[0].split('/')[0:-1])
                else:
                    tdir2 = dir2
                tfile = file1
                print 'FILE:', tfile, 'in directory:', tdir1, 'was NOT found in directory:', tdir2
    # Command-line definition. add_help=False because -h is implemented
    # manually through help_message() below.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-sc', '--smartcompare', dest='smartcompare', action='store_true')
    parser.add_argument('-ss', '--smartshadow', dest='smartshadow', action='store_true')
    parser.add_argument('-sl', '--smartlog', dest='smartlog', action='store_true')
    parser.add_argument('--diff-file', dest='diff_file', metavar='Diff File', help='Diff file for smartLog')
    parser.add_argument('--acl-old', dest='acl_file1', help='Old ACL file (smartShadow only use this one)', nargs='+')
    parser.add_argument('--acl-new', dest='acl_file2', help='New ACL file', nargs='+')
    parser.add_argument('-s', '--show-only-different', dest='show_different', help='Show only different files', action='store_true')
    parser.add_argument('-il', '--ignore-line', dest='ignore_term', help='Ignore the following lines (ACL remark for Cisco or Term name for Juniper)', default='')
    parser.add_argument('-is', '--ignore-shadowed', dest='ignoreshadowed', help='smartCompare will perform a BASIC rule shadowing lookup and discard any found rule for the comparison', action='store_true')
    # NOTE(review): help_message() documents this flag as '--ignoredeny'
    # (no hyphen) but it is registered here as '--ignore-deny' — confirm
    # which spelling is intended and align the help text.
    parser.add_argument('--ignore-deny', dest='ignoredeny', help='Ignore DENY rules. (DANGEROUS, CAN\'T SHOW FAKE RESULTS)', action='store_true')
    parser.add_argument('-r', '--print-removed-rules-by-file', dest='printdelrack', help='(smartLog) Print all rules by file that they are really going to be removed', action='store_true')
    parser.add_argument('-p', '--print-add-matches', dest='printadd', help='(smartShadow) Print ADD matches for DEL lines', action='store_true')
    parser.add_argument('-n', '--no-check-fakes', dest='chkfake', help='(smartShadow) NO check for twin rules (exactly the same - than +)', action='store_true')
    parser.add_argument('--remarkasname', help='Will use "remarks" as name of the rule for Cisco ACLs', action='store_true')
    parser.add_argument('--acltype', help='Specifiy the ACL type: acl,ncl,jcl')
    parser.add_argument('-a', '--acl-dir', dest='acldir', help='ACL Directory for smartLog')
    parser.add_argument('--capirca-dir', dest='capircadir', help='Capirca definitions directory')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true')
    parser.add_argument('-h', '--help', dest='help', action='store_true', help='Show this help message and exit.')
    args = parser.parse_args()
    # Select exactly one module; smartlog wins over smartcompare over smartshadow.
    ops = 'smartlog' if args.smartlog else 'smartcompare' if args.smartcompare else 'smartshadow' if args.smartshadow else ''
    if args.help or ops == '':
        help_message(ops)
        quit()
    # Optional explicit ACL type overrides extension-based detection in run().
    acltype = ''
    if args.acltype:
        acltype = args.acltype
    debug = args.debug
    if ops == 'smartlog':
        if args.diff_file is None:
            print 'ERROR: With smartLog you need to specify a diff file. Please, use --diff-file'
            help_message(part='smartlog')
            quit()
        # 'flow' prints removed rules inline, 'rack' groups them by file.
        pr = 'flow'
        if args.printdelrack:
            pr = 'rack'
        smartLog(args.diff_file, check_fakes=not args.chkfake, print_add_matches=args.printadd, print_removed_rules=pr, acldir=args.acldir, verbose=args.verbose, ignore_acl_with_remark=args.ignore_term, DEBUG=args.debug)
    elif ops == 'smartshadow':
        if args.acl_file1 is None:
            print 'ERROR: With smartShadow you need to specify an ACL file using --acl-old'
            help_message(part='smartshadow')
            quit()
        run(args.acl_file1, None, ops)
    elif ops == 'smartcompare':
        if args.acl_file1 is None or args.acl_file2 is None:
            print 'ERROR: With smartCompare you need to specify two ACL files using --acl-old and --acl-new'
            help_message(part='smartcompare')
            quit()
        else:
            # Expand shell-style wildcards that the shell did not expand.
            files1 = ''
            files2 = ''
            for i in args.acl_file1:
                if '*' in i:
                    files1 = glob.glob(i)
                else:
                    files1 = args.acl_file1
                    break
            for i in args.acl_file2:
                if '*' in i:
                    files2 = glob.glob(i)
                else:
                    files2 = args.acl_file2
                    break
            run(files1, files2, ops)
|
pedronogs/convert-udt-spacy
|
convert.py
|
import json
import subprocess
import spacy
import re
import argparse
# Map one token to its IOB-tagged form, given the annotated span it may
# belong to and the previous token's tag letter.
def convertIOB(token, annotation_text, annotation_label, previous_annotation):
    """Return ``[token, tag]`` in IOB form, or ``None``.

    ``B-LABEL`` when the token opens the annotated span (equals its first
    word); ``I-LABEL`` when it continues a span whose previous tag letter
    was 'B' or 'I'; ``None`` when neither applies.
    """
    label = annotation_label.upper()
    # Span opener: token matches the first word of the annotated text.
    if token == annotation_text[0]:
        return [token, 'B-{}'.format(label)]
    # Span continuation: only valid directly after a B or I tag.
    if previous_annotation in ('B', 'I'):
        return [token, 'I-{}'.format(label)]
    return None
def main(ARGS):
    """Convert a UDT NER JSON export to IOB format, then to spaCy binary.

    Reads ``ARGS.input_file`` (UDT JSON export), writes one sentence per
    line of space-separated ``token|TAG`` pairs to ``ARGS.iob_file``, then
    shells out to ``spacy convert`` to produce spaCy's binary training
    format in the current directory. Samples without an 'annotation' key
    are skipped.
    """
    with open(ARGS.input_file, "r", encoding='utf-8') as json_file:
        udt_samples = json.load(json_file)['samples']

    # Blank Portuguese pipeline: used only for tokenization.
    nlp = spacy.blank("pt")

    with open(ARGS.iob_file, 'w', encoding="utf-8") as f:
        for index, sample in enumerate(udt_samples):
            # BUG FIX: the original used `break` here, which silently
            # dropped every remaining sample as soon as one document had
            # no annotations; an unannotated document must only be skipped.
            if 'annotation' not in sample:
                continue

            # '|' is this script's token/tag separator, so strip it first.
            doc = nlp(sample['document'].replace('|', ''))
            sample_words = [w.text for w in doc if w.text != ' ' and w.text != '']

            # Tokenized annotated spans, each as [ [words...], label ].
            sample_entities = []
            for entity in sample['annotation']['entities']:
                entity_doc = nlp(entity['text'])
                entity_words = [w.text for w in entity_doc if w.text != ' ' and w.text != '']
                sample_entities.append([entity_words, entity['label']])

            # Tag every token; tokens outside any annotated span get 'O'.
            sample_iob = []
            previous_annotation = None
            for token in sample_words:
                tag_added = False  # token already received an IOB tag
                for idx, annotation in enumerate(sample_entities):
                    if token in annotation[0]:
                        token_iob = convertIOB(token, annotation[0], annotation[1], previous_annotation)
                        if token_iob is not None:
                            sample_iob.append(token_iob)
                            # Consume the matched word so it is not reused.
                            index_token = sample_entities[idx][0].index(token)
                            sample_entities[idx][0][index_token] = '*'
                            tag_added = True
                            previous_annotation = token_iob[1][0]  # last IOB tag letter
                            break
                if not tag_added:
                    sample_iob.append([token, 'O'])
                    previous_annotation = 'O'

            # One "token|TAG" pair per token, space separated, one sentence per line.
            f.write(' '.join('{}|{}'.format(tok, tag) for tok, tag in sample_iob))
            f.write('\n')

    # Convert IOB file to spaCy binary format.
    # Options: -s -> segment sentences automatically, -n 10 -> group 10
    # sentences per document, -l -> language model.
    subprocess.run(['python', '-m', 'spacy', 'convert', '-s', '-n', '10', '-l', 'pt', ARGS.iob_file, '.'])
if __name__ == '__main__':
    # Command-line entry point: collect the input/output paths and hand
    # them to main().
    cli = argparse.ArgumentParser(description="Script to convert UDT's JSON exported NER annotations to IOB format and spaCy's binary format.")
    cli.add_argument('-i', '--input_file', required=True, help="Path to UDT's JSON exported NER annotations file.")
    cli.add_argument('-f', '--iob_file', default="output_annotations.iob", help="Path to output IOB converted file.")
    main(cli.parse_args())
|
guineawheek/aiotoa
|
aiotoa/http.py
|
<gh_stars>0
from asyncio import sleep
from urllib.parse import urlencode
import datetime
import json
import aiohttp
from async_timeout import timeout as atimeout
from .models import *
__all__ = ["AioTOAError", "TOASession"]
def endpoint(endp, query_params=None):
    """Decorator factory for TOA REST endpoint stubs.

    ``endp`` is a format string filled with the wrapped coroutine's
    positional arguments. ``query_params`` names keyword arguments that,
    when present and not None, are appended as a URL query string.
    The wrapped coroutine's return annotation picks the model: a
    ``List[...]`` annotation is fetched and returned as-is; any other
    model fetches ``List[model]`` (TOA always responds with a list) and
    returns its first element, raising AioTOAError on an empty response.
    """
    def wrapper(func):
        async def new_func(self, *args, **kwargs):
            model = func.__annotations__['return']
            url = endp.format(*args, **kwargs)
            if query_params:
                present = {name: kwargs[name] for name in query_params if kwargs.get(name) is not None}
                url = url + "?" + urlencode(present)
            if str(model).startswith("typing.List"):
                return await self.req(url, model)
            # TOA wraps everything in a list; unwrap single-model endpoints.
            data = await self.req(url, List[model])
            if not len(data):
                raise AioTOAError(f"Request to {url} returned no data!")
            return data[0]
        return new_func
    return wrapper
class AioTOAError(Exception):
    """Raised when a TOA API request fails or returns no usable data."""
class TOASession:
    """Asynchronous client for The Orange Alliance (theorangealliance.org)
    REST API.

    All endpoint methods are coroutine stubs whose behavior is supplied by
    the @endpoint decorator; their return annotation selects the model the
    JSON response is deserialized into.
    """
    def __init__(self, key: str, app_name: str, aiohttp_session=None, ratelimit=2, close_on_aexit=True):
        # key / app_name are sent on every request as the X-TOA-Key and
        # X-Application-Origin headers (see req()).
        self.key = key
        self.app_name = app_name
        # Minimum number of seconds between requests (falsy disables it).
        self.ratelimit = ratelimit
        # Seed last_req in the past so the very first request is not delayed.
        self.last_req = datetime.datetime.now() - datetime.timedelta(seconds=ratelimit)
        # Use the caller-provided aiohttp session when given, else own one.
        self.session = aiohttp.ClientSession() if not aiohttp_session else aiohttp_session
        self.close_on_aexit = close_on_aexit
    async def __aenter__(self):
        return self
    async def __aexit__(self, exc_type, exc, tb):
        # Only close the underlying session if configured to do so (a
        # shared, caller-owned session should usually outlive this object).
        if self.close_on_aexit:
            await self.session.close()
    async def close(self):
        # Explicitly close the underlying aiohttp session.
        await self.session.close()
    async def req(self, endpoint: str, model):
        """GET `endpoint` from the TOA API and deserialize it into `model`."""
        # Enforce a minimum spacing of `ratelimit` seconds between requests.
        if self.ratelimit:
            now = datetime.datetime.now()
            delta = (now - self.last_req).total_seconds()
            if delta < self.ratelimit:
                await sleep(self.ratelimit - delta)
            # NOTE(review): last_req is stamped with the pre-sleep time, so
            # actual spacing can be slightly shorter than ratelimit — confirm
            # this is intended.
            self.last_req = now
        if not endpoint.startswith("/"):
            endpoint = "/" + endpoint
        headers = {
            "X-Application-Origin": self.app_name,
            "X-TOA-Key": self.key,
            "Content-Type": "application/json"
        }
        # 5-second hard timeout on the whole request.
        async with atimeout(5) as _, self.session.get("https://theorangealliance.org/api" + endpoint, headers=headers) as response:
            # toa _still_ sometimes returns json data as text/html, making response.json() throw an exception
            # _sigh_
            data = json.loads(await response.text())
            # toa never returns data in dicts, it's always lists
            if isinstance(data, dict):
                raise AioTOAError(f"Request to {endpoint} failed with {response.status} {response.reason} (data={data})")
            return to_model(data, model)
    async def version(self) -> APIVersion:
        # The API root reports the current API version.
        return await self.req("/", APIVersion)
    @endpoint("/event-types")
    async def event_types(self) -> List[EventType]:
        pass
    @endpoint("/seasons")
    async def seasons(self) -> List[Season]:
        pass
    @endpoint("/regions")
    async def regions(self) -> List[Region]:
        pass
    @endpoint("/leagues")
    async def leagues(self) -> List[League]:
        pass
    @endpoint("/streams")
    async def streams(self) -> List[EventLiveStream]:
        pass
    # /event
    @endpoint("/event", query_params=("league_key", "region_key", "season_key", "type"))
    async def query_events(self, league_key=None, region_key=None, season_key=None, type=None) -> List[Event]:
        pass
    @endpoint("/event/{0}")
    async def event(self, event_key) -> Event:
        pass
    @endpoint("/event/{0}/matches")
    async def event_matches(self, event_key) -> List[Match]:
        pass
    @endpoint("/event/{0}/matches/details")
    async def event_match_details(self, event_key) -> List[MatchDetails]:
        pass
    @endpoint("/event/{0}/matches/participants")
    async def event_match_participants(self, event_key) -> List[MatchParticipant]:
        pass
    @endpoint("/event/{0}/rankings")
    async def event_rankings(self, event_key) -> List[Ranking]:
        pass
    @endpoint("/event/{0}/streams")
    async def event_streams(self, event_key) -> List[EventLiveStream]:
        pass
    @endpoint("/event/{0}/teams")
    async def event_teams(self, event_key) -> List[TeamEventParticipant]:
        pass
    @endpoint("/event/{0}/awards")
    async def event_awards(self, event_key) -> List[AwardRecipient]:
        pass
    # /match
    @endpoint("/match/high-scores")
    async def match_high_scores(self, type="all") -> Match: # possible values: elims, quals, all
        pass
    @endpoint("/match/{0}")
    async def match(self, match_key) -> Match:
        pass
    @endpoint("/match/{0}/matches")
    async def match_details(self, match_key) -> MatchDetails:
        # NOTE(review): path looks copy-pasted from the /event routes —
        # a details endpoint named ".../matches" is suspicious; verify
        # against the TOA API reference.
        pass
    @endpoint("/match/{0}/matches/details")
    async def match_participants(self, match_key) -> List[MatchParticipant]:
        # NOTE(review): same concern — "matches/details" returning
        # participants does not match the method name; verify.
        pass
    # /team
    @endpoint("/team")
    async def query_teams(self, start=None, count=None) -> List[Team]:
        pass
    @endpoint("/team/{0}")
    async def team(self, team_key) -> Team:
        pass
    @endpoint("/team/{0}/events/{1}")
    async def team_events(self, team_key, season_key) -> List[EventEventParticipant]:
        pass
    @endpoint("/team/{0}/matches/{1}")
    async def team_matches(self, team_key, season_key) -> List[MatchParticipant]:
        pass
    @endpoint("/team/{0}/awards/{1}")
    async def team_awards(self, team_key, season_key) -> List[AwardRecipient]:
        pass
    @endpoint("/team/{0}/results/{1}")
    async def team_rankings(self, team_key, season_key) -> List[Ranking]:
        pass
    # /web
    @endpoint("/web/announcements")
    async def announcements(self) -> WebAnnouncement:
        pass
|
guineawheek/aiotoa
|
aiotoa/__init__.py
|
from . import *
from .http import TOASession, AioTOAError
|
ufo-github/google-sign-in
|
main.py
|
<reponame>ufo-github/google-sign-in
#!/usr/bin/python
# Copyright Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: -*- utf-8 -*-
from google.appengine.ext import vendor
vendor.add('lib')
import json
import sys
import os
from flask import Flask, request, make_response, render_template, session
from oauth2client import client
from apiclient.discovery import build
import httplib2
from google.appengine.ext import ndb
# Flask application serving static assets from ./static and Jinja
# templates from ./templates.
app = Flask(
    __name__,
    static_url_path='',
    static_folder='static',
    template_folder='templates'
)
# Debug mode is fine for this sample; disable it in production.
app.debug = True
# The OAuth client configuration must sit next to this file.
if os.path.isfile('client_secrets.json') is False:
    sys.exit('client_secrets.json not found.')
CLIENT_ID = json.loads(open('client_secrets.json',
                            'r').read())['web']['client_id']
# On this sample, this is not really a secret
# Make sure to change SECRET_KEY for your own purposes
SECRET_KEY = 'abcde'
app.config.update(
    SECRET_KEY=SECRET_KEY
)
# App Engine Datastore to save credentials
class CredentialStore(ndb.Model):
    # Entities are keyed by the Google account id (the id_token's `sub`
    # claim) — see validate() / code().
    id_token = ndb.JsonProperty()
    credentials = ndb.JsonProperty()
    @classmethod
    def remove(cls, key):
        # Delete the entity stored under `key`.
        ndb.Key(cls.__name__, key).delete()
@app.route('/')
def index():
    """Render the landing page, sanitizing the `mode` query parameter."""
    allowed_modes = {
        'introduction',
        'authentication_with_backends',
        'authorization_client',
        'authorization_server',
    }
    requested = request.args.get('mode', 'introduction')
    # Fall back to the default page for any unknown mode.
    mode = requested if requested in allowed_modes else 'introduction'
    return render_template('index.html', client_id=CLIENT_ID, mode=mode)
@app.route('/api')
def api():
    """Return the signed-in user's Google Drive file list as JSON.

    Requires a prior /validate (session) and /code (stored offline
    credentials); responds 401 otherwise.
    """
    # If session doesn't include `id`, the user is not signed in
    if 'id' not in session:
        return make_response('Not authenticated', 401)
    # Extract user id from session
    sub = session.get('id')
    # Obtain Datastore entry by user id
    # NOTE(review): get_by_id can return None when /validate never ran for
    # this account; the attribute access below would then raise — confirm
    # the session flow guarantees a store exists.
    store = CredentialStore.get_by_id(sub)
    # If the store doesn't include `credentials`, user is not authorized
    if store.credentials is None:
        # Not authorized for offline use
        return make_response('access_token not stored', 401)
    # Deserialize the credential object
    credentials = client.Credentials.new_from_json(store.credentials)
    http = credentials.authorize(httplib2.Http())
    drive = build('drive', 'v3', http=http)
    files = drive.files().list(fields='files').execute()
    # API Client Library takes care of refreshing access token behind
    # the scenese. Store the Credential Object in case access_token is
    # replaced with a new one.
    store.credentials = credentials.to_json()
    store.put()
    # Respond with list of files from Google Drive
    return make_response(json.dumps(files.get('files', [])), 200)
@app.route('/validate', methods=['POST'])
def validate():
    """Verify a Google Sign-In id_token POSTed by the client, persist its
    claims in the Datastore, and start a server-side session."""
    # POST should include `id_token`
    id_token = request.form.get('id_token', '')
    # In order to validate the user, check if
    # - the id_token contains valid JWT signature
    # - `aud` matches the client id
    # - `exp` timestamp doesn't exceed current time
    # - `iss` matches with 'accounts.google.com'
    #   or 'https://accounts.google.com'
    # - also, check `hd` if applicable
    # Failure to properly validate the id_token
    # could **open your app to attack**.
    # Verify the `id_token` using API Client Library
    # This library covers checking signature, aud, exp.
    idinfo = client.verify_id_token(id_token, CLIENT_ID)
    # Additional verification: See if `iss` matches Google issuer string
    if idinfo['iss'] not in ['accounts.google.com',
                             'https://accounts.google.com']:
        return make_response('Wrong Issuer.', 401)
    # Extract user id from `id_token`'s content
    sub = idinfo['sub']
    # Obtain Datastore entry by user id
    store = CredentialStore.get_by_id(sub)
    # If the store is `None`, the user hasn't previously signed-in
    if store is None:
        # Create a new store
        store = CredentialStore(id=sub, id_token=idinfo)
    else:
        # Append `id_token` entry
        store.id_token = idinfo
    # Save the store
    store.put()
    # Start a session
    session['id'] = sub
    return make_response('', 200)
@app.route('/code', methods=['POST'])
def code():
    """Exchange a one-time authorization code for offline credentials and
    store them for the already signed-in user."""
    # If session doesn't include `id`, the user is not signed in
    if 'id' not in session:
        return make_response('Not authenticated', 401)
    user_id = session.get('id', None)
    # POST should include `code`
    code = request.form.get('code', '')
    # Exchange the `code` with credential information
    credentials = client.credentials_from_clientsecrets_and_code(
        'client_secrets.json', scope='', code=code)
    # If the credential is `None`
    if credentials is None:
        # Means it failed to obtain the credential object
        return make_response('Invalid authorization code.', 401)
    # Extract user id from `id_token`'s content
    sub = credentials.id_token['sub']
    # If the requesting user id doesn't match with the signed-in user id
    if user_id != sub:
        # Reject
        return make_response("User doesn't match", 401)
    # Obtain Datastore entry by user id
    store = CredentialStore.get_by_id(sub)
    # If the store is `None`, the user hasn't previously signed-in
    if store is None:
        return make_response('Authorization before authentication.', 401)
    # You could optionally authenticate if it makes sense.
    # Just chose not to do so here because Google recommendation is
    # to separate AuthN and AuthZ.
    # Serialize the credential object and save
    store.credentials = credentials.to_json()
    store.put()
    return make_response('', 200)
@app.route('/signout', methods=['POST'])
def signout():
    """Sign the user out by discarding the server-side session id."""
    # pop with a default is safe even when no session id exists.
    session.pop('id', None)
    empty_ok = make_response('', 200)
    return empty_ok
|
Griffintaur/News-At-Command-Line
|
setup.py
|
from setuptools import setup, find_packages
from os import path
# Use the README as the package's long description on PyPI.
project_root = path.abspath(path.dirname(__file__))
with open(path.join(project_root, 'README.md')) as readme:
    readme_text = readme.read()

setup(
    name="News At Command Line",
    version="0.0.1",
    description="Read your news on your favourite terminal",
    long_description=readme_text,
    author="<NAME>",
    license='MIT',
    packages=['News'],
    install_requires=[
        'bs4==0.0.1',
        'PyYAML==5.1',
        'requests==2.20.0',
    ],
    # `newsctl` console command dispatches to News.Main:Main.
    entry_points={
        'console_scripts': [
            'newsctl=News.Main:Main'
        ]
    },
)
|
kjagodka/porth
|
porth.py
|
<filename>porth.py
#!/usr/bin/env python3
import os
import sys
import subprocess
import shlex
from os import path
from typing import *
from enum import IntEnum, Enum, auto
from dataclasses import dataclass
from copy import copy
from time import sleep
import traceback
# File extension of Porth source files.
PORTH_EXT = '.porth'
# Macro-expansion limits — presumably the max expansion depth and how many
# expansion frames are shown in diagnostics; TODO confirm against the
# lexer/parser code (not in view here).
DEFAULT_EXPANSION_LIMIT=1000
EXPANSION_DIAGNOSTIC_LIMIT=10
MEM_CAPACITY = 640_000 # should be enough for everyone
SIM_NULL_POINTER_PADDING = 1 # just a little bit of a padding at the beginning of the memory to make 0 an invalid address
SIM_STR_CAPACITY = 640_000
SIM_ARGV_CAPACITY = 640_000
debug=False
# Source location: (file path, row, col) — printed as "%s:%d:%d".
Loc=Tuple[str, int, int]
class Keyword(Enum):
    """Reserved words of the Porth language (Token.value for KEYWORD tokens)."""
    IF=auto()
    ELIF=auto()
    ELSE=auto()
    END=auto()
    WHILE=auto()
    DO=auto()
    MACRO=auto()
    INCLUDE=auto()
class Intrinsic(Enum):
    """Built-in stack operations, dispatched on Op.operand by the
    simulator (see simulate_little_endian_linux)."""
    # Arithmetic
    PLUS=auto()
    MINUS=auto()
    MUL=auto()
    DIVMOD=auto()
    # Comparisons (push 1 or 0)
    EQ=auto()
    GT=auto()
    LT=auto()
    GE=auto()
    LE=auto()
    NE=auto()
    # Bitwise
    SHR=auto()
    SHL=auto()
    OR=auto()
    AND=auto()
    NOT=auto()
    PRINT=auto()
    # Stack manipulation
    DUP=auto()
    SWAP=auto()
    DROP=auto()
    OVER=auto()
    ROT=auto()
    # Memory access (FORTH_* variants take operands in Forth order)
    MEM=auto()
    LOAD=auto()
    STORE=auto()
    FORTH_LOAD=auto()
    FORTH_STORE=auto()
    LOAD64=auto()
    STORE64=auto()
    FORTH_LOAD64=auto()
    FORTH_STORE64=auto()
    CAST_PTR=auto()
    # Program environment
    ARGC=auto()
    ARGV=auto()
    HERE=auto()
    # Raw Linux syscalls by argument count
    SYSCALL0=auto()
    SYSCALL1=auto()
    SYSCALL2=auto()
    SYSCALL3=auto()
    SYSCALL4=auto()
    SYSCALL5=auto()
    SYSCALL6=auto()
class OpType(Enum):
    """Kinds of operations a compiled Program (List[Op]) consists of."""
    PUSH_INT=auto()
    PUSH_STR=auto()
    PUSH_CSTR=auto()
    INTRINSIC=auto()
    # Control-flow block ops; their operand is an OpAddr jump target.
    IF=auto()
    ELIF=auto()
    ELSE=auto()
    END=auto()
    WHILE=auto()
    DO=auto()
class TokenType(Enum):
    """Lexical categories of a Token; determines how Token.value is read."""
    WORD=auto()
    INT=auto()
    STR=auto()
    CSTR=auto()
    CHAR=auto()
    KEYWORD=auto()
assert len(TokenType) == 6, "Exhaustive Token type definition. The `value` field of the Token dataclass may require an update"
@dataclass
class Token:
    # Lexical category; selects which member of the `value` union applies.
    typ: TokenType
    # Raw source text of the token.
    text: str
    # Source position as (file, row, col).
    loc: Loc
    value: Union[int, str, Keyword]
    # https://www.python.org/dev/peps/pep-0484/#forward-references
    # Macro-expansion provenance — presumably the token this one was
    # expanded from and the expansion depth, used for diagnostics
    # (TODO confirm; expansion code is not in view).
    expanded_from: Optional['Token'] = None
    expanded_count: int = 0
# Index of an Op within a Program, used as a jump target.
OpAddr=int
@dataclass
class Op:
    typ: OpType
    # Token this op was compiled from (source location for diagnostics).
    token: Token
    # Meaning depends on typ: int for PUSH_INT, str for PUSH_STR/PUSH_CSTR,
    # Intrinsic for INTRINSIC, OpAddr jump target for block ops.
    operand: Optional[Union[int, str, Intrinsic, OpAddr]] = None
# A compiled Porth program: a flat list of ops addressed by OpAddr.
Program=List[Op]
def get_cstr_from_mem(mem: bytearray, ptr: int) -> bytes:
    """Return the NUL-terminated byte string starting at `ptr` in `mem`
    (the terminator itself is excluded)."""
    cursor = ptr
    # Walk forward until the first zero byte.
    while mem[cursor]:
        cursor += 1
    return mem[ptr:cursor]
def simulate_little_endian_linux(program: Program, argv: List[str]):
AT_FDCWD=-100
O_RDONLY=0
ENOENT=2
CLOCK_MONOTONIC=1
stack: List[int] = []
mem = bytearray(SIM_NULL_POINTER_PADDING + SIM_STR_CAPACITY + SIM_ARGV_CAPACITY + MEM_CAPACITY)
str_buf_ptr = SIM_NULL_POINTER_PADDING
str_ptrs: Dict[int, int] = {}
str_size = 0
argv_buf_ptr = SIM_NULL_POINTER_PADDING + SIM_STR_CAPACITY
argc = 0
mem_buf_ptr = SIM_NULL_POINTER_PADDING + SIM_STR_CAPACITY + SIM_ARGV_CAPACITY
fds: List[BinaryIO] = [sys.stdin.buffer, sys.stdout.buffer, sys.stderr.buffer]
for arg in argv:
value = arg.encode('utf-8')
n = len(value)
arg_ptr = str_buf_ptr + str_size
mem[arg_ptr:arg_ptr+n] = value
mem[arg_ptr+n] = 0
str_size += n + 1
assert str_size <= SIM_STR_CAPACITY, "String buffer overflow"
argv_ptr = argv_buf_ptr+argc*8
mem[argv_ptr:argv_ptr+8] = arg_ptr.to_bytes(8, byteorder='little')
argc += 1
assert argc*8 <= SIM_ARGV_CAPACITY, "Argv buffer, overflow"
ip = 0
while ip < len(program):
assert len(OpType) == 10, "Exhaustive op handling in simulate_little_endian_linux"
op = program[ip]
try:
if op.typ == OpType.PUSH_INT:
assert isinstance(op.operand, int), "This could be a bug in the parsing step"
stack.append(op.operand)
ip += 1
elif op.typ == OpType.PUSH_STR:
assert isinstance(op.operand, str), "This could be a bug in the parsing step"
value = op.operand.encode('utf-8')
n = len(value)
stack.append(n)
if ip not in str_ptrs:
str_ptr = str_buf_ptr+str_size
str_ptrs[ip] = str_ptr
mem[str_ptr:str_ptr+n] = value
str_size += n
assert str_size <= SIM_STR_CAPACITY, "String buffer overflow"
stack.append(str_ptrs[ip])
ip += 1
elif op.typ == OpType.PUSH_CSTR:
assert isinstance(op.operand, str), "This could be a bug in the parsing step"
value = op.operand.encode('utf-8') + b'\0'
n = len(value)
if ip not in str_ptrs:
str_ptr = str_buf_ptr+str_size
str_ptrs[ip] = str_ptr
mem[str_ptr:str_ptr+n] = value
str_size += n
assert str_size <= SIM_STR_CAPACITY, "String buffer overflow"
stack.append(str_ptrs[ip])
ip += 1
elif op.typ == OpType.IF:
ip += 1
elif op.typ == OpType.WHILE:
ip += 1
elif op.typ == OpType.ELSE:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
elif op.typ == OpType.ELIF:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
elif op.typ == OpType.END:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
elif op.typ == OpType.DO:
a = stack.pop()
if a == 0:
assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
ip = op.operand
else:
ip += 1
elif op.typ == OpType.INTRINSIC:
assert len(Intrinsic) == 41, "Exhaustive handling of intrinsic in simulate_little_endian_linux()"
if op.operand == Intrinsic.PLUS:
a = stack.pop()
b = stack.pop()
stack.append(a + b)
ip += 1
elif op.operand == Intrinsic.MINUS:
a = stack.pop()
b = stack.pop()
stack.append(b - a)
ip += 1
elif op.operand == Intrinsic.MUL:
a = stack.pop()
b = stack.pop()
stack.append(b * a)
ip += 1
elif op.operand == Intrinsic.DIVMOD:
a = stack.pop()
b = stack.pop()
stack.append(b // a)
stack.append(b % a)
ip += 1
elif op.operand == Intrinsic.EQ:
a = stack.pop()
b = stack.pop()
stack.append(int(a == b))
ip += 1
elif op.operand == Intrinsic.GT:
a = stack.pop()
b = stack.pop()
stack.append(int(b > a))
ip += 1
elif op.operand == Intrinsic.LT:
a = stack.pop()
b = stack.pop()
stack.append(int(b < a))
ip += 1
elif op.operand == Intrinsic.GE:
a = stack.pop()
b = stack.pop()
stack.append(int(b >= a))
ip += 1
elif op.operand == Intrinsic.LE:
a = stack.pop()
b = stack.pop()
stack.append(int(b <= a))
ip += 1
elif op.operand == Intrinsic.NE:
a = stack.pop()
b = stack.pop()
stack.append(int(b != a))
ip += 1
elif op.operand == Intrinsic.SHR:
a = stack.pop()
b = stack.pop()
stack.append(int(b >> a))
ip += 1
elif op.operand == Intrinsic.SHL:
a = stack.pop()
b = stack.pop()
stack.append(int(b << a))
ip += 1
elif op.operand == Intrinsic.OR:
a = stack.pop()
b = stack.pop()
stack.append(int(a | b))
ip += 1
elif op.operand == Intrinsic.AND:
a = stack.pop()
b = stack.pop()
stack.append(int(a & b))
ip += 1
elif op.operand == Intrinsic.NOT:
a = stack.pop()
stack.append(int(~a))
ip += 1
elif op.operand == Intrinsic.PRINT:
a = stack.pop()
fds[1].write(b"%d\n" % a)
fds[1].flush()
ip += 1
elif op.operand == Intrinsic.DUP:
a = stack.pop()
stack.append(a)
stack.append(a)
ip += 1
elif op.operand == Intrinsic.SWAP:
a = stack.pop()
b = stack.pop()
stack.append(a)
stack.append(b)
ip += 1
elif op.operand == Intrinsic.DROP:
stack.pop()
ip += 1
elif op.operand == Intrinsic.OVER:
a = stack.pop()
b = stack.pop()
stack.append(b)
stack.append(a)
stack.append(b)
ip += 1
elif op.operand == Intrinsic.ROT:
a = stack.pop()
b = stack.pop()
c = stack.pop()
stack.append(b)
stack.append(a)
stack.append(c)
ip += 1
elif op.operand == Intrinsic.MEM:
stack.append(mem_buf_ptr)
ip += 1
elif op.operand == Intrinsic.LOAD:
addr = stack.pop()
byte = mem[addr]
stack.append(byte)
ip += 1
elif op.operand == Intrinsic.STORE:
store_value = stack.pop()
store_addr = stack.pop()
mem[store_addr] = store_value & 0xFF
ip += 1
elif op.operand == Intrinsic.FORTH_LOAD:
addr = stack.pop()
byte = mem[addr]
stack.append(byte)
ip += 1
elif op.operand == Intrinsic.FORTH_STORE:
store_addr = stack.pop()
store_value = stack.pop()
mem[store_addr] = store_value & 0xFF
ip += 1
elif op.operand == Intrinsic.LOAD64:
addr = stack.pop()
_bytes = bytearray(8)
for offset in range(0,8):
_bytes[offset] = mem[addr + offset]
stack.append(int.from_bytes(_bytes, byteorder="little"))
ip += 1
elif op.operand == Intrinsic.STORE64:
store_value = stack.pop()
store_value64 = store_value.to_bytes(length=8, byteorder="little", signed=(store_value < 0));
store_addr64 = stack.pop();
for byte in store_value64:
mem[store_addr64] = byte;
store_addr64 += 1;
ip += 1
elif op.operand == Intrinsic.FORTH_LOAD64:
addr = stack.pop()
_bytes = bytearray(8)
for offset in range(0,8):
_bytes[offset] = mem[addr + offset]
stack.append(int.from_bytes(_bytes, byteorder="little"))
ip += 1
elif op.operand == Intrinsic.FORTH_STORE64:
store_addr64 = stack.pop();
store_value = stack.pop()
store_value64 = store_value.to_bytes(length=8, byteorder="little", signed=(store_value < 0));
for byte in store_value64:
mem[store_addr64] = byte;
store_addr64 += 1;
ip += 1
elif op.operand == Intrinsic.ARGC:
stack.append(argc)
ip += 1
elif op.operand == Intrinsic.ARGV:
stack.append(argv_buf_ptr)
ip += 1
elif op.operand == Intrinsic.HERE:
value = ("%s:%d:%d" % op.token.loc).encode('utf-8')
n = len(value)
stack.append(n)
if ip not in str_ptrs:
str_ptr = str_buf_ptr+str_size
str_ptrs[ip] = str_ptr
mem[str_ptr:str_ptr+n] = value
str_size += n
assert str_size <= SIM_STR_CAPACITY, "String buffer overflow"
stack.append(str_ptrs[ip])
ip += 1
elif op.operand == Intrinsic.CAST_PTR:
# Ignore the type casting. It's only useful for type_check_program() phase
ip += 1
elif op.operand == Intrinsic.SYSCALL0:
syscall_number = stack.pop();
if syscall_number == 39: # SYS_getpid
stack.append(os.getpid());
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL1:
syscall_number = stack.pop()
arg1 = stack.pop()
if syscall_number == 60: # SYS_exit
exit(arg1)
elif syscall_number == 3: # SYS_close
fds[arg1].close()
stack.append(0)
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL2:
assert False, "not implemented"
elif op.operand == Intrinsic.SYSCALL3:
syscall_number = stack.pop()
arg1 = stack.pop()
arg2 = stack.pop()
arg3 = stack.pop()
if syscall_number == 0: # SYS_read
fd = arg1
buf = arg2
count = arg3
# NOTE: trying to behave like a POSIX tty in canonical mode by making the data available
# on each newline
# https://en.wikipedia.org/wiki/POSIX_terminal_interface#Canonical_mode_processing
# TODO: maybe this behavior should be customizable
data = fds[fd].readline(count)
mem[buf:buf+len(data)] = data
stack.append(len(data))
elif syscall_number == 1: # SYS_write
fd = arg1
buf = arg2
count = arg3
fds[fd].write(mem[buf:buf+count])
fds[fd].flush()
stack.append(count)
elif syscall_number == 257: # SYS_openat
dirfd = arg1
pathname_ptr = arg2
flags = arg3
if dirfd != AT_FDCWD:
assert False, "openat: unsupported dirfd"
if flags != O_RDONLY:
assert False, "openat: unsupported flags"
pathname = get_cstr_from_mem(mem, pathname_ptr).decode('utf-8')
fd = len(fds)
try:
fds.append(open(pathname, 'rb'))
stack.append(fd)
except FileNotFoundError:
stack.append(-ENOENT)
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL4:
syscall_number = stack.pop()
arg1 = stack.pop()
arg2 = stack.pop()
arg3 = stack.pop()
arg4 = stack.pop()
if syscall_number == 230: # clock_nanosleep
clock_id = arg1
flags = arg2
request_ptr = arg3
remain_ptr = arg4
assert clock_id == CLOCK_MONOTONIC, "Only CLOCK_MONOTONIC is implemented for SYS_clock_nanosleep"
assert flags == 0, "Only relative time is supported for SYS_clock_nanosleep"
assert request_ptr != 0, "request cannot be NULL for SYS_clock_nanosleep. We should probably return -1 in that case..."
assert remain_ptr == 0, "remain is not supported for SYS_clock_nanosleep"
seconds = int.from_bytes(mem[request_ptr:request_ptr+8], byteorder='little')
nano_seconds = int.from_bytes(mem[request_ptr+8:request_ptr+8+8], byteorder='little')
sleep(float(seconds)+float(nano_seconds)*1e-09)
stack.append(0)
else:
assert False, "unknown syscall number %d" % syscall_number
ip += 1
elif op.operand == Intrinsic.SYSCALL5:
assert False, "not implemented"
elif op.operand == Intrinsic.SYSCALL6:
assert False, "not implemented"
else:
assert False, "unreachable"
else:
assert False, "unreachable"
except Exception as e:
compiler_error_with_expansion_stack(op.token, "Python Exception during simulation")
traceback.print_exception(type(e), e, e.__traceback__)
exit(1)
if debug:
print("[INFO] Memory dump")
print(mem[:20])
class DataType(IntEnum):
    """Compile-time value types tracked by the type checker."""
    INT = 1
    BOOL = 2
    PTR = 3
def compiler_diagnostic(loc: Loc, tag: str, message: str):
    """Print one `file:row:col: TAG: message` diagnostic line to stderr."""
    file_path, row, col = loc
    print(f"{file_path}:{row}:{col}: {tag}: {message}", file=sys.stderr)
def compiler_diagnostic_with_expansion_stack(token: Token, tag: str, message: str):
    """Print a diagnostic for `token`, followed by notes for the chain of
    macro expansions it came from (truncated after EXPANSION_DIAGNOSTIC_LIMIT)."""
    compiler_diagnostic(token.loc, tag, message)
    frame = token.expanded_from
    depth = 0
    while frame is not None:
        if depth > EXPANSION_DIAGNOSTIC_LIMIT:
            break
        compiler_note(frame.loc, "expanded from `%s`" % frame.text)
        frame = frame.expanded_from
        depth += 1
    # Signal truncation once the note budget has been exhausted.
    if depth > EXPANSION_DIAGNOSTIC_LIMIT:
        print('...', file=sys.stderr)
        print('... too many expansions ...', file=sys.stderr)
        print('...', file=sys.stderr)
def compiler_error(loc: Loc, message: str):
    """Report an ERROR diagnostic at `loc`."""
    compiler_diagnostic(loc, 'ERROR', message)
def compiler_error_with_expansion_stack(token: Token, message: str):
    """Report an ERROR diagnostic for `token`, including its macro-expansion trail."""
    compiler_diagnostic_with_expansion_stack(token, 'ERROR', message)
def compiler_note(loc: Loc, message: str):
    """Report a NOTE diagnostic at `loc` (supplementary information for a prior error)."""
    compiler_diagnostic(loc, 'NOTE', message)
def not_enough_arguments(op: Op):
    """Emit a stack-underflow diagnostic tailored to the kind of `op`."""
    # TODO: why don't we add while-do here too?
    if op.typ == OpType.IF:
        compiler_error_with_expansion_stack(op.token, "not enough arguments for the if-block")
    elif op.typ == OpType.INTRINSIC:
        assert isinstance(op.operand, Intrinsic)
        compiler_error_with_expansion_stack(op.token, "not enough arguments for the `%s` intrinsic" % INTRINSIC_NAMES[op.operand])
    else:
        assert False, "unsupported type of operation"
# A type-checking stack entry pairs a value's DataType with the Token that
# produced it, so diagnostics can point back at the source location.
DataStack=List[Tuple[DataType, Token]]
# TODO: `if 1 10 < do 69 32 elif 2 10 < do 420 end` does not properly type check
def type_check_program(program: Program):
    """Statically verify the data-stack discipline of `program`.

    Runs an abstract interpretation of the program at the type level: every
    op pushes/pops (DataType, Token) pairs instead of concrete values.
    Block ops (if/elif/else/while/do/end) snapshot the stack so that all
    branches can be checked to leave the stack with the same shape.
    On the first violation an error diagnostic is printed and the process
    exits with status 1.
    """
    # Current abstract stack: (type, producing token) pairs.
    stack: DataStack = []
    # Snapshots taken at block-opening ops, tagged with the op type that took them.
    block_stack: List[Tuple[DataStack, OpType]] = []
    for ip in range(len(program)):
        op = program[ip]
        assert len(OpType) == 10, "Exhaustive ops handling in type_check_program()"
        if op.typ == OpType.PUSH_INT:
            stack.append((DataType.INT, op.token))
        elif op.typ == OpType.PUSH_STR:
            # A string pushes its length followed by a pointer to its bytes.
            stack.append((DataType.INT, op.token))
            stack.append((DataType.PTR, op.token))
        elif op.typ == OpType.PUSH_CSTR:
            stack.append((DataType.PTR, op.token))
        elif op.typ == OpType.INTRINSIC:
            assert len(Intrinsic) == 41, "Exhaustive intrinsic handling in type_check_program()"
            assert isinstance(op.operand, Intrinsic), "This could be a bug in compilation step"
            if op.operand == Intrinsic.PLUS:
                assert len(DataType) == 3, "Exhaustive type handling in PLUS intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # INT+INT -> INT; pointer arithmetic (PTR+INT either way) -> PTR.
                if a_type == DataType.INT and b_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                elif a_type == DataType.INT and b_type == DataType.PTR:
                    stack.append((DataType.PTR, op.token))
                elif a_type == DataType.PTR and b_type == DataType.INT:
                    stack.append((DataType.PTR, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument types for PLUS intrinsic. Expected INT or PTR")
                    exit(1)
            elif op.operand == Intrinsic.MINUS:
                assert len(DataType) == 3, "Exhaustive type handling in MINUS intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # PTR-PTR and INT-INT -> INT; PTR-INT -> PTR.
                if a_type == b_type and (a_type == DataType.INT or a_type == DataType.PTR):
                    stack.append((DataType.INT, op.token))
                elif b_type == DataType.PTR and a_type == DataType.INT:
                    stack.append((DataType.PTR, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument types for MINUS intrinsic: %s" % [b_type, a_type])
                    exit(1)
            elif op.operand == Intrinsic.MUL:
                assert len(DataType) == 3, "Exhaustive type handling in MUL intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument types for MUL intrinsic. Expected INT.")
                    exit(1)
            elif op.operand == Intrinsic.DIVMOD:
                assert len(DataType) == 3, "Exhaustive type handling in DIVMOD intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # Pushes quotient and remainder, both INT.
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument types for DIVMOD intrinsic. Expected INT.")
                    exit(1)
            elif op.operand == Intrinsic.EQ:
                assert len(DataType) == 3, "Exhaustive type handling in EQ intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument types for EQ intrinsic. Expected INT.")
                    exit(1)
            elif op.operand == Intrinsic.GT:
                assert len(DataType) == 3, "Exhaustive type handling in GT intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for GT intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.LT:
                assert len(DataType) == 3, "Exhaustive type handling in LT intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for LT intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.GE:
                assert len(DataType) == 3, "Exhaustive type handling in GE intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for GE intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.LE:
                assert len(DataType) == 3, "Exhaustive type handling in LE intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for LE intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.NE:
                assert len(DataType) == 3, "Exhaustive type handling in NE intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for NE intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.SHR:
                assert len(DataType) == 3, "Exhaustive type handling in SHR intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for SHR intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.SHL:
                assert len(DataType) == 3, "Exhaustive type handling in SHL intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for SHL intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.OR:
                assert len(DataType) == 3, "Exhaustive type handling in OR intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # OR works both as bitwise (INT) and logical (BOOL).
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                elif a_type == b_type and a_type == DataType.BOOL:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for OR intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.AND:
                assert len(DataType) == 3, "Exhaustive type handling in AND intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # AND works both as bitwise (INT) and logical (BOOL).
                if a_type == b_type and a_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                elif a_type == b_type and a_type == DataType.BOOL:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for AND intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.NOT:
                assert len(DataType) == 3, "Exhaustive type handling in NOT intrinsic"
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                if a_type == DataType.INT:
                    stack.append((DataType.INT, op.token))
                elif a_type == DataType.BOOL:
                    stack.append((DataType.BOOL, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for NOT intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.PRINT:
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                stack.pop()
            elif op.operand == Intrinsic.DUP:
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                a = stack.pop()
                stack.append(a)
                stack.append(a)
            elif op.operand == Intrinsic.SWAP:
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a = stack.pop()
                b = stack.pop()
                stack.append(a)
                stack.append(b)
            elif op.operand == Intrinsic.DROP:
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                stack.pop()
            elif op.operand == Intrinsic.OVER:
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a = stack.pop()
                b = stack.pop()
                stack.append(b)
                stack.append(a)
                stack.append(b)
            elif op.operand == Intrinsic.ROT:
                if len(stack) < 3:
                    not_enough_arguments(op)
                    exit(1)
                a = stack.pop()
                b = stack.pop()
                c = stack.pop()
                stack.append(b)
                stack.append(a)
                stack.append(c)
            elif op.operand == Intrinsic.MEM:
                stack.append((DataType.PTR, op.token))
            elif op.operand == Intrinsic.LOAD:
                assert len(DataType) == 3, "Exhaustive type handling in LOAD intrinsic"
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                if a_type == DataType.PTR:
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for LOAD intrinsic: %s" % a_type)
                    exit(1)
            elif op.operand == Intrinsic.STORE:
                assert len(DataType) == 3, "Exhaustive type handling in STORE intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # STORE takes (ptr value) i.e. value on top.
                if a_type == DataType.INT and b_type == DataType.PTR:
                    pass
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for STORE intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.FORTH_LOAD:
                assert len(DataType) == 3, "Exhaustive type handling in LOAD intrinsic"
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                if a_type == DataType.PTR:
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for LOAD intrinsic: %s" % a_type)
                    exit(1)
            elif op.operand == Intrinsic.FORTH_STORE:
                assert len(DataType) == 3, "Exhaustive type handling in STORE intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # Forth-order STORE takes (value ptr) i.e. pointer on top.
                if a_type == DataType.PTR and b_type == DataType.INT:
                    pass
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for STORE intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.LOAD64:
                assert len(DataType) == 3, "Exhaustive type handling in LOAD64 intrinsic"
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                if a_type == DataType.PTR:
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for LOAD64 intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.STORE64:
                assert len(DataType) == 3, "Exhaustive type handling in STORE64 intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                # 64-bit stores also accept a PTR value (pointers are word-sized).
                if (a_type == DataType.INT or a_type == DataType.PTR) and b_type == DataType.PTR:
                    pass
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for STORE64 intrinsic: %s" % [b_type, a_type])
                    exit(1)
            elif op.operand == Intrinsic.FORTH_LOAD64:
                assert len(DataType) == 3, "Exhaustive type handling in LOAD64 intrinsic"
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                if a_type == DataType.PTR:
                    stack.append((DataType.INT, op.token))
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for LOAD64 intrinsic")
                    exit(1)
            elif op.operand == Intrinsic.FORTH_STORE64:
                assert len(DataType) == 3, "Exhaustive type handling in STORE64 intrinsic"
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                a_type, a_loc = stack.pop()
                b_type, b_loc = stack.pop()
                if (b_type == DataType.INT or b_type == DataType.PTR) and a_type == DataType.PTR:
                    pass
                else:
                    compiler_error_with_expansion_stack(op.token, "invalid argument type for STORE64 intrinsic: %s" % [b_type, a_type])
                    exit(1)
            elif op.operand == Intrinsic.CAST_PTR:
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                # Reinterpret the top of the stack as a pointer, keeping its token.
                a_type, a_token = stack.pop()
                stack.append((DataType.PTR, a_token))
            elif op.operand == Intrinsic.ARGC:
                stack.append((DataType.INT, op.token))
            elif op.operand == Intrinsic.ARGV:
                stack.append((DataType.PTR, op.token))
            elif op.operand == Intrinsic.HERE:
                # HERE pushes (length, pointer) of the location string, like PUSH_STR.
                stack.append((DataType.INT, op.token))
                stack.append((DataType.PTR, op.token))
            # TODO: figure out how to type check syscall arguments and return types
            elif op.operand == Intrinsic.SYSCALL0:
                if len(stack) < 1:
                    not_enough_arguments(op)
                    exit(1)
                for i in range(1):
                    stack.pop()
                stack.append((DataType.INT, op.token))
            elif op.operand == Intrinsic.SYSCALL1:
                if len(stack) < 2:
                    not_enough_arguments(op)
                    exit(1)
                for i in range(2):
                    stack.pop()
                stack.append((DataType.INT, op.token))
            elif op.operand == Intrinsic.SYSCALL2:
                if len(stack) < 3:
                    not_enough_arguments(op)
                    exit(1)
                for i in range(3):
                    stack.pop()
                stack.append((DataType.INT, op.token))
            elif op.operand == Intrinsic.SYSCALL3:
                if len(stack) < 4:
                    not_enough_arguments(op)
                    exit(1)
                for i in range(4):
                    stack.pop()
                stack.append((DataType.INT, op.token))
            elif op.operand == Intrinsic.SYSCALL4:
                if len(stack) < 5:
                    not_enough_arguments(op)
                    exit(1)
                for i in range(5):
                    stack.pop()
                stack.append((DataType.INT, op.token))
            elif op.operand == Intrinsic.SYSCALL5:
                if len(stack) < 6:
                    not_enough_arguments(op)
                    exit(1)
                for i in range(6):
                    stack.pop()
                stack.append((DataType.INT, op.token))
            elif op.operand == Intrinsic.SYSCALL6:
                if len(stack) < 7:
                    not_enough_arguments(op)
                    exit(1)
                for i in range(7):
                    stack.pop()
                stack.append((DataType.INT, op.token))
            else:
                assert False, "unreachable"
        elif op.typ == OpType.IF:
            block_stack.append((copy(stack), op.typ))
        elif op.typ == OpType.WHILE:
            block_stack.append((copy(stack), op.typ))
        elif op.typ == OpType.END:
            block_snapshot, block_type = block_stack.pop()
            assert len(OpType) == 10, "Exhaustive handling of op types"
            if block_type == OpType.ELSE:
                # Closing an if/else: the else branch must match the then branch.
                expected_types = list(map(lambda x: x[0], block_snapshot))
                actual_types = list(map(lambda x: x[0], stack))
                if expected_types != actual_types:
                    compiler_error_with_expansion_stack(op.token, 'all branches of the if-block must produce the same types of the arguments on the data stack')
                    compiler_note(op.token.loc, 'Expected types: %s' % expected_types)
                    compiler_note(op.token.loc, 'Actual types: %s' % actual_types)
                    exit(1)
            elif block_type == OpType.ELIF:
                expected_types = list(map(lambda x: x[0], block_snapshot))
                actual_types = list(map(lambda x: x[0], stack))
                if expected_types != actual_types:
                    compiler_error_with_expansion_stack(op.token, 'all branches of the if-block must produce the same types of the arguments on the data stack')
                    compiler_note(op.token.loc, 'Expected types: %s' % expected_types)
                    compiler_note(op.token.loc, 'Actual types: %s' % actual_types)
                    exit(1)
            elif block_type == OpType.DO:
                begin_snapshot, begin_type = block_stack.pop()
                if begin_type == OpType.WHILE:
                    # A loop body may not change the stack shape, since it can run 0..N times.
                    expected_types = list(map(lambda x: x[0], begin_snapshot))
                    actual_types = list(map(lambda x: x[0], stack))
                    if expected_types != actual_types:
                        compiler_error_with_expansion_stack(op.token, 'while-do body is not allowed to alter the types of the arguments on the data stack')
                        compiler_note(op.token.loc, 'Expected types: %s' % expected_types)
                        compiler_note(op.token.loc, 'Actual types: %s' % actual_types)
                        exit(1)
                    stack = block_snapshot
                elif begin_type == OpType.IF:
                    # An if without else may not change the stack shape either.
                    expected_types = list(map(lambda x: x[0], begin_snapshot))
                    actual_types = list(map(lambda x: x[0], stack))
                    if expected_types != actual_types:
                        compiler_error_with_expansion_stack(op.token, 'else-less if block is not allowed to alter the types of the arguments on the data stack')
                        compiler_note(op.token.loc, 'Expected types: %s' % expected_types)
                        compiler_note(op.token.loc, 'Actual types: %s' % actual_types)
                        exit(1)
                    stack = block_snapshot
                else:
                    # FIX: was `assert "unreachable"`, a truthy no-op that never fired.
                    assert False, "unreachable"
            else:
                # FIX: was `assert "unreachable"`, a truthy no-op that never fired.
                assert False, "unreachable"
        elif op.typ == OpType.ELSE:
            do_snapshot, do_type = block_stack.pop()
            assert do_type == OpType.DO
            pre_do_snapshot, pre_do_type = block_stack.pop()
            assert pre_do_type == OpType.IF or pre_do_type == OpType.ELIF, pre_do_type
            if pre_do_type == OpType.ELIF:
                expected_types = list(map(lambda x: x[0], pre_do_snapshot))
                actual_types = list(map(lambda x: x[0], stack))
                if expected_types != actual_types:
                    compiler_error_with_expansion_stack(op.token, 'all branches of the if-block must produce the same types of the arguments on the data stack')
                    compiler_note(op.token.loc, 'Expected types: %s' % expected_types)
                    compiler_note(op.token.loc, 'Actual types: %s' % actual_types)
                    exit(1)
            block_stack.append((copy(stack), op.typ))
            stack = do_snapshot
        elif op.typ == OpType.ELIF:
            do_snapshot, do_type = block_stack.pop()
            assert do_type == OpType.DO
            pre_do_snapshot, pre_do_type = block_stack.pop()
            assert pre_do_type == OpType.IF or pre_do_type == OpType.ELIF, pre_do_type
            if pre_do_type == OpType.ELIF:
                expected_types = list(map(lambda x: x[0], pre_do_snapshot))
                actual_types = list(map(lambda x: x[0], stack))
                if expected_types != actual_types:
                    compiler_error_with_expansion_stack(op.token, 'all branches of the if-block must produce the same types of the arguments on the data stack')
                    compiler_note(op.token.loc, 'Expected types: %s' % expected_types)
                    compiler_note(op.token.loc, 'Actual types: %s' % actual_types)
                    exit(1)
            block_stack.append((copy(stack), op.typ))
            stack = do_snapshot
        elif op.typ == OpType.DO:
            # DO consumes the condition, which must be a BOOL.
            if len(stack) < 1:
                not_enough_arguments(op)
                exit(1)
            a_type, a_token = stack.pop()
            if a_type != DataType.BOOL:
                compiler_error_with_expansion_stack(op.token, "Invalid argument for the while-do condition. Expected BOOL.")
                exit(1)
            block_stack.append((copy(stack), op.typ))
        else:
            assert False, "unreachable"
    # At program end nothing may be left on the stack.
    if len(stack) != 0:
        compiler_error_with_expansion_stack(stack[-1][1], "unhandled data on the stack: %s" % list(map(lambda x: x[0], stack)))
        exit(1)
def generate_nasm_linux_x86_64(program: Program, out_file_path: str):
    """Emit NASM x86-64 Linux assembly for `program` into `out_file_path`.

    Each op becomes a labeled `addr_N` block so jumps can target any op.
    String literals are collected in `strs` and emitted into the .data
    segment at the end; `mem` (.bss) is the program's linear memory and
    `args_ptr` saves the initial rsp so argc/argv can be recovered.
    """
    strs: List[bytes] = []
    with open(out_file_path, "w") as out:
        out.write("BITS 64\n")
        out.write("segment .text\n")
        # Runtime routine: prints the unsigned integer in rdi followed by '\n'
        # via the division-by-10 digit loop (r9 is the magic reciprocal of 10).
        out.write("print:\n")
        out.write(" mov r9, -3689348814741910323\n")
        out.write(" sub rsp, 40\n")
        out.write(" mov BYTE [rsp+31], 10\n")
        out.write(" lea rcx, [rsp+30]\n")
        out.write(".L2:\n")
        out.write(" mov rax, rdi\n")
        out.write(" lea r8, [rsp+32]\n")
        out.write(" mul r9\n")
        out.write(" mov rax, rdi\n")
        out.write(" sub r8, rcx\n")
        out.write(" shr rdx, 3\n")
        out.write(" lea rsi, [rdx+rdx*4]\n")
        out.write(" add rsi, rsi\n")
        out.write(" sub rax, rsi\n")
        out.write(" add eax, 48\n")
        out.write(" mov BYTE [rcx], al\n")
        out.write(" mov rax, rdi\n")
        out.write(" mov rdi, rdx\n")
        out.write(" mov rdx, rcx\n")
        out.write(" sub rcx, 1\n")
        out.write(" cmp rax, 9\n")
        out.write(" ja .L2\n")
        out.write(" lea rax, [rsp+32]\n")
        out.write(" mov edi, 1\n")
        out.write(" sub rdx, rax\n")
        out.write(" xor eax, eax\n")
        out.write(" lea rsi, [rsp+32+rdx]\n")
        out.write(" mov rdx, r8\n")
        out.write(" mov rax, 1\n")
        out.write(" syscall\n")
        out.write(" add rsp, 40\n")
        out.write(" ret\n")
        out.write("global _start\n")
        out.write("_start:\n")
        # Save the initial stack pointer: [rsp] is argc, [rsp+8...] is argv.
        out.write(" mov [args_ptr], rsp\n")
        for ip in range(len(program)):
            op = program[ip]
            assert len(OpType) == 10, "Exhaustive ops handling in generate_nasm_linux_x86_64"
            out.write("addr_%d:\n" % ip)
            if op.typ == OpType.PUSH_INT:
                assert isinstance(op.operand, int), "This could be a bug in the parsing step"
                out.write(" ;; -- push int %d --\n" % op.operand)
                out.write(" mov rax, %d\n" % op.operand)
                out.write(" push rax\n")
            elif op.typ == OpType.PUSH_STR:
                assert isinstance(op.operand, str), "This could be a bug in the parsing step"
                value = op.operand.encode('utf-8')
                n = len(value)
                out.write(" ;; -- push str --\n")
                out.write(" mov rax, %d\n" % n)
                out.write(" push rax\n")
                out.write(" push str_%d\n" % len(strs))
                strs.append(value)
            elif op.typ == OpType.PUSH_CSTR:
                assert isinstance(op.operand, str), "This could be a bug in the parsing step"
                # NUL-terminated; only the pointer is pushed (no length).
                value = op.operand.encode('utf-8') + b'\0'
                # FIX: was mislabeled ";; -- push str --" (copy-paste).
                out.write(" ;; -- push cstr --\n")
                out.write(" push str_%d\n" % len(strs))
                strs.append(value)
            elif op.typ == OpType.IF:
                out.write(" ;; -- if --\n")
            elif op.typ == OpType.WHILE:
                out.write(" ;; -- while --\n")
            elif op.typ == OpType.ELSE:
                out.write(" ;; -- else --\n")
                assert isinstance(op.operand, OpAddr), "This could be a bug in the parsing step"
                out.write(" jmp addr_%d\n" % op.operand)
            elif op.typ == OpType.ELIF:
                out.write(" ;; -- elif --\n")
                assert isinstance(op.operand, OpAddr), f"This could be a bug in the parsing step: {op.operand}"
                out.write(" jmp addr_%d\n" % op.operand)
            elif op.typ == OpType.END:
                assert isinstance(op.operand, int), "This could be a bug in the parsing step"
                out.write(" ;; -- end --\n")
                # Skip the jump when it would just fall through to the next op.
                if ip + 1 != op.operand:
                    out.write(" jmp addr_%d\n" % op.operand)
            elif op.typ == OpType.DO:
                out.write(" ;; -- do --\n")
                out.write(" pop rax\n")
                out.write(" test rax, rax\n")
                assert isinstance(op.operand, int), "This could be a bug in the parsing step"
                out.write(" jz addr_%d\n" % op.operand)
            elif op.typ == OpType.INTRINSIC:
                assert len(Intrinsic) == 41, "Exhaustive intrinsic handling in generate_nasm_linux_x86_64()"
                if op.operand == Intrinsic.PLUS:
                    out.write(" ;; -- plus --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" add rax, rbx\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.MINUS:
                    out.write(" ;; -- minus --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" sub rbx, rax\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.MUL:
                    out.write(" ;; -- mul --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" mul rbx\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.DIVMOD:
                    # FIX: was mislabeled ";; -- mod --"; pushes quotient then remainder.
                    out.write(" ;; -- divmod --\n")
                    out.write(" xor rdx, rdx\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" div rbx\n")
                    out.write(" push rax\n")
                    out.write(" push rdx\n")
                elif op.operand == Intrinsic.SHR:
                    out.write(" ;; -- shr --\n")
                    out.write(" pop rcx\n")
                    out.write(" pop rbx\n")
                    out.write(" shr rbx, cl\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.SHL:
                    out.write(" ;; -- shl --\n")
                    out.write(" pop rcx\n")
                    out.write(" pop rbx\n")
                    out.write(" shl rbx, cl\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.OR:
                    out.write(" ;; -- bor --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" or rbx, rax\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.AND:
                    out.write(" ;; -- band --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" and rbx, rax\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.NOT:
                    out.write(" ;; -- not --\n")
                    out.write(" pop rax\n")
                    out.write(" not rax\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.PRINT:
                    out.write(" ;; -- print --\n")
                    out.write(" pop rdi\n")
                    out.write(" call print\n")
                elif op.operand == Intrinsic.EQ:
                    # Comparisons use cmov to materialize 0/1 without branching.
                    out.write(" ;; -- equal --\n")
                    out.write(" mov rcx, 0\n")
                    out.write(" mov rdx, 1\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" cmp rax, rbx\n")
                    out.write(" cmove rcx, rdx\n")
                    out.write(" push rcx\n")
                elif op.operand == Intrinsic.GT:
                    out.write(" ;; -- gt --\n")
                    out.write(" mov rcx, 0\n")
                    out.write(" mov rdx, 1\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" cmp rax, rbx\n")
                    out.write(" cmovg rcx, rdx\n")
                    out.write(" push rcx\n")
                elif op.operand == Intrinsic.LT:
                    # FIX: was mislabeled ";; -- gt --" (copy-paste).
                    out.write(" ;; -- lt --\n")
                    out.write(" mov rcx, 0\n")
                    out.write(" mov rdx, 1\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" cmp rax, rbx\n")
                    out.write(" cmovl rcx, rdx\n")
                    out.write(" push rcx\n")
                elif op.operand == Intrinsic.GE:
                    # FIX: was mislabeled ";; -- gt --" (copy-paste).
                    out.write(" ;; -- ge --\n")
                    out.write(" mov rcx, 0\n")
                    out.write(" mov rdx, 1\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" cmp rax, rbx\n")
                    out.write(" cmovge rcx, rdx\n")
                    out.write(" push rcx\n")
                elif op.operand == Intrinsic.LE:
                    # FIX: was mislabeled ";; -- gt --" (copy-paste).
                    out.write(" ;; -- le --\n")
                    out.write(" mov rcx, 0\n")
                    out.write(" mov rdx, 1\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" cmp rax, rbx\n")
                    out.write(" cmovle rcx, rdx\n")
                    out.write(" push rcx\n")
                elif op.operand == Intrinsic.NE:
                    out.write(" ;; -- ne --\n")
                    out.write(" mov rcx, 0\n")
                    out.write(" mov rdx, 1\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" cmp rax, rbx\n")
                    out.write(" cmovne rcx, rdx\n")
                    out.write(" push rcx\n")
                elif op.operand == Intrinsic.DUP:
                    out.write(" ;; -- dup --\n")
                    out.write(" pop rax\n")
                    out.write(" push rax\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.SWAP:
                    out.write(" ;; -- swap --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" push rax\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.DROP:
                    out.write(" ;; -- drop --\n")
                    out.write(" pop rax\n")
                elif op.operand == Intrinsic.OVER:
                    out.write(" ;; -- over --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" push rbx\n")
                    out.write(" push rax\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.ROT:
                    out.write(" ;; -- rot --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rcx\n")
                    out.write(" push rbx\n")
                    out.write(" push rax\n")
                    out.write(" push rcx\n")
                elif op.operand == Intrinsic.MEM:
                    out.write(" ;; -- mem --\n")
                    out.write(" push mem\n")
                elif op.operand == Intrinsic.LOAD:
                    # Byte load, zero-extended to 64 bits.
                    out.write(" ;; -- load --\n")
                    out.write(" pop rax\n")
                    out.write(" xor rbx, rbx\n")
                    out.write(" mov bl, [rax]\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.STORE:
                    out.write(" ;; -- store --\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" mov [rax], bl\n")
                elif op.operand == Intrinsic.FORTH_LOAD:
                    out.write(" ;; -- forth load --\n")
                    out.write(" pop rax\n")
                    out.write(" xor rbx, rbx\n")
                    out.write(" mov bl, [rax]\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.FORTH_STORE:
                    # Forth operand order: pointer on top, value below.
                    # FIX: was mislabeled ";; -- store --" (copy-paste).
                    out.write(" ;; -- forth store --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" mov [rax], bl\n")
                elif op.operand == Intrinsic.ARGC:
                    out.write(" ;; -- argc --\n")
                    out.write(" mov rax, [args_ptr]\n")
                    out.write(" mov rax, [rax]\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.ARGV:
                    out.write(" ;; -- argv --\n")
                    out.write(" mov rax, [args_ptr]\n")
                    out.write(" add rax, 8\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.HERE:
                    # Push (length, pointer) of the "file:row:col" location string.
                    value = ("%s:%d:%d" % op.token.loc).encode('utf-8')
                    n = len(value)
                    out.write(" ;; -- here --\n")
                    out.write(" mov rax, %d\n" % n)
                    out.write(" push rax\n")
                    out.write(" push str_%d\n" % len(strs))
                    strs.append(value)
                elif op.operand == Intrinsic.LOAD64:
                    # FIX: was mislabeled ";; -- load --" (copy-paste).
                    out.write(" ;; -- load64 --\n")
                    out.write(" pop rax\n")
                    out.write(" xor rbx, rbx\n")
                    out.write(" mov rbx, [rax]\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.STORE64:
                    # FIX: was mislabeled ";; -- store --" (copy-paste).
                    out.write(" ;; -- store64 --\n")
                    out.write(" pop rbx\n")
                    out.write(" pop rax\n")
                    out.write(" mov [rax], rbx\n")
                elif op.operand == Intrinsic.FORTH_LOAD64:
                    out.write(" ;; -- forth load64 --\n")
                    out.write(" pop rax\n")
                    out.write(" xor rbx, rbx\n")
                    out.write(" mov rbx, [rax]\n")
                    out.write(" push rbx\n")
                elif op.operand == Intrinsic.FORTH_STORE64:
                    out.write(" ;; -- forth store64 --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rbx\n")
                    out.write(" mov [rax], rbx\n")
                elif op.operand == Intrinsic.CAST_PTR:
                    # Type-level only: no code is emitted.
                    out.write(" ;; -- cast(ptr) --\n")
                elif op.operand == Intrinsic.SYSCALL0:
                    out.write(" ;; -- syscall0 --\n")
                    out.write(" pop rax\n")
                    out.write(" syscall\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.SYSCALL1:
                    out.write(" ;; -- syscall1 --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rdi\n")
                    out.write(" syscall\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.SYSCALL2:
                    out.write(" ;; -- syscall2 --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rdi\n")
                    out.write(" pop rsi\n")
                    out.write(" syscall\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.SYSCALL3:
                    out.write(" ;; -- syscall3 --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rdi\n")
                    out.write(" pop rsi\n")
                    out.write(" pop rdx\n")
                    out.write(" syscall\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.SYSCALL4:
                    # Argument registers follow the Linux syscall ABI: rdi, rsi, rdx, r10, r8, r9.
                    out.write(" ;; -- syscall4 --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rdi\n")
                    out.write(" pop rsi\n")
                    out.write(" pop rdx\n")
                    out.write(" pop r10\n")
                    out.write(" syscall\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.SYSCALL5:
                    out.write(" ;; -- syscall5 --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rdi\n")
                    out.write(" pop rsi\n")
                    out.write(" pop rdx\n")
                    out.write(" pop r10\n")
                    out.write(" pop r8\n")
                    out.write(" syscall\n")
                    out.write(" push rax\n")
                elif op.operand == Intrinsic.SYSCALL6:
                    out.write(" ;; -- syscall6 --\n")
                    out.write(" pop rax\n")
                    out.write(" pop rdi\n")
                    out.write(" pop rsi\n")
                    out.write(" pop rdx\n")
                    out.write(" pop r10\n")
                    out.write(" pop r8\n")
                    out.write(" pop r9\n")
                    out.write(" syscall\n")
                    out.write(" push rax\n")
                else:
                    assert False, "unreachable"
            else:
                assert False, "unreachable"
        # Final label so END/jumps past the last op have a target; exit(0).
        out.write("addr_%d:\n" % len(program))
        out.write(" mov rax, 60\n")
        out.write(" mov rdi, 0\n")
        out.write(" syscall\n")
        out.write("segment .data\n")
        for index, s in enumerate(strs):
            out.write("str_%d: db %s\n" % (index, ','.join(map(hex, list(s)))))
        out.write("segment .bss\n")
        out.write("args_ptr: resq 1\n")
        out.write("mem: resb %d\n" % MEM_CAPACITY)
assert len(Keyword) == 8, "Exhaustive KEYWORD_NAMES definition."
# Maps source-level keyword spellings to their Keyword enum variants.
KEYWORD_NAMES = {
    'if': Keyword.IF,
    'elif': Keyword.ELIF,
    'else': Keyword.ELSE,
    'end': Keyword.END,
    'while': Keyword.WHILE,
    'do': Keyword.DO,
    'macro': Keyword.MACRO,
    'include': Keyword.INCLUDE,
}
assert len(Intrinsic) == 41, "Exhaustive INTRINSIC_BY_NAMES definition"
# Maps source-level intrinsic spellings to their Intrinsic enum variants.
INTRINSIC_BY_NAMES = {
    # Arithmetic
    '+': Intrinsic.PLUS,
    '-': Intrinsic.MINUS,
    '*': Intrinsic.MUL,
    'divmod': Intrinsic.DIVMOD,
    'print': Intrinsic.PRINT,
    # Comparison
    '=': Intrinsic.EQ,
    '>': Intrinsic.GT,
    '<': Intrinsic.LT,
    '>=': Intrinsic.GE,
    '<=': Intrinsic.LE,
    '!=': Intrinsic.NE,
    # Bitwise / logical
    'shr': Intrinsic.SHR,
    'shl': Intrinsic.SHL,
    'or': Intrinsic.OR,
    'and': Intrinsic.AND,
    'not': Intrinsic.NOT,
    # Stack manipulation
    'dup': Intrinsic.DUP,
    'swap': Intrinsic.SWAP,
    'drop': Intrinsic.DROP,
    'over': Intrinsic.OVER,
    'rot': Intrinsic.ROT,
    # Memory access ('.'/',' take ptr below value; '!'/'@' use Forth order)
    'mem': Intrinsic.MEM,
    '.': Intrinsic.STORE,
    ',': Intrinsic.LOAD,
    '!': Intrinsic.FORTH_STORE,
    '@': Intrinsic.FORTH_LOAD,
    '.64': Intrinsic.STORE64,
    ',64': Intrinsic.LOAD64,
    '!64': Intrinsic.FORTH_STORE64,
    '@64': Intrinsic.FORTH_LOAD64,
    'cast(ptr)': Intrinsic.CAST_PTR,
    # Program environment
    'argc': Intrinsic.ARGC,
    'argv': Intrinsic.ARGV,
    'here': Intrinsic.HERE,
    # Raw Linux syscalls by arity
    'syscall0': Intrinsic.SYSCALL0,
    'syscall1': Intrinsic.SYSCALL1,
    'syscall2': Intrinsic.SYSCALL2,
    'syscall3': Intrinsic.SYSCALL3,
    'syscall4': Intrinsic.SYSCALL4,
    'syscall5': Intrinsic.SYSCALL5,
    'syscall6': Intrinsic.SYSCALL6,
}
# Reverse mapping used when formatting diagnostics about an intrinsic.
INTRINSIC_NAMES = {v: k for k, v in INTRINSIC_BY_NAMES.items()}
@dataclass
class Macro:
    """A macro definition: where it was defined and the tokens of its body."""
    loc: Loc  # definition site, used in "first definition is located here" notes
    tokens: List[Token]  # body tokens; cloned on every expansion by expand_macro()
def human(obj: Union[TokenType, Op, Intrinsic]) -> str:
    '''Human readable representation of an object that can be used in error messages'''
    assert len(TokenType) == 6, "Exhaustive handling of token types in human()"
    # Table of token-type descriptions; compared with == rather than used as
    # dict keys so that non-TokenType inputs fall through to the assert.
    descriptions = (
        (TokenType.WORD, "a word"),
        (TokenType.INT, "an integer"),
        (TokenType.STR, "a string"),
        (TokenType.CSTR, "a C-style string"),
        (TokenType.CHAR, "a character"),
        (TokenType.KEYWORD, "a keyword"),
    )
    for token_type, description in descriptions:
        if obj == token_type:
            return description
    assert False, "unreachable"
def expand_macro(macro: Macro, expanded_from: Token) -> List[Token]:
    """Clone the macro's body tokens and tag each clone with its expansion origin."""
    clones = [copy(tok) for tok in macro.tokens]
    # Every clone records the token that triggered the expansion and how deep
    # in the expansion chain it sits (used to enforce the expansion limit).
    depth = expanded_from.expanded_count + 1
    for tok in clones:
        tok.expanded_from = expanded_from
        tok.expanded_count = depth
    return clones
def parse_program_from_tokens(tokens: List[Token], include_paths: List[str], expansion_limit: int) -> Program:
    """Parse a token stream into a Program (list of Ops).

    Handles macro definition and expansion, resolves `include` files against
    `include_paths`, and back-patches the jump operands of the
    `if`/`elif`/`else`/`while`/`do`/`end` control-flow constructs.
    `expansion_limit` bounds how many times a token may be (re-)expanded
    through macros/includes before it is treated as runaway recursion.
    """
    # Addresses of still-open blocks awaiting their closing keyword.
    stack: List[OpAddr] = []
    program: List[Op] = []
    # Tokens are kept in reverse so expansions can cheaply push tokens back.
    rtokens: List[Token] = list(reversed(tokens))
    macros: Dict[str, Macro] = {}
    ip: OpAddr = 0;
    while len(rtokens) > 0:
        token = rtokens.pop()
        assert len(TokenType) == 6, "Exhaustive token handling in parse_program_from_tokens"
        if token.typ == TokenType.WORD:
            assert isinstance(token.value, str), "This could be a bug in the lexer"
            if token.value in INTRINSIC_BY_NAMES:
                program.append(Op(typ=OpType.INTRINSIC, token=token, operand=INTRINSIC_BY_NAMES[token.value]))
                ip += 1
            elif token.value in macros:
                # Macro use: splice a fresh copy of its body into the stream.
                if token.expanded_count >= expansion_limit:
                    compiler_error_with_expansion_stack(token, "the macro exceeded the expansion limit (it expanded %d times)" % token.expanded_count)
                    exit(1)
                rtokens += reversed(expand_macro(macros[token.value], token))
            else:
                compiler_error_with_expansion_stack(token, "unknown word `%s`" % token.value)
                exit(1)
        elif token.typ == TokenType.INT:
            assert isinstance(token.value, int), "This could be a bug in the lexer"
            program.append(Op(typ=OpType.PUSH_INT, operand=token.value, token=token))
            ip += 1
        elif token.typ == TokenType.STR:
            assert isinstance(token.value, str), "This could be a bug in the lexer"
            program.append(Op(typ=OpType.PUSH_STR, operand=token.value, token=token));
            ip += 1
        elif token.typ == TokenType.CSTR:
            assert isinstance(token.value, str), "This could be a bug in the lexer"
            program.append(Op(typ=OpType.PUSH_CSTR, operand=token.value, token=token));
            ip += 1
        elif token.typ == TokenType.CHAR:
            # Character literals are pushed as plain integers (their byte value).
            assert isinstance(token.value, int)
            program.append(Op(typ=OpType.PUSH_INT, operand=token.value, token=token));
            ip += 1
        elif token.typ == TokenType.KEYWORD:
            assert len(Keyword) == 8, "Exhaustive keywords handling in parse_program_from_tokens()"
            if token.value == Keyword.IF:
                program.append(Op(typ=OpType.IF, token=token))
                stack.append(ip)
                ip += 1
            elif token.value == Keyword.ELIF:
                # `elif` must close a `do`; it links back to the block that
                # `do` belongs to, and `do`'s false-branch jumps past `elif`.
                program.append(Op(typ=OpType.ELIF, token=token))
                do_ip = stack.pop()
                if program[do_ip].typ != OpType.DO:
                    compiler_error_with_expansion_stack(program[do_ip].token, '`elif` can only close `do`-blocks')
                    exit(1)
                pre_do_ip = program[do_ip].operand
                assert isinstance(pre_do_ip, OpAddr)
                if program[pre_do_ip].typ == OpType.IF:
                    program[ip].operand = pre_do_ip
                    program[do_ip].operand = ip + 1
                    stack.append(ip)
                    ip += 1
                elif program[pre_do_ip].typ == OpType.ELIF:
                    program[ip].operand = pre_do_ip
                    program[do_ip].operand = ip + 1
                    stack.append(ip)
                    ip += 1
                else:
                    compiler_error_with_expansion_stack(program[pre_do_ip].token, '`elif` can only close `do`-blocks that are preceded by `if` or another `elif`')
                    exit(1)
            elif token.value == Keyword.ELSE:
                # Same wiring as `elif`: closes the `do`, remembers the block
                # it continues from; its own jump target is patched by `end`.
                program.append(Op(typ=OpType.ELSE, token=token))
                do_ip = stack.pop()
                if program[do_ip].typ != OpType.DO:
                    compiler_error_with_expansion_stack(program[do_ip].token, '`else` can only be used in `do` blocks')
                    exit(1)
                pre_do_ip = program[do_ip].operand
                assert isinstance(pre_do_ip, OpAddr)
                if program[pre_do_ip].typ == OpType.IF:
                    program[ip].operand = pre_do_ip
                    program[do_ip].operand = ip + 1
                    stack.append(ip)
                    ip += 1
                elif program[pre_do_ip].typ == OpType.ELIF:
                    program[ip].operand = pre_do_ip
                    program[do_ip].operand = ip + 1
                    stack.append(ip)
                    ip += 1
                else:
                    compiler_error_with_expansion_stack(program[pre_do_ip].token, '`else` can only close `do`-blocks that are preceded by `if` or `elif`')
                    exit(1)
            elif token.value == Keyword.END:
                program.append(Op(typ=OpType.END, token=token))
                block_ip = stack.pop()
                if program[block_ip].typ == OpType.ELSE:
                    # Walk the chain of `elif`s backwards, pointing each one's
                    # exit jump at this `end`.
                    prev_block_ip = program[block_ip].operand
                    program[block_ip].operand = ip
                    while program[prev_block_ip].typ == OpType.ELIF:
                        temp = program[prev_block_ip].operand
                        program[prev_block_ip].operand = ip
                        prev_block_ip = temp
                    assert program[prev_block_ip].typ == OpType.IF
                    program[ip].operand = ip + 1
                elif program[block_ip].typ == OpType.DO:
                    assert program[block_ip].operand is not None
                    pre_do_ip = program[block_ip].operand
                    assert isinstance(pre_do_ip, OpAddr)
                    if program[pre_do_ip].typ == OpType.WHILE:
                        # Loop: `end` jumps back to the `while` condition.
                        program[ip].operand = pre_do_ip
                        program[block_ip].operand = ip + 1
                    elif program[pre_do_ip].typ == OpType.IF:
                        program[ip].operand = ip + 1
                        program[block_ip].operand = ip + 1
                    elif program[pre_do_ip].typ == OpType.ELIF:
                        program[pre_do_ip].operand = ip
                        program[ip].operand = ip + 1
                        program[block_ip].operand = ip + 1
                    else:
                        compiler_error_with_expansion_stack(program[pre_do_ip].token, '`end` can only close `do` blocks that are preceded by `if`, `while` or `elif`')
                        exit(1)
                else:
                    compiler_error_with_expansion_stack(program[block_ip].token, '`end` can only close `else`, `do` or `macro` blocks for now')
                    exit(1)
                ip += 1
            elif token.value == Keyword.WHILE:
                program.append(Op(typ=OpType.WHILE, token=token))
                stack.append(ip)
                ip += 1
            elif token.value == Keyword.DO:
                # `do` remembers the address of the `while`/`if`/`elif` it
                # belongs to; its forward jump is patched later.
                program.append(Op(typ=OpType.DO, token=token))
                pre_do_ip = stack.pop()
                assert program[pre_do_ip].typ == OpType.WHILE or program[pre_do_ip].typ == OpType.IF or program[pre_do_ip].typ == OpType.ELIF
                program[ip].operand = pre_do_ip
                stack.append(ip)
                ip += 1
            elif token.value == Keyword.INCLUDE:
                if len(rtokens) == 0:
                    compiler_error_with_expansion_stack(token, "expected path to the include file but found nothing")
                    exit(1)
                token = rtokens.pop()
                if token.typ != TokenType.STR:
                    compiler_error_with_expansion_stack(token, "expected path to the include file to be %s but found %s" % (human(TokenType.STR), human(token.typ)))
                    exit(1)
                assert isinstance(token.value, str), "This is probably a bug in the lexer"
                file_included = False
                # Try each include path in order; first hit wins.
                for include_path in include_paths:
                    try:
                        if token.expanded_count >= expansion_limit:
                            compiler_error_with_expansion_stack(token, "the include exceeded the expansion limit (it expanded %d times)" % token.expanded_count)
                            exit(1)
                        rtokens += reversed(lex_file(path.join(include_path, token.value), token))
                        file_included = True
                        break
                    except FileNotFoundError:
                        continue
                if not file_included:
                    compiler_error_with_expansion_stack(token, "file `%s` not found" % token.value)
                    exit(1)
            elif token.value == Keyword.MACRO:
                if len(rtokens) == 0:
                    compiler_error_with_expansion_stack(token, "expected macro name but found nothing")
                    exit(1)
                token = rtokens.pop()
                if token.typ != TokenType.WORD:
                    compiler_error_with_expansion_stack(token, "expected macro name to be %s but found %s" % (human(TokenType.WORD), human(token.typ)))
                    exit(1)
                assert isinstance(token.value, str), "This is probably a bug in the lexer"
                if token.value in macros:
                    compiler_error_with_expansion_stack(token, "redefinition of already existing macro `%s`" % token.value)
                    compiler_note(macros[token.value].loc, "the first definition is located here")
                    exit(1)
                if token.value in INTRINSIC_BY_NAMES:
                    compiler_error_with_expansion_stack(token, "redefinition of an intrinsic word `%s`. Please choose a different name for your macro." % (token.value, ))
                    exit(1)
                macro = Macro(token.loc, [])
                macros[token.value] = macro
                # Collect body tokens verbatim until the matching `end`,
                # tracking nesting so inner if/while/macro `end`s don't
                # terminate the definition early.
                nesting_depth = 0
                while len(rtokens) > 0:
                    token = rtokens.pop()
                    if token.typ == TokenType.KEYWORD and token.value == Keyword.END and nesting_depth == 0:
                        break
                    else:
                        macro.tokens.append(token)
                        if token.typ == TokenType.KEYWORD:
                            if token.value in [Keyword.IF, Keyword.WHILE, Keyword.MACRO]:
                                nesting_depth += 1
                            elif token.value == Keyword.END:
                                nesting_depth -= 1
                if token.typ != TokenType.KEYWORD or token.value != Keyword.END:
                    compiler_error_with_expansion_stack(token, "expected `end` at the end of the macro definition but got `%s`" % (token.value, ))
                    exit(1)
            else:
                assert False, 'unreachable';
        else:
            assert False, 'unreachable'
    if len(stack) > 0:
        compiler_error_with_expansion_stack(program[stack.pop()].token, 'unclosed block')
        exit(1)
    return program
def find_col(line: str, start: int, predicate: Callable[[str], bool]) -> int:
    """Advance from `start` to the index of the first character of `line`
    satisfying `predicate`; returns len(line) if none does."""
    idx = start
    while idx < len(line):
        if predicate(line[idx]):
            break
        idx += 1
    return idx
def unescape_string(s: str) -> str:
    """Interpret backslash escape sequences (\\n, \\t, ...) in `s`.

    NOTE: the 'unicode_escape' codec assumes latin-1 input, so non-ASCII
    characters have to survive a utf-8 -> latin-1 -> utf-8 round trip.
    """
    raw = s.encode('utf-8')
    escaped = raw.decode('unicode_escape')
    return escaped.encode('latin-1').decode('utf-8')
def find_string_literal_end(line: str, start: int) -> int:
    """Return the index of the closing unescaped `"` at or after `start`,
    or len(line) if the literal does not close on this line.

    A quote is escaped only if preceded by an ODD number of backslashes.
    The previous implementation looked at a single preceding character, so
    a literal ending in an escaped backslash (`"...\\\\"`) wrongly treated
    the closing quote as escaped.
    """
    while start < len(line):
        if line[start] == '"':
            # Count the run of backslashes immediately before the quote.
            backslashes = 0
            i = start - 1
            while i >= 0 and line[i] == '\\':
                backslashes += 1
                i -= 1
            if backslashes % 2 == 0:
                break
        start += 1
    return start
def lex_lines(file_path: str, lines: List[str]) -> Generator[Token, None, None]:
    """Tokenize `lines` (as returned by readlines(), i.e. with newlines kept)
    into Tokens, yielding them lazily.

    Handles multi-line string literals by accumulating into str_literal_buf
    across rows; `"..."c` is lexed as a C-style (NUL-terminated) string.
    """
    assert len(TokenType) == 6, 'Exhaustive handling of token types in lex_lines'
    row = 0
    str_literal_buf = ""
    while row < len(lines):
        line = lines[row]
        col = find_col(line, 0, lambda x: not x.isspace())
        col_end = 0
        while col < len(line):
            # loc is 1-based (file, row, col) for error reporting.
            loc = (file_path, row + 1, col + 1)
            if line[col] == '"':
                # Scan forward, possibly across several rows, until the
                # closing quote is found; partial rows accumulate in the buf
                # (keeping their trailing newlines).
                while row < len(lines):
                    start = col
                    if str_literal_buf == "":
                        # Fresh literal: skip the opening quote.
                        start += 1
                    else:
                        # Continuation row: re-read the current line.
                        line = lines[row]
                    col_end = find_string_literal_end(line, start)
                    if col_end >= len(line) or line[col_end] != '"':
                        str_literal_buf += line[start:]
                        row +=1
                        col = 0
                    else:
                        str_literal_buf += line[start:col_end]
                        break
                if row >= len(lines):
                    compiler_error(loc, "unclosed string literal")
                    exit(1)
                assert line[col_end] == '"'
                col_end += 1
                text_of_token = str_literal_buf
                str_literal_buf = ""
                # An immediately following `c` marks a C-style string.
                if col_end < len(line) and line[col_end] == 'c':
                    col_end += 1
                    yield Token(TokenType.CSTR, text_of_token, loc, unescape_string(text_of_token))
                else:
                    yield Token(TokenType.STR, text_of_token, loc, unescape_string(text_of_token))
                col = find_col(line, col_end, lambda x: not x.isspace())
            elif line[col] == "'":
                # Character literal: exactly one byte after unescaping.
                col_end = find_col(line, col+1, lambda x: x == "'")
                if col_end >= len(line) or line[col_end] != "'":
                    compiler_error(loc, "unclosed character literal")
                    exit(1)
                text_of_token = line[col+1:col_end]
                char_bytes = unescape_string(text_of_token).encode('utf-8')
                if len(char_bytes) != 1:
                    compiler_error(loc, "only a single byte is allowed inside of a character literal")
                    exit(1)
                yield Token(TokenType.CHAR, text_of_token, loc, char_bytes[0])
                col = find_col(line, col_end+1, lambda x: not x.isspace())
            else:
                # Plain word: int if it parses, then keyword, then `//`
                # comment (rest of line ignored), otherwise a word token.
                col_end = find_col(line, col, lambda x: x.isspace())
                text_of_token = line[col:col_end]
                try:
                    yield Token(TokenType.INT, text_of_token, loc, int(text_of_token))
                except ValueError:
                    if text_of_token in KEYWORD_NAMES:
                        yield Token(TokenType.KEYWORD, text_of_token, loc, KEYWORD_NAMES[text_of_token])
                    else:
                        if text_of_token.startswith("//"):
                            break
                        yield Token(TokenType.WORD, text_of_token, loc, text_of_token)
                col = find_col(line, col_end, lambda x: not x.isspace())
        row += 1
def lex_file(file_path: str, expanded_from: Optional[Token] = None) -> List[Token]:
    """Lex `file_path` into tokens; when the file is pulled in by `include`,
    tag every token with the including token's expansion chain."""
    with open(file_path, "r", encoding='utf-8') as f:
        tokens = list(lex_lines(file_path, f.readlines()))
    # The condition is loop-invariant, so check it once up front.
    if expanded_from is not None:
        expansion_depth = expanded_from.expanded_count + 1
        for tok in tokens:
            tok.expanded_from = expanded_from
            tok.expanded_count = expansion_depth
    return tokens
def parse_program_from_file(file_path: str, include_paths: List[str], expansion_limit: int) -> Program:
    """Lex `file_path` and parse the resulting token stream into a Program."""
    return parse_program_from_tokens(lex_file(file_path), include_paths, expansion_limit)
def cmd_call_echoed(cmd: List[str], silent: bool) -> int:
    """Run `cmd` as a subprocess and return its exit code.

    Unless `silent`, the shell-quoted command line is echoed first.
    """
    if not silent:
        quoted = " ".join(shlex.quote(part) for part in cmd)
        print("[CMD] %s" % quoted)
    return subprocess.call(cmd)
def generate_control_flow_graph_as_dot_file(program: Program, dot_path: str):
    """Dump `program` as a Graphviz dot file at `dot_path`.

    Each Op becomes a node; fall-through edges go to ip+1 and jump edges
    follow the back-patched operands set by the parser.
    """
    with open(dot_path, "w") as f:
        f.write("digraph Program {\n")
        assert len(OpType) == 10, "Exhaustive handling of OpType in generate_control_flow_graph_as_dot_file()"
        for ip in range(len(program)):
            op = program[ip]
            if op.typ == OpType.INTRINSIC:
                assert isinstance(op.operand, Intrinsic)
                # repr(repr(...)) produces a dot-safe quoted label.
                f.write(f"    Node_{ip} [label={repr(repr(INTRINSIC_NAMES[op.operand]))}];\n")
                f.write(f"    Node_{ip} -> Node_{ip + 1};\n")
            elif op.typ == OpType.PUSH_STR:
                assert isinstance(op.operand, str)
                f.write(f"    Node_{ip} [label={repr(repr(op.operand))}];\n")
                f.write(f"    Node_{ip} -> Node_{ip + 1};\n")
            elif op.typ == OpType.PUSH_CSTR:
                assert isinstance(op.operand, str)
                f.write(f"    Node_{ip} [label={repr(repr(op.operand))}];\n")
                f.write(f"    Node_{ip} -> Node_{ip + 1};\n")
            elif op.typ == OpType.PUSH_INT:
                assert isinstance(op.operand, int)
                f.write(f"    Node_{ip} [label={op.operand}]\n")
                f.write(f"    Node_{ip} -> Node_{ip + 1};\n")
            elif op.typ == OpType.IF:
                f.write(f"    Node_{ip} [shape=record label=if];\n")
                f.write(f"    Node_{ip} -> Node_{ip + 1};\n")
            elif op.typ == OpType.WHILE:
                f.write(f"    Node_{ip} [shape=record label=while];\n")
                f.write(f"    Node_{ip} -> Node_{ip + 1};\n")
            elif op.typ == OpType.DO:
                # `do` branches: fall through on true, jump on false.
                assert isinstance(op.operand, OpAddr)
                f.write(f"    Node_{ip} [shape=record label=do];\n")
                f.write(f"    Node_{ip} -> Node_{ip + 1} [label=true];\n")
                f.write(f"    Node_{ip} -> Node_{op.operand} [label=false style=dashed];\n")
            elif op.typ == OpType.ELSE:
                assert isinstance(op.operand, OpAddr)
                f.write(f"    Node_{ip} [shape=record label=else];\n")
                f.write(f"    Node_{ip} -> Node_{op.operand};\n")
            elif op.typ == OpType.ELIF:
                assert isinstance(op.operand, OpAddr)
                f.write(f"    Node_{ip} [shape=record label=elif];\n")
                f.write(f"    Node_{ip} -> Node_{op.operand};\n")
            elif op.typ == OpType.END:
                assert isinstance(op.operand, OpAddr)
                f.write(f"    Node_{ip} [shape=record label=end];\n")
                f.write(f"    Node_{ip} -> Node_{op.operand};\n")
            else:
                assert False, f"unimplemented operation {op.typ}"
        # Synthetic terminal node one past the last op.
        f.write(f"    Node_{len(program)} [label=halt];\n")
        f.write("}\n")
def usage(compiler_name: str):
    """Print the compiler's command-line help text to stdout."""
    print("Usage: %s [OPTIONS] <SUBCOMMAND> [ARGS]" % compiler_name)
    print("  OPTIONS:")
    print("    -debug                Enable debug mode.")
    print("    -I <path>             Add the path to the include search list")
    print("    -E <expansion-limit>  Macro and include expansion limit. (Default %d)" % DEFAULT_EXPANSION_LIMIT)
    print("    -unsafe               Disable type checking.")
    print("  SUBCOMMAND:")
    print("    sim <file>            Simulate the program")
    print("    com [OPTIONS] <file>  Compile the program")
    print("      OPTIONS:")
    print("        -r                  Run the program after successful compilation")
    print("        -o <file|dir>       Customize the output path")
    print("        -s                  Silent mode. Don't print any info about compilation phases.")
    print("        -cf                 Dump Control Flow graph of the program in a dot format.")
    print("    help                  Print this help to stdout and exit with 0 code")
# Entry point: parse global options, then dispatch on the subcommand.
if __name__ == '__main__' and '__file__' in globals():
    argv = sys.argv
    assert len(argv) >= 1
    compiler_name, *argv = argv
    include_paths = ['.', './std/']
    expansion_limit = DEFAULT_EXPANSION_LIMIT
    unsafe = False
    # FIX: `debug` was read below (`if debug:`) without ever being
    # initialized, raising NameError whenever -debug was not passed.
    debug = False
    while len(argv) > 0:
        if argv[0] == '-debug':
            argv = argv[1:]
            debug = True
        elif argv[0] == '-I':
            argv = argv[1:]
            if len(argv) == 0:
                usage(compiler_name)
                print("[ERROR] no path is provided for `-I` flag", file=sys.stderr)
                exit(1)
            include_path, *argv = argv
            include_paths.append(include_path)
        elif argv[0] == '-E':
            argv = argv[1:]
            if len(argv) == 0:
                usage(compiler_name)
                print("[ERROR] no value is provided for `-E` flag", file=sys.stderr)
                exit(1)
            arg, *argv = argv
            expansion_limit = int(arg)
        elif argv[0] == '-unsafe':
            argv = argv[1:]
            unsafe = True
        else:
            # First unrecognized argument starts the subcommand.
            break
    if debug:
        print("[INFO] Debug mode is enabled")
    if len(argv) < 1:
        usage(compiler_name)
        print("[ERROR] no subcommand is provided", file=sys.stderr)
        exit(1)
    subcommand, *argv = argv
    program_path: Optional[str] = None
    if subcommand == "sim":
        if len(argv) < 1:
            usage(compiler_name)
            print("[ERROR] no input file is provided for the simulation", file=sys.stderr)
            exit(1)
        program_path, *argv = argv
        # The program's own directory always participates in include lookup.
        include_paths.append(path.dirname(program_path))
        program = parse_program_from_file(program_path, include_paths, expansion_limit);
        if not unsafe:
            type_check_program(program)
        simulate_little_endian_linux(program, [program_path] + argv)
    elif subcommand == "com":
        silent = False
        control_flow = False
        run = False
        output_path = None
        while len(argv) > 0:
            arg, *argv = argv
            if arg == '-r':
                run = True
            elif arg == '-s':
                silent = True
            elif arg == '-o':
                if len(argv) == 0:
                    usage(compiler_name)
                    print("[ERROR] no argument is provided for parameter -o", file=sys.stderr)
                    exit(1)
                output_path, *argv = argv
            elif arg == '-cf':
                control_flow = True
            else:
                # First non-flag argument is the input file; the rest are
                # forwarded to the compiled program when -r is given.
                program_path = arg
                break
        if program_path is None:
            usage(compiler_name)
            print("[ERROR] no input file is provided for the compilation", file=sys.stderr)
            exit(1)
        basename = None
        basedir = None
        if output_path is not None:
            if path.isdir(output_path):
                basename = path.basename(program_path)
                if basename.endswith(PORTH_EXT):
                    basename = basename[:-len(PORTH_EXT)]
                # FIX: when -o names an existing directory the output must go
                # inside it; path.dirname(output_path) dropped the directory
                # itself (dirname("build") == "").
                basedir = output_path
            else:
                basename = path.basename(output_path)
                basedir = path.dirname(output_path)
        else:
            basename = path.basename(program_path)
            if basename.endswith(PORTH_EXT):
                basename = basename[:-len(PORTH_EXT)]
            basedir = path.dirname(program_path)
        # if basedir is empty we should "fix" the path appending the current working directory.
        # So we avoid `com -r` to run command from $PATH.
        if basedir == "":
            basedir = os.getcwd()
        basepath = path.join(basedir, basename)
        include_paths.append(path.dirname(program_path))
        program = parse_program_from_file(program_path, include_paths, expansion_limit);
        if control_flow:
            dot_path = basepath + ".dot"
            if not silent:
                print(f"[INFO] Generating {dot_path}")
            generate_control_flow_graph_as_dot_file(program, dot_path)
            cmd_call_echoed(["dot", "-Tsvg", "-O", dot_path], silent)
        if not unsafe:
            type_check_program(program)
        if not silent:
            print("[INFO] Generating %s" % (basepath + ".asm"))
        generate_nasm_linux_x86_64(program, basepath + ".asm")
        cmd_call_echoed(["nasm", "-felf64", basepath + ".asm"], silent)
        cmd_call_echoed(["ld", "-o", basepath, basepath + ".o"], silent)
        if run:
            exit(cmd_call_echoed([basepath] + argv, silent))
    elif subcommand == "help":
        usage(compiler_name)
        exit(0)
    else:
        usage(compiler_name)
        print("[ERROR] unknown subcommand %s" % (subcommand), file=sys.stderr)
        exit(1)
|
Jael-G/lifepy
|
examples/lifepygame.py
|
import pygame
from lifepy import lifepy
from pygame import event
'''
Using Pygame to show a lifepy simulation
'''
# Window size in pixels.
DISPLAY_H =500
DISPLAY_W = 500
# Pixels per cell; must evenly divide both window dimensions.
CELL_SPACE = 10
'''
Note:
The width and height are the size of the window, and the cell space is the amount of pixels from said window that EACH cell will take.
Therefore, cell space must be a divisor of both width and height.
Furthermore, reducing the cell space on the same width and height means that more cells will appear in the same
width and height.
Example:
if height and with are 100 and cell space is 10, the array will be (100/10 * 100/10) = 10*10, but if the cell space is 1, then the array will
be 100*100, without having to increase screen size.
'''
# RGB fill colors for dead and live cells.
DEAD_COLOR = (0, 0, 0)
LIVE_COLOR = (200, 200, 200)
# Grid dimensions are derived from the window size and cell size.
SIMULATION = lifepy.Simulator(m_size=int(DISPLAY_W/CELL_SPACE), n_size=int(DISPLAY_H/CELL_SPACE), mode='ASCII')
def main():
    """Validate the grid geometry, initialize pygame, and run the draw/event loop."""
    if DISPLAY_W % CELL_SPACE != 0 or DISPLAY_H % CELL_SPACE != 0:
        print("The cell space must be a divisor of the width and height of the screen size.")
        quit()
    global DISPLAY, SIMULATION
    SIMULATION.generate_array()
    pygame.init()
    DISPLAY = pygame.display.set_mode((DISPLAY_W, DISPLAY_H))
    while True:
        generate_grid()
        # `ev` avoids shadowing the `event` name imported at module level.
        for ev in pygame.event.get():
            if ev.type == pygame.QUIT:
                pygame.quit()
                quit()
        pygame.display.update()
def generate_grid():
    """Advance the simulation one step and draw every cell onto DISPLAY.

    Exits the program when the simulation reports that no life remains.
    """
    global SIMULATION
    continue_running = SIMULATION.step()  # renamed from misspelled `contine_running`
    if not continue_running:
        input("Press enter to exit")
        pygame.quit()
        quit()
    # PERF: get_array() copies and trims the border on every call, so fetch
    # it once here instead of once per cell inside the nested loops.
    cells = SIMULATION.get_array()
    for i in range(0, DISPLAY_W, CELL_SPACE):
        for j in range(0, DISPLAY_H, CELL_SPACE):
            cell = pygame.Rect(i, j, CELL_SPACE, CELL_SPACE)
            if cells[int(i / CELL_SPACE)][int(j / CELL_SPACE)]:
                pygame.draw.rect(DISPLAY, LIVE_COLOR, cell, 0)
            else:
                pygame.draw.rect(DISPLAY, DEAD_COLOR, cell, 0)


# Guarded so importing this example module does not start the game loop.
if __name__ == "__main__":
    main()
|
Jael-G/lifepy
|
src/lifepy/lifepy.py
|
<gh_stars>0
import curses
import numpy
import random
import sys
import time
class Simulator:
    """Conway's Game of Life on an m x n grid.

    The internal array is (m+2) x (n+2): a one-cell dead "border" surrounds
    the live grid so neighbor lookups never index out of bounds.
    """
    def __init__(self, m_size=50, n_size=50, mode='DEFAULT', live_char='#', dead_char='-'):
        # mode: 'DEFAULT' renders with ANSI colors, 'ASCII' with characters.
        self.__m_size = m_size
        self.__n_size = n_size
        self.mode = mode
        self.__array = None
        self.live_char = live_char
        self.dead_char = dead_char
    def get_array(self):
        '''
        Returns a copy of the grid with the one-cell border rows/columns removed.
        '''
        copied_array = self.__array.copy()
        copied_array = numpy.delete(copied_array, 0, 0)
        copied_array = numpy.delete(copied_array, self.__m_size, 0)
        copied_array = numpy.delete(copied_array, 0, 1)
        copied_array = numpy.delete(copied_array, self.__n_size, 1)
        return copied_array.copy()
    def get_m_size(self):
        '''
        Returns the size of m (rows)
        '''
        return self.__m_size
    def get_n_size(self):
        '''
        Returns the size of n (columns)
        '''
        return self.__n_size
    def __array_not_generated(self):
        # True until generate_array() or load_array() has been called.
        return self.__array is None
    def generate_array(self):
        '''
        Generates an array with random living and dead cells.

        Allocates an array of the specified m*n size plus the "border",
        then iterates over the interior (excluding the border) setting each
        cell randomly to 0 or 1.

        Returns 1 if successfully generated, else returns 0.
        '''
        try:
            generated_array = numpy.zeros((self.__m_size + 2, self.__n_size + 2))
            for i in range(1, self.__m_size + 1):
                for j in range(1, self.__n_size + 1):
                    generated_array[i][j] = random.randint(0, 1)
            self.__array = generated_array.copy()
            return 1
        except Exception as e:
            self.__array= None  # don't keep a partially generated array around
            print(e)
            return 0
    def load_array(self, array):
        '''
        Loads a given m x n array as the starting grid.

        Builds a temporary array that adds the empty border rows/columns,
        then sets self.__array to a copy of it.

        Returns 1 if the array is loaded successfully; raises on a size
        mismatch.
        '''
        if array.shape == (self.__m_size, self.__n_size):
            temp_array = numpy.zeros((1, self.__n_size + 2))
            for row in array:
                new_row = [0.] + list(row) + [0.]
                temp_array = numpy.vstack([temp_array, new_row])
            temp_array = numpy.vstack([temp_array, numpy.zeros((1, self.__n_size + 2))])
            self.__array = temp_array.copy()
            return 1
        else:
            raise Exception("Invalid array size. Expected size: ({}, {}) but {} was given".format(self.__m_size, self.__n_size,
                                                                                                  array.shape))
            # NOTE(review): unreachable — the raise above exits first.
            return 0
    def get_simulation(self,printout=False):
        '''
        Returns the string used to represent the array when doing the simulation.
        '''
        if self.__array_not_generated():
            raise Exception("Array is NoneType. Array most be generated or loaded.")
        array_string = ''
        for i in range(1, self.__m_size + 1):
            for j in range(1, self.__n_size + 1):
                if self.mode == 'DEFAULT':
                    # ANSI escapes: white background for live cells, default
                    # for dead ones; reset after each cell.
                    if self.__array[i][j]:
                        array_string += "\u001b[47m" + " " + "\033[0;0m"
                    else:
                        array_string += "\u001b[00;1m" + " " + "\033[0;0m"
                    array_string += "\033[0;0m"
                elif self.mode == 'ASCII':
                    if self.__array[i][j]:
                        array_string += f"{self.live_char}"
                    else:
                        array_string += f"{self.dead_char}"
            array_string += "\n"
        if printout:
            print(array_string)
        return array_string
    def step(self, printout=False):
        '''
        Advances the simulation 1 step forward. If successful returns 1.
        If it's unable to do a step (the simulation has ended), returns 0.
        '''
        if self.__array_not_generated():
            raise Exception("Array is NoneType. Array most be generated or loaded.")
        if not numpy.count_nonzero(self.__array) == 0:
            try:
                copied_array = self.__array.copy()
                final_array = numpy.zeros((self.__m_size + 2, self.__n_size + 2))
                for i in range(1, self.__m_size + 1):
                    for j in range(1, self.__n_size + 1):
                        current_cell = copied_array[i][j]
                        surrounding_cells = [
                            copied_array[i - 1][j - 1], copied_array[i - 1][j], copied_array[i - 1][j + 1],
                            copied_array[i][j - 1], copied_array[i][j + 1],
                            copied_array[i + 1][j - 1], copied_array[i + 1][j], copied_array[i + 1][j + 1]
                        ]
                        # count(True) matches live cells because 1.0 == True.
                        alive_surrounding_cells = surrounding_cells.count(True)
                        # Standard Life rules: birth on exactly 3 neighbors,
                        # survival on 2 or 3.
                        if (not current_cell and alive_surrounding_cells == 3) or (
                                current_cell and alive_surrounding_cells == 2 or alive_surrounding_cells == 3):
                            final_array[i][j] = True
                        elif current_cell and alive_surrounding_cells != 2 and alive_surrounding_cells != 3:
                            final_array[i][j] = False
                self.__array = final_array.copy()
                if printout:
                    self.get_simulation(printout)
                return 1
            except KeyboardInterrupt:
                # Restore the pre-step state so an interrupt doesn't leave a
                # half-built grid behind.
                self.__array=copied_array.copy()
                raise KeyboardInterrupt
        else:
            print("Cannot do any more steps. All life has ended in the simulation")
            return 0
    def continuous_simulation(self, step_delay=0, printout=False):
        '''
        Show the simulation in a curses window.
        Only 'ASCII' mode works (mode is forced to 'ASCII' and restored on exit).
        '''
        saved_mode = self.mode
        self.mode = 'ASCII'
        if self.__array_not_generated():
            raise Exception("Array is NoneType. Array most be generated or loaded.")
        if printout:
            simulation_screen = curses.initscr()
            simulation_screen.clear()
        try:
            while True:
                if self.step(False):
                    if printout:
                        simulation_screen.clear()
                        # addstr can fail if the terminal is too small for
                        # the grid; ignore and keep stepping.
                        try:
                            simulation_screen.addstr(0,0, self.get_simulation(printout=False))
                        except:
                            pass
                        simulation_screen.refresh()
                    time.sleep(step_delay)
                else:
                    # Simulation ended: tear down curses and report.
                    if printout:
                        simulation_screen.clear()
                        curses.nocbreak()
                        curses.echo()
                        curses.endwin()
                        simulation_screen.clear()
                    print("Cannot do any more steps. All life has ended in the simulation")
                    break
        except KeyboardInterrupt:
            # Restore the terminal before leaving on Ctrl-C.
            if printout:
                simulation_screen.clear()
                curses.nocbreak()
                curses.echo()
                curses.endwin()
                simulation_screen.clear()
            print("Exiting simulation")
        self.mode = saved_mode
|
kirkeaton/sudoku-image-solver
|
imsudoku.py
|
<reponame>kirkeaton/sudoku-image-solver
import cv2
import homography
import numpy as np
import os
from libsvm.svmutil import *
from PIL import Image
from pylab import *
from scipy import ndimage
H = 1
# helper functions for the sudoku solver
# Ref(s):
# http://stackoverflow.com/questions/201461/shortest-sudoku-solver-in-python-how-does-it-work
def same_row(i, j):
    """True when board cells i and j (0-80) lie in the same sudoku row.

    FIX: `/` is true division on Python 3, so e.g. same_row(0, 8) compared
    0.0 == 0.888... and returned False; floor division restores the
    original Python 2 semantics.
    """
    return i // 9 == j // 9
def same_col(i, j):
    """True when board cells i and j (0-80) share a sudoku column."""
    # i and j are in the same column exactly when they are congruent mod 9.
    return i % 9 == j % 9
def same_block(i, j):
    """True when board cells i and j (0-80) fall in the same 3x3 block.

    FIX: `/` is true division on Python 3; floor division (`//`) restores
    the original Python 2 integer semantics so band and stack indices are
    compared as integers.
    """
    return i // 27 == j // 27 and i % 9 // 3 == j % 9 // 3
# function that solves a sudoku puzzle
# Ref(s):
# http://stackoverflow.com/questions/201461/shortest-sudoku-solver-in-python-how-does-it-work
def solve_puzzle(a):
    """Solve a sudoku given as an 81-char string ('0' = empty) by recursive
    backtracking; prints the solved 9x9 grid and returns None."""
    i = a.find("0")
    if i == -1:
        print("solved")
        # Puzzle is solved: format the digits as a 9x9 grid.
        soln = [int(a[j]) for j in range(81)]
        print((array(soln).reshape(9, 9)))
        return None
    # Digits already used in cell i's row, column, or block are excluded.
    excluded_numbers = {a[j] for j in range(81)
                        if same_row(i, j) or same_col(i, j) or same_block(i, j)}
    for m in "123456789":
        if m not in excluded_numbers:
            # m is not excluded by any row, column, or block: place it and recurse.
            return solve_puzzle(a[:i] + m + a[i + 1:])
# function that performs a homography based on four points
# Ref(s):
# <NAME>., Programming Computer Vision with Python, O'Reilly (2012)
def perform_homography(x):
    """Estimate the homography mapping the four corner points `x` (row, col
    pairs) onto a 1000x1000 square, storing the result in global H."""
    global H
    # Source points as homogeneous (col, row, 1) columns.
    corners = [[p[1], p[0], 1] for p in x]
    fp = array(corners).T
    # Destination: the unit 1000x1000 square, clockwise from the origin.
    tp = array([[0, 0, 1], [0, 1000, 1], [1000, 1000, 1], [1000, 0, 1]]).T
    H = homography.H_from_points(tp, fp)
# helper function for geometric_transform
# Ref(s):
# <NAME>., Programming Computer Vision with Python, O'Reilly (2012)
def warpfcn(x):
    """Map one point through the global homography H; callback for
    scipy.ndimage.geometric_transform."""
    homogeneous = array([x[0], x[1], 1])
    mapped = dot(H, homogeneous)
    # Normalize out of homogeneous coordinates.
    mapped = mapped / mapped[2]
    return mapped[0], mapped[1]
# finds the edges of a straigtened sudoku puzzle
def find_sudoku_edges(im, axis=0):
    """Return the 10 evenly spaced grid-line coordinates along `axis` of a
    straightened sudoku image (9 cells => 10 edges)."""
    size = im.shape[axis]
    return [(i * size) / 9 for i in range(10)]
# resizes an image
def imresize(im, sz):
    """Resize image array `im` to size `sz` via PIL; return it as an array."""
    return array(Image.fromarray(uint8(im)).resize(sz))
# computes a feature vector for an ocr image patch
# Ref(s):
# <NAME>., Programming Computer Vision with Python, O'Reilly (2012)
def compute_feature(im):
    """Turn an OCR image patch into a flat feature vector.

    The patch is resized to 30x30, a 3-pixel border is stripped (leaving
    24x24), and the result is flattened.
    """
    resized = imresize(im, (30, 30))
    cropped = resized[3:-3, 3:-3]
    return cropped.flatten()
# returns labels & ocr features for all images in the path
# Ref(s):
# <NAME>., Programming Computer Vision with Python, O'Reilly (2012)
def load_ocr_data(path):
    """Load OCR images from `path` and return (features, labels).

    Each *.jpg file's leading file-name digit is its class label; features
    are the flattened, border-stripped 30x30 patches from compute_feature.
    """
    # get a list of all the images
    imlist = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(".jpg")]
    # create labels
    # FIX: use os.path.basename instead of splitting on "/" so the label
    # extraction also works with Windows path separators.
    labels = [int(os.path.basename(imfile)[0]) for imfile in imlist]
    # create features from the images
    features = []
    for imname in imlist:
        im = array(Image.open(imname).convert("L"))
        features.append(compute_feature(im))
    return array(features), labels
def main():
    """Train an SVM digit classifier, then extract and solve each of the 60
    sudoku puzzle photographs under sudoku_images/."""
    print("Generating OCR data")
    print("===================")
    # training data
    features, labels = load_ocr_data("ocr_data/training/")
    # testing data
    test_features, test_labels = load_ocr_data("ocr_data/testing/")
    # train a linear SVM classifier (libsvm wants plain lists)
    features = list(map(list, features))
    test_features = list(map(list, test_features))
    prob = svm_problem(labels, features)
    param = svm_parameter("-t 0 -q")
    m = svm_train(prob, param)
    # how did the training do?
    res = svm_predict(labels, features, m)
    # how does it perform on the test set?
    res = svm_predict(test_labels, test_features, m)
    print("")
    # process all 60 of our images
    for fp in range(1, 61):
        # input & actual results
        infile = "sudoku_images/sudoku%d.jpg" % (fp)
        ground = "sudoku_images/sudoku%d.sud" % (fp)
        # Ref(s) for lines 106 to 131
        # http://stackoverflow.com/a/11366549
        # read the image, blur it
        # create a structuring element to pass to
        orig = cv2.imread(infile, 0)
        blur = cv2.GaussianBlur(orig, (11, 11), 0)
        kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        # perform a morphology based on the previously computed kernel
        # (closing estimates the background illumination; dividing by it
        # flattens uneven lighting before thresholding)
        close = cv2.morphologyEx(blur, cv2.MORPH_CLOSE, kernel1)
        div = np.float32(blur) / (close)
        res = np.uint8(cv2.normalize(div, div, 0, 255, cv2.NORM_MINMAX))
        res2 = cv2.cvtColor(res, cv2.COLOR_GRAY2BGR)
        # perform an adaptive threshold and find the contours
        thresh = cv2.adaptiveThreshold(res, 255, 0, 1, 19, 2)
        contours, hier = cv2.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        # find the sudoku gameboard by looking for the largest square in image
        biggest = None
        max_area = 0
        for i in contours:
            area = cv2.contourArea(i)
            if area > 100:
                peri = cv2.arcLength(i, True)
                approx = cv2.approxPolyDP(i, 0.02 * peri, True)
                if area > max_area and len(approx) == 4:
                    biggest = approx
                    max_area = area
        # calculate the center of the square
        M = cv2.moments(biggest)
        cx = int(M["m10"] / M["m00"])
        cy = int(M["m01"] / M["m00"])
        # find the location of the four corners
        # (classify each vertex by which quadrant it lies in relative to
        # the centroid)
        for a in range(0, 4):
            # calculate the difference between the center
            # of the square and the current point
            dx = biggest[a][0][0] - cx
            dy = biggest[a][0][1] - cy
            if dx < 0 and dy < 0:
                topleft = (biggest[a][0][0], biggest[a][0][1])
            elif dx > 0 and dy < 0:
                topright = (biggest[a][0][0], biggest[a][0][1])
            elif dx > 0 and dy > 0:
                botright = (biggest[a][0][0], biggest[a][0][1])
            elif dx < 0 and dy > 0:
                botleft = (biggest[a][0][0], biggest[a][0][1])
        # the four corners from top left going clockwise
        corners = []
        corners.append(topleft)
        corners.append(topright)
        corners.append(botright)
        corners.append(botleft)
        # perform the homography
        perform_homography(corners)
        # perform a geometric transform to get just the puzzle in our image
        fixed = array(
            Image.fromarray(
                ndimage.geometric_transform(blur, warpfcn, (1000, 1000)), "L"
            )
        )
        # perform adaptive thresholding increase the contrast between paper and ink
        fixed = cv2.adaptiveThreshold(
            fixed, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
        )
        # find the x and y edges
        x = find_sudoku_edges(fixed, axis=0)
        y = find_sudoku_edges(fixed, axis=1)
        # crop each cell and add it to a list of crops
        crops = []
        for col in range(9):
            for row in range(9):
                crop = fixed[
                    int(y[col]) : int(y[col + 1]), int(x[row]) : int(x[row + 1])
                ]
                crops.append(compute_feature(crop))
        print(("Puzzle #%02d" % (fp)))
        print("==========")
        # check our results and formulate it into a 9x9 array
        # (the .sud file holds the ground-truth digits for accuracy scoring)
        res, acc, vals = svm_predict(
            loadtxt(ground).reshape(81), list(map(list, crops)), m
        )
        res_im = array(res).reshape(9, 9)
        print("Result:")
        print(res_im)
        print("")
        if acc[0] >= 100:
            print("Puzzle extracted perfectly. Solving now.")
            print("")
            puzz = ""
            puzzle = res_im.flatten()
            for i in range(len(puzzle)):
                puzz += str(int(puzzle[i]))
            print("Solution:")
            solve_puzzle(puzz)
            print("")
        else:
            # Any misread digit would make the solver chase a wrong grid.
            print("Failed to extract puzzle perfectly. Cannot solve.")
            print("")
if __name__ == "__main__":
    main()
|
qyxqyx/LWML
|
main.py
|
import numpy as np
import random
import tensorflow as tf
from lwau import LWAU
from tensorflow.python.platform import flags
import os
from task_generator import TaskGenerator
FLAGS = flags.FLAGS
flags.DEFINE_integer('metatrain_iterations', 60000, 'number of metatraining iterations.')
# Training options
flags.DEFINE_integer('num_classes', 5, 'number of classes used in classification')
flags.DEFINE_integer('meta_batch_size', 4, 'number of tasks sampled per meta-training iteration')
flags.DEFINE_float('meta_lr', 0.001, 'the meta learning rate')
flags.DEFINE_float('update_lr', 0.01, 'the inner-update learning rate')
flags.DEFINE_integer('update_batch_size', 1, 'K for K-shot learning.')
flags.DEFINE_integer('num_updates', 5, 'number of inner update steps during training.')
flags.DEFINE_integer('num_train_tasks', 20, 'number of meta training tasks.')
flags.DEFINE_float('l2_alpha', 0.001, 'param of the l2_norm')
flags.DEFINE_float('l1_alpha', 0.001, 'param of the l1_norm')
flags.DEFINE_float('dropout_rate', 0, 'dropout_rate of the FC layer')
flags.DEFINE_integer('base_num_filters', 16, 'number of filters for conv nets.')
flags.DEFINE_integer('test_num_updates', 10, 'number of inner update steps during testing')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', 'logs/miniimagenet1shot/', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', False, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_bool('test_set', False, 'Set to true to test on the the test set, False for the validation set.')
flags.DEFINE_bool('data_aug', False, 'whether use the data augmentation.')
flags.DEFINE_string('backbone', 'Conv4', 'Conv4 or ResNet12 backone.')
if FLAGS.train:
    # During training, validation runs in meta-batches of tasks, so fewer
    # sess.run calls are needed to cover ~600 evaluation episodes.
    NUM_TEST_POINTS = int(600/FLAGS.meta_batch_size)
else:
    # At test time main() forces meta_batch_size to 1, so use all 600 episodes.
    NUM_TEST_POINTS = 600
# Keep at most this many checkpoints on disk (the worst are pruned in train()).
LEN_MODELS = 50
# Iterations between loss printouts and (x6) between validation runs.
PRINT_INTERVAL = 50
TEST_PRINT_INTERVAL = PRINT_INTERVAL*6
def train(model, saver, sess, exp_string, task_generator, resume_itr=0):
    """Meta-train `model`, periodically validating and checkpointing the best models.

    Args:
        model: LWAU instance with constructed train/val graphs and placeholders.
        saver: tf.train.Saver used to write and prune checkpoints.
        sess: active TensorFlow session.
        exp_string: experiment identifier embedded in checkpoint paths.
        task_generator: TaskGenerator yielding meta-batches of tasks.
        resume_itr: iteration to resume from (0 for a fresh run).
    """
    print('Done initializing, starting training.')
    print(exp_string)
    # NOTE(review): these accumulate the pre-/post-update *accuracies* fetched
    # below (result[-2], result[-1]), despite the historical variable names.
    prelosses, postlosses = [], []
    # Maps checkpoint path -> validation accuracy, capped at LEN_MODELS entries.
    models = {}
    for itr in range(resume_itr, FLAGS.metatrain_iterations):
        if FLAGS.backbone == 'Conv4':
            feed_dict = {model.meta_lr: FLAGS.meta_lr}
        else:
            # ResNet12 backbone: halve the meta learning rate every 15k iterations.
            lr = FLAGS.meta_lr * 0.5 ** int(itr / 15000)
            feed_dict = {model.meta_lr: lr}
        inputa, labela, inputb, labelb = task_generator.get_data_n_tasks(FLAGS.meta_batch_size, train=True)
        feed_dict[model.inputa] = inputa
        feed_dict[model.labela] = labela
        feed_dict[model.inputb] = inputb
        feed_dict[model.labelb] = labelb
        # Fetch the meta-train op plus loss/accuracy before and after the final
        # inner update.
        input_tensors = [model.metatrain_op]
        input_tensors.extend([model.total_loss1, model.total_losses2[FLAGS.num_updates-1]])
        input_tensors.extend([model.total_accuracy1, model.total_accuracies2[FLAGS.num_updates-1]])
        result = sess.run(input_tensors, feed_dict)
        prelosses.append(result[-2])
        postlosses.append(result[-1])
        if (itr!=0) and itr % PRINT_INTERVAL == 0:
            print_str = 'Iteration ' + str(itr)
            print_str += ': ' + str(np.mean(prelosses)) + ', ' + str(np.mean(postlosses))
            print(print_str)
            prelosses, postlosses = [], []
        if (itr!=0) and itr % TEST_PRINT_INTERVAL == 0:
            # Periodic meta-validation over NUM_TEST_POINTS meta-batches.
            metaval_accuracies = []
            for _ in range(NUM_TEST_POINTS):
                feed_dict = {}
                inputa, labela, inputb, labelb = task_generator.get_data_n_tasks(FLAGS.meta_batch_size, train=False)
                feed_dict[model.inputa] = inputa
                feed_dict[model.labela] = labela
                feed_dict[model.inputb] = inputb
                feed_dict[model.labelb] = labelb
                input_tensors = [[model.metaval_total_accuracy1] + model.metaval_total_accuracies2]
                result = sess.run(input_tensors, feed_dict)
                metaval_accuracies.append(result[0])
            metaval_accuracies = np.array(metaval_accuracies)
            means = np.mean(metaval_accuracies, 0)
            stds = np.std(metaval_accuracies, 0)
            ci95 = 1.96 * stds / np.sqrt(NUM_TEST_POINTS)
            print('----------------------------------------', itr)
            # Fixed labels: previously stds was printed as "loss" and ci95 as
            # "stddev".
            print('Mean validation accuracy:', means)
            print('Validation accuracy stddev:', stds)
            print('Validation accuracy 95% CI:', ci95)
            print('----------------------------------------', )
            # Track the best checkpoints: keep at most LEN_MODELS, evicting the
            # worst one (and deleting its files) when a better model appears.
            val_postaccs = max(means)
            model_name = FLAGS.logdir + '/' + exp_string + '/model' + str(itr)
            if len(models) >= LEN_MODELS:
                min_acc, min_model = min(zip(models.values(), models.keys()))
                if val_postaccs > min_acc:
                    del models[min_model]
                    models[model_name] = val_postaccs
                    saver.save(sess, model_name)
                    # os.remove(min_model+'.meta')
                    os.remove(min_model + '.data-00000-of-00001')
                    os.remove(min_model + '.index')
                    os.remove(model_name + '.meta')
                else:
                    pass
                max_acc, max_model = max(zip(models.values(), models.keys()))
                print(max_model, ':', max_acc)
            else:
                models[model_name] = val_postaccs
                saver.save(sess, model_name)
                # .meta graph files are large and re-creatable; drop them eagerly.
                os.remove(model_name + '.meta')
    # Always save a final checkpoint at the last iteration reached.
    saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
def test(model, sess, task_generator):
    """Evaluate over NUM_TEST_POINTS meta-batches; return the best mean accuracy.

    Seeds numpy/random so the sampled evaluation tasks are reproducible.
    The returned value is the maximum mean accuracy across the pre-update and
    each post-update evaluation point.
    """
    np.random.seed(1)
    random.seed(1)
    metaval_accuracies = []
    max_acc = 0
    print(NUM_TEST_POINTS)
    for _ in range(NUM_TEST_POINTS):
        # meta_lr of 0 so evaluation performs no meta-update.
        feed_dict = {model.meta_lr : 0.0}
        inputa, labela, inputb, labelb = task_generator.get_data_n_tasks(FLAGS.meta_batch_size, train=False)
        feed_dict[model.inputa] = inputa
        feed_dict[model.labela] = labela
        feed_dict[model.inputb] = inputb
        feed_dict[model.labelb] = labelb
        result = sess.run([model.metaval_total_accuracy1] + model.metaval_total_accuracies2, feed_dict)
        metaval_accuracies.append(result)
    metaval_accuracies = np.array(metaval_accuracies)
    means = np.mean(metaval_accuracies, 0)
    stds = np.std(metaval_accuracies, 0)
    ci95 = 1.96*stds/np.sqrt(NUM_TEST_POINTS)
    # Best mean accuracy over all inner-update steps (including step 0).
    for mean_acc in means:
        if mean_acc > max_acc:
            max_acc = mean_acc
    # Fixed labels: stds is the stddev, ci95 the 95% confidence interval
    # (previously printed as "loss" and "stddev" respectively).
    print('Mean validation accuracy:', means)
    print('Validation accuracy stddev:', stds)
    print('Validation accuracy 95% CI:', ci95)
    return max_acc
def main():
    """Build the LWAU model and either meta-train it or evaluate saved checkpoints."""
    FLAGS.logdir = 'logs/miniimagenet' + str(FLAGS.update_batch_size) + 'shot/'
    if not FLAGS.train:
        # Evaluate one task at a time; remember the configured sizes so the
        # checkpoint directory name (exp_string) still matches training.
        orig_meta_batch_size = FLAGS.meta_batch_size
        FLAGS.meta_batch_size = 1
        orig_update_batch_size = FLAGS.update_batch_size
    # K support images per class plus 15 query images per class.
    task_generator = TaskGenerator(FLAGS.update_batch_size+15, FLAGS.meta_batch_size)
    dim_output = task_generator.dim_output
    dim_input = task_generator.dim_input
    model = LWAU(dim_input, dim_output)
    if FLAGS.train:
        model.construct_model(num_updates=FLAGS.num_updates, train=True)
    # Always build the evaluation graph (used for meta-validation while training).
    model.construct_model(num_updates=FLAGS.test_num_updates, train=False)
    # model.summ_op = tf.summary.merge_all()
    saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES), max_to_keep=0)
    sess = tf.InteractiveSession()
    if not FLAGS.train:
        # change to original meta batch size when loading model.
        FLAGS.meta_batch_size = orig_meta_batch_size
        FLAGS.update_batch_size = orig_update_batch_size
    # Experiment string encodes the hyper-parameters; it names the log/ckpt dir.
    exp_string = str(FLAGS.num_classes)+'.mbs_'+str(FLAGS.meta_batch_size)
    exp_string += '.nstep_' + str(FLAGS.num_updates) + '.tnstep_' + str(FLAGS.test_num_updates)
    exp_string += '.ubs_' + str(FLAGS.update_batch_size) + '.nts_' + str(FLAGS.num_train_tasks)
    exp_string += '.l1_' + str(FLAGS.l1_alpha) +'.l2_' + str(FLAGS.l2_alpha)
    exp_string += '.lr_' + str(FLAGS.meta_lr) + '.ulr_' + str(FLAGS.update_lr)
    exp_string += '.drop_' + str(FLAGS.dropout_rate) + '.nfs_' + str(FLAGS.base_num_filters)
    resume_itr = 0
    model_file = None
    tf.global_variables_initializer().run()
    tf.train.start_queue_runners()
    if FLAGS.resume:
        model_file = tf.train.latest_checkpoint(FLAGS.logdir + '/' + exp_string)
        if FLAGS.test_iter > 0:
            model_file = model_file[:model_file.index('model')] + 'model' + str(FLAGS.test_iter)
        if model_file:
            # Parse the resume iteration out of ".../model<itr>".
            ind1 = model_file.index('model')
            resume_itr = int(model_file[ind1+5:])
            print("Restoring model weights from " + model_file)
            saver.restore(sess, model_file)
    if FLAGS.train:
        train(model, saver, sess, exp_string, task_generator, resume_itr)
    else:
        # Test every sufficiently-late checkpoint and report the best.
        # (The redundant local `import os` was removed: os is imported at
        # module level.)
        max_accs = 0
        models = os.listdir(FLAGS.logdir + exp_string)
        model_epochs = []
        for model_file in models:
            # Checkpoint index files look like "model<epoch>.index".
            if 'model' in model_file and 'index' in model_file:
                i = model_file.find('del')
                j = model_file.find('.')
                model_epoch = model_file[i + 3:j]
                model_epochs.append(int(model_epoch))
        model_epochs.sort()
        max_epoch = 0
        for epoch in model_epochs:
            # Skip checkpoints from the first 5% of training.
            if epoch > float(FLAGS.metatrain_iterations) / 20:
                model_file = FLAGS.logdir + exp_string + '/model' + str(epoch)
                saver.restore(sess, model_file)
                print("testing model: " + model_file)
                acc = test(model, sess, task_generator)
                if acc > max_accs:
                    max_accs = acc
                    max_epoch = epoch
                print('----------max_acc:', max_accs, '-----------max_model:', max_epoch)
if __name__ == "__main__":
main()
|
qyxqyx/LWML
|
utils.py
|
import os
import random
import tensorflow as tf
from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
## Image helper
def get_images(paths, nb_samples=None):
    """Sample `nb_samples` image files from each class folder in `paths`.

    Returns two lists of (class_index, file_path) tuples: the first
    FLAGS.update_batch_size samples of each class form the support set and
    the remainder form the query set.
    """
    support, query = [], []
    for class_idx, folder in enumerate(paths):
        candidates = [os.path.join(folder, name) for name in os.listdir(folder)]
        chosen = random.sample(candidates, nb_samples)
        support.extend((class_idx, path) for path in chosen[:FLAGS.update_batch_size])
        query.extend((class_idx, path) for path in chosen[FLAGS.update_batch_size:])
    return support, query
## Network helpers
def conv_block(inp, cweight, bweight, reuse, scope, activation=tf.nn.relu, pool=True, max_pool_pad='VALID', residual=False):
    """ Perform, conv, batch norm, nonlinearity, and max pool """
    # The convolution itself is stride 1; spatial downsampling happens in the
    # 2x2/stride-2 max pool below (the same `stride` list serves as both the
    # ksize and strides arguments of tf.nn.max_pool).
    stride, no_stride = [1,2,2,1], [1,1,1,1]
    conv_output = tf.nn.conv2d(inp, cweight, no_stride, 'SAME') + bweight
    # Batch norm + activation; `reuse`/`scope` select the per-layer norm variables.
    normed = normalize(conv_output, activation, reuse, scope)
    if pool:
        normed = tf.nn.max_pool(normed, stride, stride, max_pool_pad)
    return normed
def normalize(inp, activation, reuse, scope):
    # Batch normalization with the nonlinearity applied after normalization.
    return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
## Loss functions
def mse(pred, label):
    # Mean squared error over all elements (both tensors are flattened first).
    pred = tf.reshape(pred, [-1])
    label = tf.reshape(label, [-1])
    return tf.reduce_mean(tf.square(pred-label))
def xent(pred, label):
    # Note - with tf version <=0.12, this loss has incorrect 2nd derivatives
    # Per-example softmax cross-entropy scaled by 1/K (K-shot support size), so
    # that a later reduce_sum over a task's examples yields a mean-like loss.
    return tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label) / FLAGS.update_batch_size
|
qyxqyx/LWML
|
networks.py
|
<gh_stars>1-10
import tensorflow as tf
from tensorflow.python.platform import flags
from utils import mse, xent, conv_block, normalize
import numpy as np
FLAGS = flags.FLAGS
class Conv_4(object):
    """Conv-4 backbone: four 3x3 conv blocks followed by a linear classifier head."""
    def __init__(self):
        '''
        Conv-4 backbone
        '''
        # RGB inputs, FLAGS-configured hidden width / class count, 84x84 images.
        self.channels = 3
        self.dim_hidden = FLAGS.base_num_filters
        self.dim_output = FLAGS.num_classes
        self.img_size = 84
    def construct_weights(self):
        # Weights are returned as five per-layer dicts so LWAU can attach a
        # separate learnable inner-loop step size (alpha1..alpha5) to each layer.
        weights1 = {}
        weights2 = {}
        weights3 = {}
        weights4 = {}
        weights5 = {}
        dtype = tf.float32
        conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
        fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
        k = 3
        weights1['conv1'] = tf.get_variable('conv1', [k, k, self.channels, self.dim_hidden],
                                            initializer=conv_initializer, dtype=dtype)
        weights1['b1'] = tf.Variable(tf.zeros([self.dim_hidden]), name='b1')
        weights2['conv2'] = tf.get_variable('conv2', [k, k, self.dim_hidden, self.dim_hidden],
                                            initializer=conv_initializer, dtype=dtype)
        weights2['b2'] = tf.Variable(tf.zeros([self.dim_hidden]), name='b2')
        weights3['conv3'] = tf.get_variable('conv3', [k, k, self.dim_hidden, self.dim_hidden],
                                            initializer=conv_initializer, dtype=dtype)
        weights3['b3'] = tf.Variable(tf.zeros([self.dim_hidden]), name='b3')
        weights4['conv4'] = tf.get_variable('conv4', [k, k, self.dim_hidden, self.dim_hidden],
                                            initializer=conv_initializer, dtype=dtype)
        weights4['b4'] = tf.Variable(tf.zeros([self.dim_hidden]), name='b4')
        # After four stride-2 max pools an 84x84 input is 5x5 spatially, hence
        # the dim_hidden * 5 * 5 flattened input size of the final FC layer.
        weights5['w5'] = tf.get_variable('w5', [self.dim_hidden * 5 * 5, self.dim_output], initializer=fc_initializer)
        weights5['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='b5')
        return weights1, weights2, weights3, weights4, weights5
    def forward(self, inp, weights, reuse=False, scope=''):
        # reuse is for the normalization parameters.
        channels = self.channels
        inp = tf.reshape(inp, [-1, self.img_size, self.img_size, channels])
        hidden1 = conv_block(inp, weights['conv1'], weights['b1'], reuse, scope + '0')
        hidden2 = conv_block(hidden1, weights['conv2'], weights['b2'], reuse, scope + '1')
        hidden3 = conv_block(hidden2, weights['conv3'], weights['b3'], reuse, scope + '2')
        hidden4 = conv_block(hidden3, weights['conv4'], weights['b4'], reuse, scope + '3')
        # Flatten spatial/channel dims and apply the linear head.
        hidden4 = tf.reshape(hidden4, [-1, np.prod([int(dim) for dim in hidden4.get_shape()[1:]])])
        return tf.matmul(hidden4, weights['w5']) + weights['b5']
class ResNet12(object):
    '''
    resnet12 backbone
    '''
    def __init__(self):
        self.channels = 3
        self.dim_hidden = FLAGS.base_num_filters
        self.dim_output = FLAGS.num_classes
        self.img_size = 84
        # Toggled by LWAU.construct_model: controls dropout in forward().
        self.train_flag = True
    def construct_weights(self):
        # Four residual blocks, each with convs 'a','b','c' plus a 1x1 shortcut;
        # the filter count doubles per block. Weights are partitioned into five
        # dicts so LWAU can give each block (and the FC head) its own step size.
        weights = {}
        weights1, weights2, weights3, weights4, weights5 = {}, {}, {}, {}, {}
        k = 3
        dtype = tf.float32
        conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
        fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
        for i in range(4):
            block_name = str(i+1)
            for j in ['a', 'b', 'c']:
                var_name = block_name+'/'+j+'/conv/'+'kernel'
                var_filters = FLAGS.base_num_filters * np.power(2, i)
                if i == 0 and j == 'a':
                    input_filters = 3
                elif j == 'a':
                    # NOTE(review): true division yields a float under Python 3,
                    # putting a float into the shape list — `var_filters // 2`
                    # was presumably intended; confirm.
                    input_filters = var_filters / 2
                else:
                    input_filters = var_filters
                var_shape = [3, 3, input_filters, var_filters]
                weights[var_name] = tf.get_variable(var_name,
                                                    var_shape,
                                                    initializer=conv_initializer,
                                                    dtype=dtype)
                var_name = block_name + '/' + j + '/conv/' + 'bias'
                var_shape = [var_filters, ]
                weights[var_name] = tf.get_variable(var_name,
                                                    var_shape,
                                                    initializer=fc_initializer,
                                                    dtype=dtype)
            var_name = block_name + '/shortcut/conv/kernel'
            var_filters = FLAGS.base_num_filters * np.power(2, i)
            if i == 0:
                input_filters = 3
            else:
                # NOTE(review): same float-division concern as above.
                input_filters = var_filters / 2
            var_shape = [1, 1, input_filters, var_filters]
            weights[var_name] = tf.get_variable(var_name,
                                                var_shape,
                                                initializer=conv_initializer,
                                                dtype=dtype)
            var_name = block_name + '/shortcut/conv/bias'
            var_shape = [var_filters, ]
            weights[var_name] = tf.get_variable(var_name,
                                                var_shape,
                                                initializer=fc_initializer,
                                                dtype=dtype)
        weights['5/kernel'] = tf.get_variable('dense/kernel',
                                              [FLAGS.base_num_filters * np.power(2, 3), self.dim_output],
                                              initializer=fc_initializer)
        weights['5/bias'] = tf.get_variable('dense/bias', [self.dim_output],
                                            initializer=fc_initializer)
        # Partition by the leading block digit: '1'..'4' -> blocks, rest -> head.
        for key, var in weights.items():
            if key[0] == '1':
                weights1[key] = var
            elif key[0] == '2':
                weights2[key] = var
            elif key[0] == '3':
                weights3[key] = var
            elif key[0] == '4':
                weights4[key] = var
            else:
                weights5[key] = var
        return weights1, weights2, weights3, weights4, weights5
    def forward(self, inp, weights, reuse=False, scope=''):
        feature = tf.reshape(inp, [-1, 84, 84, 3])
        for i in range(4):
            block_name = str(i + 1)
            # 1x1 shortcut projection for the residual connection.
            kernel_name = block_name + '/shortcut/conv/kernel'
            bias_name = block_name + '/shortcut/conv/bias'
            shortcut = tf.nn.convolution(feature, weights[kernel_name], padding='SAME', strides=[1,1]) + weights[bias_name]
            for j in ['a', 'b']:
                kernel_name = block_name + '/' + j + '/conv/' + 'kernel'
                bias_name = block_name + '/' + j + '/conv/' + 'bias'
                feature = tf.nn.convolution(feature, weights[kernel_name], padding='SAME', strides=[1, 1]) + \
                          weights[bias_name]
                feature = tf.layers.batch_normalization(feature, training=True,
                                                        name=block_name + '/' + j + '/bn',
                                                        reuse=reuse)
                feature = tf.nn.relu(feature)
            kernel_name = block_name + '/c/conv/' + 'kernel'
            bias_name = block_name + '/c/conv/' + 'bias'
            feature = tf.nn.convolution(feature, weights[kernel_name], padding='SAME', strides=[1, 1]) + \
                      weights[bias_name]
            feature = feature + shortcut
            # NOTE(review): `j` still holds 'b' here, so this batch norm uses
            # the same scope name as the 'b' conv's batch norm above —
            # presumably unintended variable sharing; confirm.
            feature = tf.layers.batch_normalization(feature, training=True,
                                                    name=block_name + '/' + j + '/bn',
                                                    reuse=reuse)
            feature = tf.nn.relu(feature)
            feature = tf.layers.max_pooling2d(feature, [2, 2], [2, 2], 'same')
        # Global average pool over spatial dims, optional dropout, linear head.
        feature = tf.reduce_mean(feature, axis=[1, 2])
        if FLAGS.dropout_rate > 0:
            feature = tf.layers.dropout(feature, FLAGS.dropout_rate, training=self.train_flag, seed=1)
        fc1 = tf.matmul(feature, weights['5/kernel']) + weights['5/bias']
        return fc1
|
qyxqyx/LWML
|
task_generator.py
|
""" Code for loading data. """
import numpy as np
import os
import random
import tensorflow as tf
from tensorflow.python.platform import flags
from utils import get_images
import os
import pwd
import cv2
import math
FLAGS = flags.FLAGS
def random_crop(img, scale=(0.6, 1.0), ratio=(3. / 4., 4. / 3.)):
    """Take a random crop of `img` (area/aspect jittered) and resize to 84x84.

    Makes up to 10 attempts to sample a crop whose area is a random fraction
    of the image (within `scale`) and whose aspect ratio lies within `ratio`;
    if none fits, falls back to a centered square crop.
    """
    height, width = img.shape[0], img.shape[1]
    full_area = height * width
    for _ in range(10):
        target_area = random.uniform(*scale) * full_area
        aspect = random.uniform(*ratio)
        crop_w = int(round(math.sqrt(target_area * aspect)))
        crop_h = int(round(math.sqrt(target_area / aspect)))
        if random.random() < 0.5:
            crop_w, crop_h = crop_h, crop_w
        if crop_w <= width and crop_h <= height:
            top = random.randint(0, height - crop_h)
            left = random.randint(0, width - crop_w)
            patch = img[top:top + crop_h, left:left + crop_w, :]
            return cv2.resize(patch, (84, 84))
    # Fallback: centered square crop of the shorter side.
    side = min(height, width)
    top = (height - side) // 2
    left = (width - side) // 2
    patch = img[top:top + side, left:left + side, :]
    return cv2.resize(patch, (84, 84))
def random_flip(img):
    """Horizontally mirror the image with probability 0.5."""
    return cv2.flip(img, 1) if random.random() < 0.5 else img
class TaskGenerator(object):
    """
    Data Generator capable of generating batches of sinusoid or Omniglot data.
    A "class" is considered a class of omniglot digits or a particular sinusoid function.
    """
    def __init__(self, num_samples_per_class, batch_size, config={}):
        """
        Args:
            num_samples_per_class: num samples to generate per class in one batch
            batch_size: size of meta batch size (e.g. number of functions)
        """
        # NOTE(review): mutable default `config={}` — never mutated here, but a
        # None default would be safer.
        self.batch_size = batch_size
        self.num_samples_per_class = num_samples_per_class
        self.num_classes = config.get('num_classes', FLAGS.num_classes)
        self.img_size = config.get('img_size', (84, 84))
        # Flattened RGB image size (84*84*3).
        self.dim_input = np.prod(self.img_size)*3
        self.dim_output = self.num_classes
        # Dataset root depends on which machine (unix user) the code runs on.
        if pwd.getpwuid(os.getuid())[0] == 'qyx':
            root = '/home/qyx/ML'
        else:
            root = '/home/yanlb/code/qyx'
        metatrain_folder = config.get('metatrain_folder', root + '/miniimagenet/train')
        if FLAGS.test_set:
            metaval_folder = config.get('metaval_folder', root + '/miniimagenet/test')
        else:
            metaval_folder = config.get('metaval_folder', root + '/miniimagenet/val')
        # One sub-folder per class.
        metatrain_folders = [os.path.join(metatrain_folder, label) \
                             for label in os.listdir(metatrain_folder) \
                             if os.path.isdir(os.path.join(metatrain_folder, label)) \
                             ]
        metaval_folders = [os.path.join(metaval_folder, label) \
                           for label in os.listdir(metaval_folder) \
                           if os.path.isdir(os.path.join(metaval_folder, label)) \
                           ]
        self.metatrain_character_folders = metatrain_folders
        self.metaval_character_folders = metaval_folders
        self.rotations = config.get('rotations', [0])
        self.num_total_train_batches = FLAGS.num_train_tasks
        self.num_total_val_batches = 600
        # Round-robin cursor over the pre-sampled validation tasks.
        self.pointer = 0
        self.store_data_per_task(train=False)
    def store_data_per_task(self, train=True):
        # Pre-sample a fixed set of 600 validation tasks so evaluation is
        # repeatable. NOTE(review): the `train` argument is ignored; only
        # validation tasks are ever stored.
        folders = self.metaval_character_folders
        self.val_tasks_data_classes = []
        for i in range(self.num_total_val_batches):
            task_folders = random.sample(folders, self.num_classes)
            random.shuffle(task_folders)
            support, query = get_images(task_folders, nb_samples=self.num_samples_per_class)
            data_class_task = Files_per_task(support, query, i)
            self.val_tasks_data_classes.append(data_class_task)
    def read_data_per_tesk(self, task_index, train=True):
        # (Method name typo "tesk" kept — callers depend on it.)
        # Training tasks are sampled fresh each call; validation tasks come
        # from the fixed pre-sampled list indexed by task_index.
        if train:
            folders = self.metatrain_character_folders
            task_folders = random.sample(folders, self.num_classes)
            random.shuffle(task_folders)
            train_files, test_files = get_images(task_folders, nb_samples=self.num_samples_per_class)
        else:
            task_class = self.val_tasks_data_classes[task_index]
            train_files = task_class.support
            test_files = task_class.query
        # NOTE(review): in the validation branch this shuffles the stored
        # support/query lists in place.
        random.shuffle(train_files)
        random.shuffle(test_files)
        image_list = []
        label_list = []
        for image_and_label in train_files:
            image = cv2.imread(image_and_label[1])
            # Optional augmentation (ResNet12 training only).
            if train and FLAGS.data_aug and (FLAGS.backbone=='ResNet12'):
                if random.random() < 0.6:
                    image = random_crop(image)
                image = random_flip(image)
            # Scale pixel values to [0, 1) — note the divisor is 256, not 255.
            im2 = image.astype(np.float32) / 256
            im2 = im2.reshape(84, 84, 3)
            image_list.append(im2[np.newaxis, :])
            # label = np.array(image_and_label[0]).reshape((1,))
            label = image_and_label[0]
            label_list.append(label)
        task_train_ims = np.concatenate(image_list, axis=0)
        # task_train_lbls = np.concatenate(label_list, axis=0)
        task_train_lbls = np.array(label_list)
        task_train_lbls = make_one_hot(task_train_lbls, self.num_classes)
        image_list = []
        label_list = []
        for image_and_label in test_files:
            image = cv2.imread(image_and_label[1])
            if train and FLAGS.data_aug and (FLAGS.backbone=='ResNet12'):
                if random.random() < 0.6:
                    image = random_crop(image)
                image = random_flip(image)
            im2 = image.astype(np.float32) / 256
            im2 = im2.reshape(84, 84, 3)
            image_list.append(im2[np.newaxis, :])
            # label = np.array(image_and_label[0]).reshape((1,))
            label = image_and_label[0]
            label_list.append(label)
        task_test_ims = np.concatenate(image_list, axis=0)
        # task_test_lbls = np.concatenate(label_list, axis=0)
        task_test_lbls = np.array(label_list)
        task_test_lbls = make_one_hot(task_test_lbls, self.num_classes)
        return task_train_ims, task_train_lbls, task_test_ims, task_test_lbls
    def get_data_n_tasks(self, meta_batch_size, train=True):
        """Assemble a meta-batch: support/query images and one-hot labels."""
        if train:
            task_indexes = list(range(meta_batch_size))
        else:
            # Walk through the fixed validation tasks, wrapping at the end.
            task_indexes = list(range(self.pointer, self.pointer + meta_batch_size))
            if self.pointer + meta_batch_size >= self.num_total_val_batches:
                self.pointer = 0
            else:
                self.pointer += meta_batch_size
        train_ims = []
        train_lbls = []
        test_ims = []
        test_lbls = []
        for task_index in task_indexes:
            task_train_ims, task_train_lbls, task_test_ims, task_test_lbls = \
                self.read_data_per_tesk(task_index, train)
            train_ims.append(task_train_ims[np.newaxis, :])
            train_lbls.append(task_train_lbls[np.newaxis, :])
            test_ims.append(task_test_ims[np.newaxis, :])
            test_lbls.append(task_test_lbls[np.newaxis, :])
        # Stack per-task arrays into (meta_batch, examples, ...) arrays.
        meta_train_ims = np.concatenate(train_ims, axis=0)
        meta_train_lbls = np.concatenate(train_lbls, axis=0)
        meta_test_ims = np.concatenate(test_ims, axis=0)
        meta_test_lbls = np.concatenate(test_lbls, axis=0)
        return meta_train_ims, meta_train_lbls, meta_test_ims, meta_test_lbls
def make_one_hot(data, classes):
    """One-hot encode an integer label vector.

    Args:
        data: 1-D array of integer labels in [0, classes).
        classes: number of classes (width of the output).

    Returns:
        Integer array of shape (len(data), classes) with a 1 at each label index.
    """
    # `int` replaces the deprecated abstract `np.integer` as an astype target.
    return (np.arange(classes) == data[:, None]).astype(int)
class Files_per_task(object):
    """Holds the pre-sampled support/query file lists for one validation task."""
    def __init__(self, support, query, task_index):
        # (label, image-path) tuples for the support and query sets, plus the
        # task's position in the fixed validation-task list.
        self.task_index = task_index
        self.support = support
        self.query = query
    def read_images(self):
        # Intentionally a no-op: images are decoded lazily by the generator.
        pass
|
qyxqyx/LWML
|
lwau.py
|
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from utils import mse, xent, conv_block, normalize
from networks import Conv_4, ResNet12
FLAGS = flags.FLAGS
class LWAU:
    """MAML-style meta-learner with per-layer learnable inner-loop step sizes
    (alpha1..alpha5, one scalar per weight group of the backbone)."""
    def __init__(self, dim_input=1, dim_output=1):
        # Flattened input size and number of output classes.
        self.dim_input = dim_input
        self.dim_output = dim_output
        # Outer-loop (meta) learning rate, overridable per sess.run call.
        self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
        self.update_lr = FLAGS.update_lr
        if FLAGS.backbone == 'Conv4':
            self.net = Conv_4()
        else:
            self.net = ResNet12()
        self.forward = self.net.forward
        self.construct_weights = self.net.construct_weights
        self.loss_func = xent
        self.classification = True
        self.dim_hidden = 32
        self.channels = 3
        self.img_size = int(np.sqrt(self.dim_input/self.channels))
        # Per-layer inner-loop step sizes, initialised just below update_lr.
        alpha_initializer = tf.initializers.random_uniform(minval=FLAGS.update_lr * 0.99, maxval=FLAGS.update_lr)
        self.alpha1 = tf.get_variable('alpha1', shape=[1, ], dtype=tf.float32, initializer=alpha_initializer)
        self.alpha2 = tf.get_variable('alpha2', shape=[1, ], dtype=tf.float32, initializer=alpha_initializer)
        self.alpha3 = tf.get_variable('alpha3', shape=[1, ], dtype=tf.float32, initializer=alpha_initializer)
        self.alpha4 = tf.get_variable('alpha4', shape=[1, ], dtype=tf.float32, initializer=alpha_initializer)
        self.alpha5 = tf.get_variable('alpha5', shape=[1, ], dtype=tf.float32, initializer=alpha_initializer)
        # Placeholders: (meta_batch, examples, 84, 84, 3) images and matching
        # one-hot labels; a = support (inner update), b = query (meta objective).
        shape = [FLAGS.meta_batch_size, None, 84, 84, 3]
        self.inputa = tf.placeholder(tf.float32, shape=shape)
        shape = [FLAGS.meta_batch_size, None, 84, 84, 3]
        self.inputb = tf.placeholder(tf.float32, shape=shape)
        shape = [FLAGS.meta_batch_size, None, FLAGS.num_classes]
        self.labela = tf.placeholder(tf.float32, shape=shape)
        shape = [FLAGS.meta_batch_size, None, FLAGS.num_classes]
        self.labelb = tf.placeholder(tf.float32, shape=shape)
    def construct_model(self, num_updates=1, train=True):
        # a: training data for inner gradient, b: test data for meta gradient
        self.net.train_flag = train
        with tf.variable_scope('', reuse=tf.AUTO_REUSE) as training_scope:
            #with tf.variable_scope('model', reuse=None) as training_scope:
            # alpha_vectors = []
            # Reuse the backbone weights if a graph was already built (the eval
            # graph built after the train graph shares the same variables).
            if 'weights' in dir(self):
                training_scope.reuse_variables()
                weights1, weights2, weights3, weights4, weights5 = self.weights1, self.weights2, self.weights3, self.weights4, self.weights5
                weights = self.weights
            else:
                # Define the weights
                weights1, weights2, weights3, weights4, weights5 = self.construct_weights()
                self.weights1, self.weights2, self.weights3, self.weights4, self.weights5 = weights1, weights2, weights3, weights4, weights5
                self.weights = {}
                self.weights.update(self.weights1)
                self.weights.update(self.weights2)
                self.weights.update(self.weights3)
                self.weights.update(self.weights4)
                self.weights.update(self.weights5)
                weights = self.weights
            def task_metalearn(inp, reuse=True):
                """ Perform gradient descent for one task in the meta-batch. """
                inputa, inputb, labela, labelb = inp
                task_outputbs, task_lossesb = [], []
                task_accuraciesb = []
                task_outputa = self.forward(inputa, weights, reuse=reuse) # only reuse on the first iter
                # The reuse=False invocation below exists only to create the
                # normalization variables; it builds no per-task graph.
                if reuse == False:
                    return None
                task_lossa = self.loss_func(task_outputa, labela)
                # First inner step: scale each layer group's gradient by its
                # learnable step size alpha1..alpha5.
                grads = tf.gradients(task_lossa, list(weights.values()))
                gradients = dict(zip(weights.keys(), grads))
                for key in weights.keys():
                    if key in weights1.keys():
                        gradients[key] = gradients[key]*self.alpha1
                    elif key in weights2.keys():
                        gradients[key] = gradients[key]*self.alpha2
                    elif key in weights3.keys():
                        gradients[key] = gradients[key]*self.alpha3
                    elif key in weights4.keys():
                        gradients[key] = gradients[key]*self.alpha4
                    elif key in weights5.keys():
                        gradients[key] = gradients[key]*self.alpha5
                    else:
                        pass
                fast_weights = dict(zip(weights.keys(), [weights[key] - gradients[key] for key in weights.keys()]))
                # Evaluate the adapted weights on the query set.
                output = self.forward(inputb, fast_weights, reuse=True)
                task_outputbs.append(output)
                task_lossesb.append(self.loss_func(output, labelb))
                # Remaining inner steps, each followed by a query-set evaluation.
                for j in range(num_updates - 1):
                    loss = self.loss_func(self.forward(inputa, fast_weights, reuse=True), labela)
                    grads = tf.gradients(loss, list(fast_weights.values()))
                    gradients = dict(zip(fast_weights.keys(), grads))
                    for key in weights.keys():
                        if key in weights1.keys():
                            gradients[key] = gradients[key] * self.alpha1
                        elif key in weights2.keys():
                            gradients[key] = gradients[key] * self.alpha2
                        elif key in weights3.keys():
                            gradients[key] = gradients[key] * self.alpha3
                        elif key in weights4.keys():
                            gradients[key] = gradients[key] * self.alpha4
                        elif key in weights5.keys():
                            gradients[key] = gradients[key] * self.alpha5
                        else:
                            pass
                    fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - gradients[key] for key in fast_weights.keys()]))
                    output = self.forward(inputb, fast_weights, reuse=True)
                    task_outputbs.append(output)
                    task_lossesb.append(self.loss_func(output, labelb))
                task_output = [task_outputa, task_outputbs, task_lossa, task_lossesb]
                task_accuracya = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(task_outputa), 1), tf.argmax(labela, 1))
                for j in range(num_updates):
                    task_accuraciesb.append(tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(task_outputbs[j]), 1), tf.argmax(labelb, 1)))
                task_output.extend([task_accuracya, task_accuraciesb])
                return task_output
            # Create variables once with a dummy call, then map the per-task
            # computation across the meta-batch.
            unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)
            out_dtype = [tf.float32, [tf.float32]*num_updates, tf.float32, [tf.float32]*num_updates]
            out_dtype.extend([tf.float32, [tf.float32]*num_updates])
            result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)
            outputas, outputbs, lossesa, lossesb, accuraciesa, accuraciesb = result
        ## Performance & Optimization
        if train:
            # Average losses/accuracies over the meta-batch; optimise the loss
            # after the final inner update plus optional l1/l2 weight penalties.
            self.total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(FLAGS.meta_batch_size)
            self.total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            self.outputas, self.outputbs = outputas, outputbs
            self.total_accuracy1 = tf.reduce_sum(accuraciesa) / tf.to_float(FLAGS.meta_batch_size)
            self.total_accuracies2 = [tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            optimizer = tf.train.AdamOptimizer(self.meta_lr)
            weight_l_loss0 = 0
            if FLAGS.l2_alpha > 0:
                for key, array in self.weights.items():
                    weight_l_loss0 += tf.reduce_sum(tf.square(array)) * FLAGS.l2_alpha
            if FLAGS.l1_alpha > 0:
                for key, array in self.weights.items():
                    weight_l_loss0 += tf.reduce_sum(tf.abs(array)) * FLAGS.l1_alpha
            self.gvs = gvs = optimizer.compute_gradients(self.total_losses2[FLAGS.num_updates - 1] + weight_l_loss0)
            # Clip meta-gradients elementwise to [-10, 10] for stability.
            gvs = [(tf.clip_by_value(grad, -10, 10), var) for grad, var in gvs]
            self.metatrain_op = optimizer.apply_gradients(gvs)
        else:
            # Evaluation tensors only; no optimiser is built for the val graph.
            self.metaval_total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(FLAGS.meta_batch_size)
            self.metaval_total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
            self.metaval_total_accuracy1 = tf.reduce_sum(accuraciesa) / tf.to_float(FLAGS.meta_batch_size)
            self.metaval_total_accuracies2 = [tf.reduce_sum(accuraciesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
|
mfrister/phipsair
|
setup.py
|
<reponame>mfrister/phipsair<filename>setup.py
from setuptools import find_packages, setup
# The PyPI long description is taken verbatim from the README (Markdown).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setup(
    name="phipsair",
    version="0.4.1",
    description="phipsair allows controlling Philips air purifiers via encrypted CoAP.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="betaboon, <NAME>",
    url="https://github.com/mfrister/phipsair",
    project_urls={
        "Release notes": "https://github.com/mfrister/phipsair/blob/main/CHANGELOG.md",
        "Bug Tracker": "https://github.com/mfrister/phipsair/issues",
    },
    license="MIT",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=find_packages(),
    # py.typed marks the package as PEP 561 typed for downstream type checkers.
    package_data={"phipsair": ["py.typed"]},
    # aiocoap provides the CoAP transport; pycryptodomex the cryptographic
    # primitives for the encrypted protocol.
    install_requires=[
        "aiocoap>=0.4.1, <0.5",
        "pycryptodomex>=3.13, <4.0",
    ],
    entry_points={
        "console_scripts": [
            "phipsair=phipsair.__main__:main",
        ],
    },
    zip_safe=False,
)
|
Nathaliacr/Nathaliacr-NathaliaCalderon_Ejercicio23
|
ejecuta.py
|
"""Compile and run suma.cpp, then load and print its numeric output."""
import os
import subprocess

import numpy as np
import matplotlib.pyplot as plt

# Compile the C++ helper. check=True aborts with a clear error if compilation
# fails (os.system silently ignored a non-zero exit status).
subprocess.run(["g++", "suma.cpp", "-o", "suma.x"], check=True)
# Run the binary and capture its stdout into suma.dat (replaces the shell
# redirection `./suma.x > suma.dat`).
with open("suma.dat", "w") as out:
    subprocess.run(["./suma.x"], stdout=out, check=True)

data = np.loadtxt("suma.dat")
print(data)
|
AaqeelorShahid/Task-7
|
main.py
|
<reponame>AaqeelorShahid/Task-7
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import dataset
dataset = pd.read_csv('UK.csv')
# Columns 1 and 2 are used as features — presumably year and CO2 emission
# (see axis labels below); confirm against UK.csv's header.
X = dataset.iloc[:, [1, 2]].values
# Using Elbow method to find number of Clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
    # Within-cluster sum of squares (inertia) for k = 1..10.
    kmeans = KMeans(n_clusters=i, init="k-means++", max_iter=300, n_init=10, random_state=0)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
    print(i)
plt.figure(figsize=(15, 10))
plt.plot (range(1, 11), wcss)
plt.title("The Elbow Method")
plt.xlabel("Number of cluster")
plt.ylabel("WCSS")
plt.show()
# Applying K-Means to dataset
# k=2 was chosen from the elbow plot above.
kmeans = KMeans(n_clusters=2, init="k-means++", n_init=10, max_iter=300, random_state=0)
y_kmeans = kmeans.fit_predict(X)
# Visulizing the data in Scatterplot
plt.figure(figsize=(15, 10))
plt.scatter (X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = 'blue', alpha=0.2, label = 'Cluster 1')
plt.scatter (X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = 'purple', alpha=0.5, label = 'Cluster 2')
plt.scatter (kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], alpha=0.5, s=300, c = 'cyan', label='Centroids')
plt.title("Clusters of CO2 Emission (UK)")
plt.xlabel("Year", size = 14)
plt.ylabel("Average CO2 Emission tones", size = 14)
plt.legend()
plt.show()
|
bioinformed/esa
|
esa/__init__.py
|
<filename>esa/__init__.py
from esa import *
|
bioinformed/esa
|
setup.py
|
<gh_stars>1-10
# Build configuration for the 'esa' package: two Cython extensions that need
# the NumPy headers (rmq, and esa which also links the C SA-IS implementation).
import numpy as np
from setuptools import setup, find_packages
from Cython.Distutils import build_ext
from distutils.extension import Extension
install_requires = ['Cython>=0.22', 'nose']
setup_requires = []
tests_require = ['coverage']
# Both extensions compile against the NumPy C API, hence np.get_include().
ext_modules = [Extension('esa.rmq', ['esa/rmq.pyx'], include_dirs = [np.get_include()]),
               Extension('esa.esa', ['esa/esa.pyx', 'esa/sais.c'], include_dirs = [np.get_include()])]
# Trove classifiers, one per line (stripped by setuptools).
classifiers = """
Development Status :: 2 - Alpha
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows :: Windows NT/2000
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: POSIX :: Linux
Operating System :: Unix
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bioinformatics
"""
if __name__ == '__main__':
    setup(
        name = 'esa',
        version = '0.2',
        description = 'Enhanced Suffix Array (ESA) implementation for Python',
        #url = 'https://github.com/bioinformed/vgraph',
        author = '<NAME>',
        maintainer = '<NAME>',
        author_email = '<EMAIL>',
        maintainer_email = '<EMAIL>',
        license = 'APACHE-2.0',
        classifiers = classifiers,
        zip_safe = False,
        test_suite = 'nose.collector',
        tests_require = tests_require,
        packages = find_packages(),
        install_requires = install_requires,
        setup_requires = setup_requires,
        # build_ext from Cython.Distutils cythonizes the .pyx sources.
        cmdclass = {'build_ext': build_ext},
        ext_modules = ext_modules,
        #scripts=['bin/vgraph'],
    )
|
jonashackt/pulumi-example-aws-python
|
__main__.py
|
# Pulumi program: provision a single Ubuntu EC2 instance reachable over SSH.
import pulumi
import pulumi_aws as aws
from pulumi_aws import ec2
# AMI image configuration
ec2_image_id = 'ami-07d1bb89ff2dd50fe'
ec2_image_owner = '099720109477'  # Canonical's AWS account (Ubuntu AMIs)
ec2_instance_size = 't2.micro'
ec2_instance_name = 'aws-ec2-ubuntu'
ec2_keypair_name = 'pulumi_key'  # key pair must already exist in AWS
ec2_ssh_port = 22
# Lets use Pulumi to get the AMI image
pulumi_ami = aws.get_ami(
    filters = [{ "name": "image-id", "values": [ec2_image_id]}],
    owners = [ec2_image_owner])
# Create a EC2 security group
# Inbound: SSH only; outbound: everything (protocol -1 = all).
pulumi_security_group = ec2.SecurityGroup(
    'pulumi-secgrp',
    description = 'pulumi: enable SSH access & outgoing connections',
    ingress = [
        { 'protocol': 'tcp', 'from_port': ec2_ssh_port, 'to_port': ec2_ssh_port, 'cidr_blocks': ['0.0.0.0/0'] }
    ],
    egress = [
        { 'protocol': '-1', 'from_port': 0, 'to_port': 0, 'cidr_blocks': ['0.0.0.0/0'] }
    ]
)
# Create EC2 instance
ec2_instance = ec2.Instance(
    ec2_instance_name,
    key_name = ec2_keypair_name,
    instance_type = ec2_instance_size,
    security_groups = [pulumi_security_group.name],
    ami = pulumi_ami.id
)
# Expose the address of the new instance as stack outputs.
pulumi.export('publicIp', ec2_instance.public_ip)
pulumi.export('publicHostName', ec2_instance.public_dns)
|
jonashackt/pulumi-example-aws-python
|
tests/test_docker.py
|
import os
def test_is_docker_installed(host):
package_docker = host.package('docker-ce')
assert package_docker.is_installed
def test_vagrant_user_is_part_of_group_docker(host):
user_vagrant = host.user('vagrant')
assert 'docker' in user_vagrant.groups
def test_run_hello_world_container_successfully(host):
hello_world_ran = host.run("sudo docker run hello-world")
assert 'Hello from Docker!' in hello_world_ran.stdout
|
ahedengren/dbt-vertica
|
dbt/adapters/vertica/connections.py
|
<reponame>ahedengren/dbt-vertica
from contextlib import contextmanager
from dataclasses import dataclass
import ssl
import os
import requests
from typing import Optional
from dbt.adapters.base import Credentials
from dbt.adapters.sql import SQLConnectionManager
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.contracts.connection import AdapterResponse
import dbt.exceptions
import vertica_python
@dataclass
class verticaCredentials(Credentials):
    """Connection credentials for a Vertica database, as configured in profiles.yml."""
    # Required connection parameters.
    host: str
    database: str
    schema: str
    username: str
    password: str
    # Optional parameters with sensible Vertica defaults.
    ssl: bool = False  # enable TLS for the connection
    port: int = 5433  # Vertica's default client port
    timeout: int = 3600  # connection timeout, seconds
    withMaterialization: bool = False  # enable WITH-clause materialization session-wide
    ssl_env_cafile: Optional[str] = None  # env var naming a CA file for TLS verification
    ssl_uri: Optional[str] = None  # URI from which to fetch a CA bundle
    @property
    def type(self):
        # Adapter type string dbt uses to select this plugin.
        return 'vertica'
    @property
    def unique_field(self):
        """
        Hashed and included in anonymous telemetry to track adapter adoption.
        Pick a field that can uniquely identify one team/organization building with this adapter
        """
        return self.host
    def _connection_keys(self):
        # return an iterator of keys to pretty-print in 'dbt debug'
        return ('host','port','database','username','schema')
class verticaConnectionManager(SQLConnectionManager):
    """Opens and manages vertica_python connections on behalf of dbt."""

    TYPE = 'vertica'

    @classmethod
    def open(cls, connection):
        """Open (or reuse) the Vertica connection described by ``connection.credentials``.

        On failure the connection state is set to ``'fail'`` and a
        ``FailedToConnectException`` is raised for dbt to report.
        """
        if connection.state == 'open':
            logger.debug(':P Connection is already open')
            return connection

        credentials = connection.credentials
        try:
            conn_info = {
                'host': credentials.host,
                'port': credentials.port,
                'user': credentials.username,
                # BUG FIX: this previously held an unexpanded '<PASSWORD>'
                # placeholder (a syntax error) instead of the profile password.
                'password': credentials.password,
                'database': credentials.database,
                'connection_timeout': credentials.timeout,
                'connection_load_balance': True,
                'session_label': f'dbt_{credentials.username}',
            }
            if credentials.ssl:
                # Build an SSL context from, in order of preference: a CA file
                # named via an environment variable, a CA bundle fetched from a
                # URI, or the system default trust store.
                if credentials.ssl_env_cafile is not None:
                    context = ssl.create_default_context(
                        cafile=os.environ.get(credentials.ssl_env_cafile),
                    )
                elif credentials.ssl_uri is not None:
                    resp = requests.get(credentials.ssl_uri)
                    resp.raise_for_status()
                    ssl_data = resp.content
                    context = ssl.create_default_context(
                        cadata=ssl_data.decode("ascii", "ignore")
                    )
                else:
                    context = ssl.create_default_context()
                conn_info['ssl'] = context
                logger.debug('SSL is on')
            handle = vertica_python.connect(**conn_info)
            connection.state = 'open'
            connection.handle = handle
            logger.debug(f':P Connected to database: {credentials.database} at {credentials.host}')
        except Exception as exc:
            logger.debug(f':P Error connecting to database: {exc}')
            connection.state = 'fail'
            connection.handle = None
            raise dbt.exceptions.FailedToConnectException(str(exc))

        # This is here mainly to support dbt-integration-tests.
        # It globally enables WITH materialization for every connection dbt
        # makes to Vertica. (Defaults to False)
        # Normal usage would be to use query HINT or declare session parameter in model or hook,
        # but tests do not support hooks and cannot change tests from dbt_utils
        # used in dbt-integration-tests
        if credentials.withMaterialization:
            try:
                logger.debug(':P Set EnableWithClauseMaterialization')
                cur = connection.handle.cursor()
                cur.execute("ALTER SESSION SET PARAMETER EnableWithClauseMaterialization=1")
                cur.close()
            except Exception as exc:
                # Best-effort: the session parameter is a convenience, not a requirement.
                logger.debug(f':P Could not EnableWithClauseMaterialization: {exc}')
        return connection

    @classmethod
    def get_response(cls, cursor):
        """Summarize the last statement for dbt's adapter response.

        NOTE(review): ``cursor.description`` is column metadata, not a status
        code — presumably used here only for display; confirm intent.
        """
        code = cursor.description
        rows = cursor.rowcount
        return AdapterResponse(
            _message="{} {}".format(code, rows),
            rows_affected=rows,
            code=code
        )

    def cancel(self, connection):
        """Ask the server to cancel the query running on this connection."""
        logger.debug(':P Cancel query')
        connection.handle.cancel()

    @contextmanager
    def exception_handler(self, sql):
        """Translate driver exceptions into dbt exceptions, releasing the connection."""
        try:
            yield
        except vertica_python.DatabaseError as exc:
            logger.debug(f':P Database error: {exc}')
            self.release()
            raise dbt.exceptions.DatabaseException(str(exc))
        except Exception as exc:
            logger.debug(f':P Error: {exc}')
            self.release()
            raise dbt.exceptions.RuntimeException(str(exc))
|
ahedengren/dbt-vertica
|
setup.py
|
#!/usr/bin/env python
# Packaging configuration for the dbt Vertica adapter plugin.
from setuptools import find_packages
from setuptools import setup
import pathlib
# BUG FIX: the package name was misspelled "dbt-verrtica"; the repository,
# project URL and description all use "dbt-vertica".
package_name = "dbt-vertica"
package_version = "0.21.0"
description = """The vertica adapter plugin for dbt (data build tool)"""
# Use the README as the long description shown on PyPI.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
    name=package_name,
    version=package_version,
    description=description,
    long_description=README,
    long_description_content_type='text/markdown',
    license='MIT',
    author='<NAME> (original), @ahedengren, <NAME>',
    author_email='<EMAIL>',
    url='https://github.com/andyreagan/dbt-vertica',
    packages=find_packages(),
    # Ship the adapter's dbt project and macro SQL alongside the code.
    package_data={
        'dbt': [
            'include/vertica/dbt_project.yml',
            'include/vertica/macros/*.sql',
            'include/vertica/macros/materializations/*.sql',
        ]
    },
    install_requires=[
        'dbt-core>=0.21.0',
        'vertica-python>=0.10.0',
    ]
)
|
AminovE99/link_saver
|
visited_links/tests.py
|
<gh_stars>0
import ast
import json
import time
import unittest
import redis
from django.test import TestCase, Client
# Create your tests here.
from link_saver import settings
from visited_links.apps import VisitedLinksConfig
from visited_links.views import VisitedLinksRegisterView
class TestVisitedLinks(unittest.TestCase):
    """End-to-end tests for the visited-links API, backed by a live Redis."""

    def setUp(self):
        # Raw JSON payloads as the client would POST them.
        self.test_client = Client()
        self.request_1 = '''
        {
            "links": [
                "https://ya.ru",
                "https://ya.ru?q=123",
                "funbox.ru",
                "https://stackoverflow.com/questions/11828270/how-to-exit-the-vim-editor"
            ]
        }
        '''
        self.request_2 = '''
        {
            "links": [
                "https://google.com",
                "https://vk.com/news",
                "aminovE99.github.io"
            ]
        }
        '''
        # Payload with an empty links list plus an unrelated key.
        self.request_empty = '''
        {
            "links": [
            ],
            "entity":[
                "Another entity"
            ]
        }
        '''
        self.request_1_json = json.loads(self.request_1)
        self.request_2_json = json.loads(self.request_2)
        self.request_empty = json.loads(self.request_empty)
        self.redis_instance = redis.StrictRedis(host=settings.REDIS_HOST,
                                                port=settings.REDIS_PORT, db=0, decode_responses=True)

    def tearDown(self):
        # Wipe Redis so tests stay independent.
        self.redis_instance.flushall()

    def test_VisitedLinksRegisterView_success(self):
        response = self.test_client.post('/visited_links', self.request_1_json, content_type="application/json")
        self.assertEqual(response.status_code, 200)

    def test_VisitedLinksRegisterView_refuse_get(self):
        # The register endpoint only accepts POST.
        response = self.test_client.get('/visited_links', content_type="application/json")
        self.assertEqual(response.status_code, 405)

    def test_VisitedLinksRegisterView_check_db(self):
        self.test_client.post('/visited_links', self.request_1_json, content_type="application/json")
        links = self.redis_instance.zrange('links', 0, -1)[0]
        # Stored members are the repr of a Python set of domains.
        links_set = ast.literal_eval(links)
        self.assertIn('ya.ru', links_set)
        self.assertIn('funbox.ru', links_set)
        self.assertIn('stackoverflow.com', links_set)

    def test_VisitedLinksRegisterView_empty_links(self):
        response = self.test_client.post('/visited_links', self.request_empty, content_type="application/json")
        self.assertEqual(response.status_code, 422)

    def test_GetLinksRegisterView_check_db(self):
        from_timestamp = int(time.time())
        self.test_client.post('/visited_links', self.request_1_json, content_type="application/json")
        self.test_client.post('/visited_links', self.request_2_json, content_type="application/json")
        to_timestamp = int(time.time())
        response = self.test_client.get('/visited_domains?from={}&to={}'.format(from_timestamp, to_timestamp))
        response_dict = response.json()
        # BUG FIX: these used assertTrue(a, b), which treats b as the failure
        # *message* and passes for any truthy a — the assertions never checked
        # membership. assertIn is what was intended.
        self.assertIn('aminovE99.github.io', response_dict['domains'])
        self.assertIn('stackoverflow.com', response_dict['domains'])
        self.assertIn('google.com', response_dict['domains'])
        self.assertIn('funbox.ru', response_dict['domains'])
        self.assertIn('ya.ru', response_dict['domains'])
        self.assertIn('vk.com', response_dict['domains'])

    def test_GetLinksRegisterView_empty_list(self):
        self.test_client.post('/visited_links', self.request_1_json, content_type="application/json")
        # A window that predates the insert must return no domains.
        response = self.test_client.get('/visited_domains?from={}&to={}'.format("10000", "20000"))
        response_dict = response.json()
        self.assertEqual(response_dict['domains'], [])

    def test_GetLinksRegisterView_invalid_from_timestamp(self):
        response = self.test_client.get('/visited_domains')
        self.assertEqual(response.json()['status'], "From timestamp not found")
        self.assertEqual(response.status_code, 422)

    def test_GetLinksRegisterView_invalid_to_timestamp(self):
        from_timestamp = int(time.time())
        response = self.test_client.get('/visited_domains?from={}'.format(from_timestamp))
        self.assertEqual(response.json()['status'], "To timestamp not found")
        self.assertEqual(response.status_code, 422)

    def test_GetLinksRegisterView_invalid_format(self):
        # NOTE(review): despite its name, this request omits 'to', so the view
        # reports the missing-'to' error before any format validation runs.
        response = self.test_client.get('/visited_domains?from={}'.format("symbols"))
        self.assertEqual(response.json()['status'], "To timestamp not found")
        self.assertEqual(response.status_code, 422)
|
AminovE99/link_saver
|
visited_links/utils.py
|
import urllib.parse as urlparse
def handle_links(links):
    """
    Extract the set of domains from a list of possible links.

    Entries with an explicit scheme (``https://ya.ru/x``) contribute their
    network location.  Scheme-less entries (``funbox.ru/page``) contribute
    the text before the first slash, provided it looks like a domain
    (contains at least one dot); bare words are discarded.

    :param links: list of strings with possible links
    :return: set of unique domain strings
    """
    handled_links = []
    for link in links:
        h_link = urlparse.urlparse(link)
        # NOTE: the old `if not h_link: continue` guard was dead code —
        # ParseResult is a 6-tuple and therefore always truthy.
        if h_link.scheme:
            handled_links.append(h_link.netloc)
        else:
            # Scheme-less input: strip any path component, then require a
            # dot so non-domain tokens are rejected.
            candidate = h_link.path.split('/')[0]
            if '.' in candidate:
                handled_links.append(candidate)
    # A set removes duplicate domains.
    return set(handled_links)
|
AminovE99/link_saver
|
visited_links/redis_services.py
|
<reponame>AminovE99/link_saver
import ast
def save_link_visits(redis_instance, links, timestamp):
    """Record a batch of visited domains in the 'links' sorted set, scored by timestamp."""
    member = str(links)  # the whole batch is stored as one member: the repr of the set
    redis_instance.zadd('links', {member: timestamp})
def get_links_from(redis_instance, timestamp_from, timestamp_to):
    """
    Collect every domain visited within [timestamp_from, timestamp_to].

    Each member of the 'links' sorted set is the repr of a Python set of
    domains (see ``save_link_visits``); the members in range are parsed
    and merged.

    :param redis_instance: Redis client exposing ``zrangebyscore``
    :param timestamp_from: inclusive lower score bound
    :param timestamp_to: inclusive upper score bound
    :return: set of domain strings (empty set when nothing matched)
    """
    entries = redis_instance.zrangebyscore('links', timestamp_from, timestamp_to)
    all_links = set()
    # FIX: previously an empty result returned a *list* while a non-empty one
    # returned a set; now the return type is consistently a set.
    for entry in entries:
        all_links.update(ast.literal_eval(entry))
    return all_links
|
AminovE99/link_saver
|
visited_links/views.py
|
import json
import time
import redis
from django.http import JsonResponse
from rest_framework.views import APIView
from link_saver import settings
from visited_links.redis_services import save_link_visits, get_links_from
from visited_links.utils import handle_links
class VisitedLinksRegisterView(APIView):
    """Accepts POSTed visit payloads and records the contained domains in Redis."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # One Redis client per view instance; decode_responses yields str values.
        self.redis_instance = redis.StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            decode_responses=True,
        )

    def post(self, request):
        """Register the links in the request body under the current timestamp."""
        current_timestamp = int(time.time())
        links = request.data.get('links')
        if not links:
            return JsonResponse(data={'status': "Links not found"}, status=422)
        # Reduce the raw link strings to a set of domains before storing.
        domains = handle_links(links)
        save_link_visits(self.redis_instance, domains, current_timestamp)
        return JsonResponse(data={'status': 'ok'}, status=200)
class GetLinksRegisterView(APIView):
    """Returns the unique domains visited within a requested time window."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # One Redis client per view instance; decode_responses yields str values.
        self.redis_instance = redis.StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            decode_responses=True,
        )

    def get(self, request):
        """Return the domains visited between the 'from' and 'to' query parameters."""
        from_timestamp = request.GET.get('from')
        to_timestamp = request.GET.get('to')
        # Guard clauses: missing 'from', missing 'to', then non-numeric values.
        if from_timestamp is None:
            return JsonResponse(data={'status': "From timestamp not found"}, status=422)
        if to_timestamp is None:
            return JsonResponse(data={'status': "To timestamp not found"}, status=422)
        if not to_timestamp.isdigit() or not from_timestamp.isdigit():
            return JsonResponse(data={'status': "Timestamp Validation error"}, status=422)
        # Fetch the merged domain set for the validated window.
        domains = get_links_from(self.redis_instance, from_timestamp, to_timestamp)
        return JsonResponse(data={'domains': list(domains), 'status': 'ok'})
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_utils/test_tcp.py
|
"""
Units tests for openff.evaluator.utils.tcp
"""
from openff.evaluator.utils import tcp
def test_message_packing():
    """Test that packing / unpacking ints works as expected"""
    packed = tcp.pack_int(20)
    unpacked = tcp.unpack_int(packed)[0]
    assert unpacked == 20
def test_message_type_enum():
    """Test the message type enum creation."""
    expected_members = {
        0: tcp.EvaluatorMessageTypes.Undefined,
        1: tcp.EvaluatorMessageTypes.Submission,
        2: tcp.EvaluatorMessageTypes.Query,
    }
    for value, member in expected_members.items():
        assert tcp.EvaluatorMessageTypes(value) == member
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_plugins.py
|
"""
Units tests for the openff.evaluator.plugins module.
"""
import pkg_resources
from openff.evaluator.layers import (
registered_calculation_layers,
registered_calculation_schemas,
)
from openff.evaluator.plugins import register_default_plugins, register_external_plugins
from openff.evaluator.workflow import registered_workflow_protocols
def test_register_default_plugins():
    """Check that registering the default plugins populates every registry."""
    register_default_plugins()
    for registry in (
        registered_workflow_protocols,
        registered_calculation_layers,
        registered_calculation_schemas,
    ):
        assert len(registry) > 0
def test_register_external_plugins(caplog):
    """This test is based on `this stack overflow answer
    <https://stackoverflow.com/a/48666503/11808960>`_
    """
    # Build a fake distribution whose entry points will be discovered by
    # the plugin loader via the global working_set.
    fake_distribution = pkg_resources.Distribution(__file__)
    entry_points = {
        # Valid plugin module.
        "dummy_1": pkg_resources.EntryPoint.parse(
            "dummy_1 = openff.evaluator.properties", dist=fake_distribution
        ),
        # Deliberately misspelled module ("propertis") that must fail to load.
        "dummy_2": pkg_resources.EntryPoint.parse(
            "dummy_2 = openff.evaluator.propertis", dist=fake_distribution
        ),
    }
    fake_distribution._ep_map = {"openff_evaluator.plugins": entry_points}
    # Register the fake distribution with the global working_set.
    pkg_resources.working_set.add(fake_distribution, "dummy_1")
    pkg_resources.working_set.add(fake_distribution, "dummy_2")
    register_external_plugins()
    # Check that we could / couldn't load the correct plugins.
    assert "Could not load the dummy_1" not in caplog.text
    assert "Could not load the dummy_2" in caplog.text
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_datasets/test_curation/test_workflow.py
|
<reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/tests/test_datasets/test_curation/test_workflow.py
import numpy
import pandas
import pytest
from openff.units import unit
from openff.evaluator.datasets import (
MeasurementSource,
PhysicalPropertyDataSet,
PropertyPhase,
)
from openff.evaluator.datasets.curation.components.filtering import (
FilterByPressureSchema,
FilterByTemperatureSchema,
)
from openff.evaluator.datasets.curation.workflow import (
CurationWorkflow,
CurationWorkflowSchema,
)
from openff.evaluator.properties import Density
from openff.evaluator.substances import Substance
from openff.evaluator.thermodynamics import ThermodynamicState
@pytest.fixture(scope="module")
def data_frame() -> pandas.DataFrame:
    """Build a frame of three density measurements that differ only in state point."""
    # (temperature / K, pressure / kPa) for each measurement.
    state_points = [
        (298.15, 101.325),
        (305.15, 101.325),
        (298.15, 105.325),
    ]
    measurements = [
        Density(
            thermodynamic_state=ThermodynamicState(
                temperature=temperature * unit.kelvin,
                pressure=pressure * unit.kilopascal,
            ),
            phase=PropertyPhase.Liquid,
            value=1.0 * Density.default_unit(),
            uncertainty=1.0 * Density.default_unit(),
            source=MeasurementSource(doi=" "),
            substance=Substance.from_components("C"),
        )
        for temperature, pressure in state_points
    ]
    data_set = PhysicalPropertyDataSet()
    data_set.add_properties(*measurements)
    return data_set.to_pandas()
@pytest.fixture(scope="module")
def data_set(data_frame: pandas.DataFrame) -> PhysicalPropertyDataSet:
    """Round-trip the module-level data frame back into a property data set."""
    round_tripped = PhysicalPropertyDataSet.from_pandas(data_frame)
    return round_tripped
def test_workflow_data_frame(data_frame):
    """Test that a simple curation workflow can be applied to a data frame."""
    temperature_filter = FilterByTemperatureSchema(
        minimum_temperature=290.0, maximum_temperature=300.0
    )
    pressure_filter = FilterByPressureSchema(
        minimum_pressure=101.3, maximum_pressure=101.4
    )
    schema = CurationWorkflowSchema(
        component_schemas=[temperature_filter, pressure_filter]
    )
    filtered_frame = CurationWorkflow.apply(data_frame, schema)
    # Only the 298.15 K / 101.325 kPa entry survives both filters.
    assert isinstance(filtered_frame, pandas.DataFrame)
    assert len(filtered_frame) == 1
    assert numpy.isclose(filtered_frame["Temperature (K)"].values[0], 298.15)
    assert numpy.isclose(filtered_frame["Pressure (kPa)"].values[0], 101.325)
def test_workflow_data_set(data_set):
    """Test that a simple curation workflow can be applied to a data set."""
    temperature_filter = FilterByTemperatureSchema(
        minimum_temperature=290.0, maximum_temperature=300.0
    )
    pressure_filter = FilterByPressureSchema(
        minimum_pressure=101.3, maximum_pressure=101.4
    )
    schema = CurationWorkflowSchema(
        component_schemas=[temperature_filter, pressure_filter]
    )
    filtered_set = CurationWorkflow.apply(data_set, schema)
    # Only the 298.15 K / 101.325 kPa entry survives both filters.
    assert isinstance(filtered_set, PhysicalPropertyDataSet)
    assert len(filtered_set) == 1
    surviving_state = filtered_set.properties[0].thermodynamic_state
    assert numpy.isclose(surviving_state.temperature, 298.15 * unit.kelvin)
    assert numpy.isclose(surviving_state.pressure, 101.325 * unit.kilopascal)
|
jaketanderson/openff-evaluator
|
openff/evaluator/utils/exceptions.py
|
"""
A collection of commonly raised python exceptions.
"""
import traceback
from typing import Optional
from openff.evaluator.utils.serialization import TypedBaseModel
class EvaluatorException(TypedBaseModel, BaseException):
    """A serializable wrapper around an `Exception`."""

    @classmethod
    def from_exception(cls, exception):
        """Initialize this class from an existing exception.

        Parameters
        ----------
        exception: Exception
            The existing exception

        Returns
        -------
        cls
            The initialized exception object.
        """
        # Capture the full formatted traceback as the message.
        formatted = traceback.format_exception(None, exception, exception.__traceback__)
        return cls(formatted)

    def __init__(self, message=None):
        """Constructs a new EvaluatorException object.

        Parameters
        ----------
        message: str or list of str
            Information about the raised exception.
        """
        super(EvaluatorException, self).__init__(message)
        self.message = message

    def __getstate__(self):
        # Only the message is serialized.
        return {"message": self.message}

    def __setstate__(self, state):
        self.message = state["message"]

    def __str__(self):
        # A list of traceback lines is joined into a single string.
        if isinstance(self.message, list):
            return "".join(self.message)
        return str(self.message)
class MissingOptionalDependency(EvaluatorException):
    """An exception raised when an optional dependency is required
    but cannot be found.

    Attributes
    ----------
    library_name
        The name of the missing library.
    license_issue
        Whether the library was importable but was unusable due
        to a missing license.
    """

    def __init__(
        self,
        library_name: str,
        license_issue: bool = False,
        extra: Optional[str] = None,
    ):
        """
        Parameters
        ----------
        library_name
            The name of the missing library.
        license_issue
            Whether the library was importable but was unusable due
            to a missing license.
        extra
            An extra string to append to the error message.
        """
        # Assemble the message from its optional fragments, space-separated.
        fragments = [f"The optional {library_name} module could not be imported."]
        if license_issue:
            fragments.append("This is due to a missing license.")
        if extra:
            fragments.append(extra)
        super(MissingOptionalDependency, self).__init__(" ".join(fragments))
        self.library_name = library_name
        self.license_issue = license_issue
|
jaketanderson/openff-evaluator
|
openff/evaluator/properties/enthalpy.py
|
"""
A collection of enthalpy physical property definitions.
"""
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED, PlaceholderValue
from openff.evaluator.datasets import PhysicalProperty, PropertyPhase
from openff.evaluator.datasets.thermoml import thermoml_property
from openff.evaluator.layers import register_calculation_schema
from openff.evaluator.layers.reweighting import ReweightingLayer, ReweightingSchema
from openff.evaluator.layers.simulation import SimulationLayer, SimulationSchema
from openff.evaluator.properties.properties import EstimableExcessProperty
from openff.evaluator.protocols import analysis, groups, miscellaneous
from openff.evaluator.protocols.utils import (
generate_reweighting_protocols,
generate_simulation_protocols,
)
from openff.evaluator.storage.query import SimulationDataQuery
from openff.evaluator.thermodynamics import Ensemble
from openff.evaluator.utils.observables import ObservableType
from openff.evaluator.workflow.schemas import WorkflowSchema
from openff.evaluator.workflow.utils import ProtocolPath
@thermoml_property(
    "Excess molar enthalpy (molar enthalpy of mixing), kJ/mol",
    supported_phases=PropertyPhase.Liquid,
)
class EnthalpyOfMixing(EstimableExcessProperty):
    """A class representation of an enthalpy of mixing property"""

    @classmethod
    def default_unit(cls):
        # Enthalpies of mixing are reported in kJ / mol.
        return unit.kilojoule / unit.mole

    @classmethod
    def _observable_type(cls) -> ObservableType:
        return ObservableType.Enthalpy

    @classmethod
    def default_reweighting_schema(
        cls,
        absolute_tolerance: unit.Quantity = UNDEFINED,
        relative_tolerance: float = UNDEFINED,
        n_effective_samples: int = 50,
    ) -> ReweightingSchema:
        """Build the default reweighting schema for this property.

        The base class produces a schema that reweights the excess *reduced
        potential*; this override appends a protocol that multiplies that
        result by 1/beta (kT) to approximate the excess enthalpy.
        """
        calculation_schema = super(EnthalpyOfMixing, cls)._default_reweighting_schema(
            ObservableType.ReducedPotential,
            absolute_tolerance,
            relative_tolerance,
            n_effective_samples,
        )
        # Divide the excess reduced potential by beta to get an approximation
        # of the excess enthalpy.
        excess_enthalpy_of_mixing = miscellaneous.MultiplyValue(
            "excess_enthalpy_of_mixing"
        )
        # Feed the schema's current final value into the new multiply protocol.
        excess_enthalpy_of_mixing.value = (
            calculation_schema.workflow_schema.final_value_source
        )
        excess_enthalpy_of_mixing.multiplier = ProtocolPath(
            "thermodynamic_state.inverse_beta", "global"
        )
        # Update the workflow schema.
        calculation_schema.workflow_schema.protocol_schemas.append(
            excess_enthalpy_of_mixing.schema
        )
        # The multiply protocol's output becomes the schema's final value.
        calculation_schema.workflow_schema.final_value_source = ProtocolPath(
            "result", excess_enthalpy_of_mixing.id
        )
        return calculation_schema
@thermoml_property(
"Molar enthalpy of vaporization or sublimation, kJ/mol",
supported_phases=PropertyPhase.Liquid | PropertyPhase.Gas,
)
class EnthalpyOfVaporization(PhysicalProperty):
"""A class representation of an enthalpy of vaporization property"""
@classmethod
def default_unit(cls):
return unit.kilojoule / unit.mole
@staticmethod
def _default_reweighting_storage_query():
"""Returns the default storage queries to use when
retrieving cached simulation data to reweight.
This will include one query for the liquid data (with the
key `"liquid_data"`) and one for the gas data (with the key
`"gas_data"`).
Returns
-------
dict of str and SimulationDataQuery
The dictionary of queries.
"""
liquid_data_query = SimulationDataQuery()
liquid_data_query.substance = PlaceholderValue()
liquid_data_query.property_phase = PropertyPhase.Liquid
gas_data_query = SimulationDataQuery()
gas_data_query.substance = PlaceholderValue()
gas_data_query.property_phase = PropertyPhase.Gas
gas_data_query.number_of_molecules = 1
return {
"liquid_data": liquid_data_query,
"gas_data": gas_data_query,
}
    @staticmethod
    def default_simulation_schema(
        absolute_tolerance=UNDEFINED, relative_tolerance=UNDEFINED, n_molecules=1000
    ):
        """Returns the default calculation schema to use when estimating
        this class of property from direct simulations.

        Parameters
        ----------
        absolute_tolerance: openff.evaluator.unit.Quantity, optional
            The absolute tolerance to estimate the property to within.
        relative_tolerance: float
            The tolerance (as a fraction of the properties reported
            uncertainty) to estimate the property to within.
        n_molecules: int
            The number of molecules to use in the simulation.

        Returns
        -------
        SimulationSchema
            The schema to follow when estimating this property.
        """
        # The two tolerance options are mutually exclusive.
        assert absolute_tolerance == UNDEFINED or relative_tolerance == UNDEFINED
        calculation_schema = SimulationSchema()
        calculation_schema.absolute_tolerance = absolute_tolerance
        calculation_schema.relative_tolerance = relative_tolerance
        use_target_uncertainty = (
            absolute_tolerance != UNDEFINED or relative_tolerance != UNDEFINED
        )
        # Define a custom conditional group which will ensure both the liquid and
        # gas enthalpies are estimated to within the specified uncertainty tolerance.
        converge_uncertainty = groups.ConditionalGroup("conditional_group")
        converge_uncertainty.max_iterations = 100
        # Define the protocols to perform the simulation in the liquid phase.
        average_liquid_energy = analysis.AverageObservable("average_liquid_potential")
        # Report the liquid potential per molecule so it is directly comparable
        # with the single-molecule gas-phase simulation.
        average_liquid_energy.divisor = n_molecules
        (
            liquid_protocols,
            liquid_value_source,
            liquid_output_to_store,
        ) = generate_simulation_protocols(
            average_liquid_energy,
            use_target_uncertainty,
            "_liquid",
            converge_uncertainty,
            n_molecules=n_molecules,
        )
        liquid_output_to_store.property_phase = PropertyPhase.Liquid
        liquid_protocols.analysis_protocol.observable = ProtocolPath(
            f"observables[{ObservableType.PotentialEnergy.value}]",
            liquid_protocols.production_simulation.id,
        )
        # Define the protocols to perform the simulation in the gas phase.
        average_gas_energy = analysis.AverageObservable("average_gas_potential")
        (
            gas_protocols,
            gas_value_source,
            gas_output_to_store,
        ) = generate_simulation_protocols(
            average_gas_energy,
            use_target_uncertainty,
            "_gas",
            converge_uncertainty,
            n_molecules=1,
        )
        gas_output_to_store.property_phase = PropertyPhase.Gas
        gas_protocols.analysis_protocol.observable = ProtocolPath(
            f"observables[{ObservableType.PotentialEnergy.value}]",
            gas_protocols.production_simulation.id,
        )
        # Specify that for the gas phase only a single molecule in vacuum should be
        # created.
        gas_protocols.build_coordinates.max_molecules = 1
        gas_protocols.build_coordinates.mass_density = (
            0.01 * unit.gram / unit.milliliter
        )
        # Run the gas phase simulations in the NVT ensemble without PBC
        gas_protocols.energy_minimisation.enable_pbc = False
        gas_protocols.equilibration_simulation.ensemble = Ensemble.NVT
        gas_protocols.equilibration_simulation.enable_pbc = False
        gas_protocols.production_simulation.ensemble = Ensemble.NVT
        gas_protocols.production_simulation.enable_pbc = False
        gas_protocols.production_simulation.steps_per_iteration = 15000000
        gas_protocols.production_simulation.output_frequency = 5000
        gas_protocols.production_simulation.checkpoint_frequency = 100
        # Due to a bizarre issue where the OMM Reference platform is
        # the fastest at computing properties of a single molecule
        # in vacuum, we enforce those inputs which will force the
        # gas calculations to run on the Reference platform.
        gas_protocols.equilibration_simulation.high_precision = True
        gas_protocols.equilibration_simulation.allow_gpu_platforms = False
        gas_protocols.production_simulation.high_precision = True
        gas_protocols.production_simulation.allow_gpu_platforms = False
        # Combine the values to estimate the final energy of vaporization
        # (gas potential minus per-molecule liquid potential).
        energy_of_vaporization = miscellaneous.SubtractValues("energy_of_vaporization")
        energy_of_vaporization.value_b = ProtocolPath("value", average_gas_energy.id)
        energy_of_vaporization.value_a = ProtocolPath("value", average_liquid_energy.id)
        # Add the ideal-gas RT term: H_vap = dE + R * T.
        ideal_volume = miscellaneous.MultiplyValue("ideal_volume")
        ideal_volume.value = 1.0 * unit.molar_gas_constant
        ideal_volume.multiplier = ProtocolPath(
            "thermodynamic_state.temperature", "global"
        )
        enthalpy_of_vaporization = miscellaneous.AddValues("enthalpy_of_vaporization")
        enthalpy_of_vaporization.values = [
            ProtocolPath("result", energy_of_vaporization.id),
            ProtocolPath("result", ideal_volume.id),
        ]
        # Add the extra protocols and conditions to the custom conditional group.
        converge_uncertainty.add_protocols(
            energy_of_vaporization, ideal_volume, enthalpy_of_vaporization
        )
        if use_target_uncertainty:
            # Iterate both simulations until the combined enthalpy's error
            # drops below the requested target uncertainty.
            condition = groups.ConditionalGroup.Condition()
            condition.type = groups.ConditionalGroup.Condition.Type.LessThan
            condition.left_hand_value = ProtocolPath(
                "result.error",
                converge_uncertainty.id,
                enthalpy_of_vaporization.id,
            )
            condition.right_hand_value = ProtocolPath("target_uncertainty", "global")
            gas_protocols.production_simulation.total_number_of_iterations = (
                ProtocolPath("current_iteration", converge_uncertainty.id)
            )
            liquid_protocols.production_simulation.total_number_of_iterations = (
                ProtocolPath("current_iteration", converge_uncertainty.id)
            )
            converge_uncertainty.add_condition(condition)
        # Build the workflow schema.
        schema = WorkflowSchema()
        schema.protocol_schemas = [
            liquid_protocols.build_coordinates.schema,
            liquid_protocols.assign_parameters.schema,
            liquid_protocols.energy_minimisation.schema,
            liquid_protocols.equilibration_simulation.schema,
            liquid_protocols.decorrelate_trajectory.schema,
            liquid_protocols.decorrelate_observables.schema,
            gas_protocols.build_coordinates.schema,
            gas_protocols.assign_parameters.schema,
            gas_protocols.energy_minimisation.schema,
            gas_protocols.equilibration_simulation.schema,
            gas_protocols.decorrelate_trajectory.schema,
            gas_protocols.decorrelate_observables.schema,
            converge_uncertainty.schema,
        ]
        schema.outputs_to_store = {
            "liquid_data": liquid_output_to_store,
            "gas_data": gas_output_to_store,
        }
        schema.final_value_source = ProtocolPath(
            "result", converge_uncertainty.id, enthalpy_of_vaporization.id
        )
        calculation_schema.workflow_schema = schema
        return calculation_schema
@classmethod
def default_reweighting_schema(
    cls,
    absolute_tolerance=UNDEFINED,
    relative_tolerance=UNDEFINED,
    n_effective_samples=50,
):
    """Returns the default calculation schema to use when estimating
    this property by reweighting existing data.

    Parameters
    ----------
    absolute_tolerance: openff.evaluator.unit.Quantity, optional
        The absolute tolerance to estimate the property to within.
    relative_tolerance: float
        The tolerance (as a fraction of the properties reported
        uncertainty) to estimate the property to within.
    n_effective_samples: int
        The minimum number of effective samples to require when
        reweighting the cached simulation data.

    Returns
    -------
    ReweightingSchema
        The schema to follow when estimating this property.
    """
    # The two tolerances are mutually exclusive -- at most one may be set.
    assert absolute_tolerance == UNDEFINED or relative_tolerance == UNDEFINED

    calculation_schema = ReweightingSchema()
    calculation_schema.absolute_tolerance = absolute_tolerance
    calculation_schema.relative_tolerance = relative_tolerance

    # Set up the storage queries
    calculation_schema.storage_queries = cls._default_reweighting_storage_query()

    # Set up a protocol to extract the liquid phase energy from the existing data.
    liquid_protocols, liquid_replicator = generate_reweighting_protocols(
        ObservableType.PotentialEnergy,
        id_suffix="_liquid",
        replicator_id="liquid_data_replicator",
    )
    liquid_replicator.template_values = ProtocolPath("liquid_data", "global")

    liquid_protocols.reweight_observable.required_effective_samples = (
        n_effective_samples
    )

    # Divide the potential by the number of liquid phase molecules from the first
    # piece of cached data, yielding a per-molecule liquid energy.
    divide_by_liquid_molecules = miscellaneous.DivideValue(
        "divide_by_liquid_molecules"
    )
    divide_by_liquid_molecules.value = ProtocolPath(
        "value", liquid_protocols.reweight_observable.id
    )
    divide_by_liquid_molecules.divisor = ProtocolPath(
        "total_number_of_molecules",
        liquid_protocols.unpack_stored_data.id.replace(
            liquid_replicator.placeholder_id, "0"
        ),
    )

    # Set up a protocol to extract the gas phase energy from the existing data.
    gas_protocols, gas_replicator = generate_reweighting_protocols(
        ObservableType.PotentialEnergy,
        id_suffix="_gas",
        replicator_id="gas_data_replicator",
    )
    gas_replicator.template_values = ProtocolPath("gas_data", "global")

    gas_protocols.reweight_observable.required_effective_samples = (
        n_effective_samples
    )

    # Turn off PBC for the gas phase (a single molecule in vacuum).
    gas_protocols.evaluate_reference_potential.enable_pbc = False
    gas_protocols.evaluate_target_potential.enable_pbc = False

    # Combine the values to estimate the final enthalpy of vaporization:
    # dU_vap = U_gas - U_liquid / N_molecules  (SubtractValues: value_b - value_a)
    energy_of_vaporization = miscellaneous.SubtractValues("energy_of_vaporization")
    energy_of_vaporization.value_b = ProtocolPath(
        "value", gas_protocols.reweight_observable.id
    )
    energy_of_vaporization.value_a = ProtocolPath(
        "result", divide_by_liquid_molecules.id
    )

    # The ideal-gas RT term which converts the energy into an enthalpy.
    ideal_volume = miscellaneous.MultiplyValue("ideal_volume")
    ideal_volume.value = 1.0 * unit.molar_gas_constant
    ideal_volume.multiplier = ProtocolPath(
        "thermodynamic_state.temperature", "global"
    )

    # dH_vap = dU_vap + RT
    enthalpy_of_vaporization = miscellaneous.AddValues("enthalpy_of_vaporization")
    enthalpy_of_vaporization.values = [
        ProtocolPath("result", energy_of_vaporization.id),
        ProtocolPath("result", ideal_volume.id),
    ]

    # Build the workflow schema.
    schema = WorkflowSchema()
    schema.protocol_schemas = [
        *(x.schema for x in liquid_protocols if x is not None),
        *(x.schema for x in gas_protocols if x is not None),
        divide_by_liquid_molecules.schema,
        energy_of_vaporization.schema,
        ideal_volume.schema,
        enthalpy_of_vaporization.schema,
    ]
    schema.protocol_replicators = [liquid_replicator, gas_replicator]
    schema.final_value_source = ProtocolPath("result", enthalpy_of_vaporization.id)

    calculation_schema.workflow_schema = schema
    return calculation_schema
# Register the properties via the plugin system. Each property type is
# registered with both the direct-simulation layer and the cached-data
# reweighting layer, pointing at the corresponding default schema factory.
register_calculation_schema(
    EnthalpyOfMixing, SimulationLayer, EnthalpyOfMixing.default_simulation_schema
)
register_calculation_schema(
    EnthalpyOfMixing, ReweightingLayer, EnthalpyOfMixing.default_reweighting_schema
)
register_calculation_schema(
    EnthalpyOfVaporization,
    SimulationLayer,
    EnthalpyOfVaporization.default_simulation_schema,
)
register_calculation_schema(
    EnthalpyOfVaporization,
    ReweightingLayer,
    EnthalpyOfVaporization.default_reweighting_schema,
)
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_thermodynamics.py
|
"""
Units tests for openff.evaluator.thermodynamics
"""
import pint
import pytest
from openff.units import unit
from openff.evaluator.thermodynamics import ThermodynamicState
def test_state_equality():
    """States whose temperature and pressure agree to within the comparison
    precision should compare (and hash) equal; larger differences should not."""

    def make_state(value):
        return ThermodynamicState(
            temperature=value * unit.kelvin, pressure=value * unit.pascals
        )

    reference = make_state(1.0)

    # A difference of 4e-4 is below the comparison precision.
    assert reference == make_state(1.0004)

    # A difference of 1e-3 is large enough to distinguish the states.
    distinct = make_state(1.001)
    assert reference != distinct
    assert hash(reference) != hash(distinct)

    # 1.0005 rounds towards the reference state, not the distinct one.
    boundary = make_state(1.0005)
    assert reference == boundary
    assert distinct != boundary

    # The comparison is symmetric about the reference value.
    assert reference == make_state(0.9995)
# Both a temperature-only state and a temperature + pressure state are valid.
@pytest.mark.parametrize(
    "state",
    [
        ThermodynamicState(temperature=1.0 * unit.kelvin),
        ThermodynamicState(temperature=1.0 * unit.kelvin, pressure=1.0 * unit.pascals),
    ],
)
def test_state_valid_checks(state):
    """``validate`` should pass silently for well-formed states."""
    state.validate()
# Invalid states: missing temperature, wrong units on either quantity, and a
# negative absolute temperature.
@pytest.mark.parametrize(
    "state",
    [
        ThermodynamicState(),
        ThermodynamicState(temperature=1.0 * unit.pascals),
        ThermodynamicState(temperature=1.0 * unit.pascals, pressure=1.0 * unit.kelvin),
        ThermodynamicState(temperature=-1.0 * unit.kelvin),
    ],
)
def test_state_invalid_checks(state):
    """``validate`` should raise for malformed states."""
    with pytest.raises((ValueError, AssertionError, pint.errors.DimensionalityError)):
        state.validate()
|
jaketanderson/openff-evaluator
|
openff/evaluator/backends/__init__.py
|
from .backends import CalculationBackend, ComputeResources, QueueWorkerResources

# ``__all__`` must contain the exported *names* as strings; listing the class
# objects themselves causes ``from openff.evaluator.backends import *`` to
# raise ``TypeError: Item in __all__ must be str``.
__all__ = [
    "ComputeResources",
    "CalculationBackend",
    "QueueWorkerResources",
]
|
jaketanderson/openff-evaluator
|
openff/evaluator/datasets/provenance.py
|
"""A collection of classes to help track the provenance
of measured / estimated properties.
"""
from openff.evaluator.utils.serialization import TypedBaseModel
class Source(TypedBaseModel):
    """Base container for information about how a property was
    measured / calculated.

    .. todo:: Swap this out with a more general provenance class.
    """

    def __getstate__(self):
        # The base class carries no state of its own; subclasses override.
        return {}

    def __setstate__(self, state):
        pass
class MeasurementSource(Source):
    """Metadata about how a physical property was measured by experiment.

    Either the DOI and/or the free-form reference should be provided, as every
    measured observable must be traceable to its source, even when measured
    in-house.

    Attributes
    ----------
    doi : str
        The DOI of the source; the preferred identifier. Defaults to ``""``.
    reference : str
        A long-form description of the source, for when no DOI is available
        or extra context is wanted. Defaults to ``""``.
    """

    def __init__(self, doi="", reference=""):
        """Constructs a new MeasurementSource object.

        Parameters
        ----------
        doi : str
            The DOI of the source (may be left empty).
        reference : str
            A long-form description of the source (may be left empty).
        """
        self.doi = doi
        self.reference = reference

    def __getstate__(self):
        return {"doi": self.doi, "reference": self.reference}

    def __setstate__(self, state):
        self.doi = state["doi"]
        self.reference = state["reference"]
class CalculationSource(Source):
    """Metadata about how a physical property was calculated.

    This records the fidelity at which the property was calculated (e.g.
    direct simulation, reweighting, ...) together with the parameters used
    as part of the calculation.

    Attributes
    ----------
    fidelity : str
        The fidelity at which the property was calculated.
    provenance : dict of str and Any
        A dictionary containing information about how the property was
        calculated.
    """

    def __init__(self, fidelity=None, provenance=None):
        """Constructs a new CalculationSource object.

        Parameters
        ----------
        fidelity : str
            The fidelity at which the property was calculated.
        provenance : dict of str and Any
            A dictionary containing information about how the property was
            calculated.
        """
        self.fidelity = fidelity
        self.provenance = provenance

    def __getstate__(self):
        return {"fidelity": self.fidelity, "provenance": self.provenance}

    def __setstate__(self, state):
        self.fidelity = state["fidelity"]
        self.provenance = state["provenance"]
|
jaketanderson/openff-evaluator
|
openff/evaluator/protocols/miscellaneous.py
|
<reponame>jaketanderson/openff-evaluator
"""
A collection of miscellaneous protocols, mostly aimed at performing simple
math operations.
"""
import typing
import numpy as np
from openff.units import unit
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.forcefield import ParameterGradient, ParameterGradientKey
from openff.evaluator.substances import Component, MoleFraction, Substance
from openff.evaluator.utils.observables import Observable, ObservableArray
from openff.evaluator.workflow import Protocol, workflow_protocol
from openff.evaluator.workflow.attributes import InputAttribute, OutputAttribute
@workflow_protocol()
class AddValues(Protocol):
    """A protocol to add together a list of values.

    Notes
    -----
    The `values` input must either be a list of openff.evaluator.unit.Quantity, a
    ProtocolPath to a list of openff.evaluator.unit.Quantity, or a list of ProtocolPath
    which each point to a openff.evaluator.unit.Quantity.
    """

    values = InputAttribute(
        docstring="The values to add together.", type_hint=list, default_value=UNDEFINED
    )
    result = OutputAttribute(
        docstring="The sum of the values.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
    )

    def _execute(self, directory, available_resources):
        if len(self.values) < 1:
            raise ValueError("There were no values to add together")

        # Accumulate onto the first entry. NOTE(review): ``+=`` may mutate
        # ``self.values[0]`` in place when its type defines ``__iadd__`` --
        # confirm the inputs are not reused elsewhere after this protocol runs.
        self.result = self.values[0]

        for value in self.values[1:]:
            self.result += value
@workflow_protocol()
class SubtractValues(Protocol):
    """A protocol to subtract one value from another such that:

    `result = value_b - value_a`
    """

    value_a = InputAttribute(
        docstring="`value_a` in the formula `result` = `value_b` - `value_a`.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
        default_value=UNDEFINED,
    )
    value_b = InputAttribute(
        docstring="`value_b` in the formula `result` = `value_b` - `value_a`.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
        default_value=UNDEFINED,
    )
    result = OutputAttribute(
        docstring="The results of `value_b` - `value_a`.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
    )

    def _execute(self, directory, available_resources):
        # Note the operand order: ``value_b`` minus ``value_a``.
        self.result = self.value_b - self.value_a
@workflow_protocol()
class MultiplyValue(Protocol):
    """A protocol which multiplies a value by a specified scalar"""

    value = InputAttribute(
        docstring="The value to multiply.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
        default_value=UNDEFINED,
    )
    multiplier = InputAttribute(
        docstring="The scalar to multiply by.",
        type_hint=typing.Union[int, float, unit.Quantity],
        default_value=UNDEFINED,
    )
    result = OutputAttribute(
        docstring="The result of the multiplication.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
    )

    def _execute(self, directory, available_resources):
        # ``result = value * multiplier``; units combine per the operands.
        self.result = self.value * self.multiplier
@workflow_protocol()
class DivideValue(Protocol):
    """A protocol which divides a value by a specified scalar"""

    value = InputAttribute(
        docstring="The value to divide.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
        default_value=UNDEFINED,
    )
    divisor = InputAttribute(
        docstring="The scalar to divide by.",
        type_hint=typing.Union[int, float, unit.Quantity],
        default_value=UNDEFINED,
    )
    result = OutputAttribute(
        docstring="The result of the division.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
    )

    def _execute(self, directory, available_resources):
        # ``result = value / divisor``; true division per the operand types.
        self.result = self.value / self.divisor
@workflow_protocol()
class WeightByMoleFraction(Protocol):
    """Multiplies a value by the mole fraction of a component
    in a `Substance`.
    """

    value = InputAttribute(
        docstring="The value to be weighted.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
        default_value=UNDEFINED,
    )
    # A ``Substance`` wrapping exactly one chemical species -- its mole
    # fraction in ``full_substance`` becomes the weight.
    component = InputAttribute(
        docstring="The component whose mole fraction to weight by.",
        type_hint=Substance,
        default_value=UNDEFINED,
    )
    full_substance = InputAttribute(
        docstring="The full substance which describes the mole fraction of the "
        "component.",
        type_hint=Substance,
        default_value=UNDEFINED,
    )
    weighted_value = OutputAttribute(
        "The value weighted by the `component`s mole fraction as determined from the "
        "`full_substance`.",
        type_hint=typing.Union[
            int,
            float,
            unit.Measurement,
            unit.Quantity,
            ParameterGradient,
            Observable,
            ObservableArray,
        ],
    )

    def _weight_values(self, mole_fraction):
        """Weights a value by a components mole fraction.

        Parameters
        ----------
        mole_fraction: float
            The mole fraction to weight by.

        Returns
        -------
        float, int, openff.evaluator.unit.Measurement, openff.evaluator.unit.Quantity, ParameterGradient
            The weighted value.
        """
        return self.value * mole_fraction

    def _execute(self, directory, available_resources):
        # ``component`` must contain exactly one species.
        assert len(self.component.components) == 1

        main_component = self.component.components[0]
        amounts = self.full_substance.get_amounts(main_component)

        # The component must appear in the full substance as a single mole
        # fraction -- multiple amounts or exact amounts are rejected below.
        if len(amounts) != 1:
            raise ValueError(
                f"More than one type of amount was defined for component "
                f"{main_component}. Only a single mole fraction must be defined.",
            )

        amount = next(iter(amounts))

        if not isinstance(amount, MoleFraction):
            raise ValueError(
                f"The component {main_component} was given as an exact amount, and "
                f"not a mole fraction"
            )

        self.weighted_value = self._weight_values(amount.value)
@workflow_protocol()
class FilterSubstanceByRole(Protocol):
    """A protocol which takes a substance as input, and returns a substance which only
    contains components whose role match a given criteria.
    """

    input_substance = InputAttribute(
        docstring="The substance to filter.",
        type_hint=Substance,
        default_value=UNDEFINED,
    )
    component_roles = InputAttribute(
        docstring="The roles to filter substance components against.",
        type_hint=list,
        default_value=UNDEFINED,
    )
    expected_components = InputAttribute(
        docstring="The number of components expected to remain after filtering. "
        "An exception is raised if this number is not matched.",
        type_hint=int,
        default_value=UNDEFINED,
        optional=True,
    )
    filtered_substance = OutputAttribute(
        docstring="The filtered substance.", type_hint=Substance
    )

    def _execute(self, directory, available_resources):
        filtered_components = []
        total_mole_fraction = 0.0

        # Collect the matching components, summing the retained mole
        # fractions so they can be re-normalised afterwards.
        for component in self.input_substance.components:
            if component.role not in self.component_roles:
                continue

            filtered_components.append(component)
            amounts = self.input_substance.get_amounts(component)

            for amount in amounts:
                if not isinstance(amount, MoleFraction):
                    continue

                total_mole_fraction += amount.value

        if self.expected_components != UNDEFINED and self.expected_components != len(
            filtered_components
        ):
            raise ValueError(
                f"The filtered substance does not contain the expected number of "
                f"components ({self.expected_components}) - {filtered_components}",
            )

        # Re-scale the retained mole fractions so they sum to one again; if
        # none were retained (e.g. only exact amounts) leave values untouched.
        inverse_mole_fraction = (
            1.0 if np.isclose(total_mole_fraction, 0.0) else 1.0 / total_mole_fraction
        )

        self.filtered_substance = Substance()

        for component in filtered_components:
            amounts = self.input_substance.get_amounts(component)

            for amount in amounts:
                if isinstance(amount, MoleFraction):
                    amount = MoleFraction(amount.value * inverse_mole_fraction)

                self.filtered_substance.add_component(component, amount)

    def validate(self, attribute_type=None):
        super(FilterSubstanceByRole, self).validate(attribute_type)
        # Every filter entry must be a ``Component.Role`` enum member.
        assert all(isinstance(x, Component.Role) for x in self.component_roles)
@workflow_protocol()
class DummyProtocol(Protocol):
    """A protocol whose only purpose is to return an input value as an output
    value."""

    # The accepted types mirror the full set of values which may flow between
    # workflow protocols.
    input_value = InputAttribute(
        docstring="A dummy input.",
        type_hint=typing.Union[
            str,
            int,
            float,
            unit.Quantity,
            unit.Measurement,
            Observable,
            ObservableArray,
            ParameterGradient,
            ParameterGradientKey,
            list,
            tuple,
            dict,
            set,
            frozenset,
        ],
        default_value=UNDEFINED,
    )
    output_value = OutputAttribute(
        docstring="A dummy output.",
        type_hint=typing.Union[
            str,
            int,
            float,
            unit.Quantity,
            unit.Measurement,
            Observable,
            ObservableArray,
            ParameterGradient,
            ParameterGradientKey,
            list,
            tuple,
            dict,
            set,
            frozenset,
        ],
    )

    def _execute(self, directory, available_resources):
        # Pass the input straight through, unchanged.
        self.output_value = self.input_value
|
jaketanderson/openff-evaluator
|
integration-tests/virtual-sites/run-tip4p.py
|
<gh_stars>10-100
import shutil
from openff.toolkit.typing.engines.smirnoff import ForceField, ParameterList
from openff.units import unit
from openmm import unit as openmm_unit
from openff.evaluator.forcefield import ParameterGradientKey, SmirnoffForceFieldSource
from openff.evaluator.protocols.coordinates import BuildCoordinatesPackmol
from openff.evaluator.protocols.forcefield import BuildSmirnoffSystem
from openff.evaluator.protocols.openmm import OpenMMEnergyMinimisation, OpenMMSimulation
from openff.evaluator.substances import Substance
from openff.evaluator.thermodynamics import Ensemble, ThermodynamicState
def tip4p_force_field() -> ForceField:
    """Build a minimal SMIRNOFF force field describing a TIP4P-like water
    model, including a divalent lone-pair virtual site.

    Returns
    -------
    ForceField
        The constructed force field.
    """
    force_field = ForceField()

    # Rigid water geometry: constrain the O-H bonds and the H-H distance.
    constraint_handler = force_field.get_parameter_handler("Constraints")
    constraint_handler.add_parameter(
        {
            "smirks": "[#1:1]-[#8X2H2+0:2]-[#1]",
            "distance": 0.9572 * openmm_unit.angstrom,
        }
    )
    constraint_handler.add_parameter(
        {
            "smirks": "[#1:1]-[#8X2H2+0]-[#1:2]",
            "distance": 1.5139 * openmm_unit.angstrom,
        }
    )

    vdw_handler = force_field.get_parameter_handler("vdW")
    # NOTE(review): the non-zero LJ parameters are tagged on the *hydrogen*
    # atom while the oxygen LJ is zeroed out -- unusual for TIP4P, but
    # presumably deliberate so the gradient test can target this parameter.
    # Confirm against the intended water model.
    vdw_handler.add_parameter(
        {
            "smirks": "[#1:1]-[#8X2H2+0]-[#1]",
            "epsilon": (
                78.0
                * openmm_unit.kelvin
                * openmm_unit.BOLTZMANN_CONSTANT_kB
                * openmm_unit.AVOGADRO_CONSTANT_NA
            ),
            "sigma": 3.154 * openmm_unit.angstrom,
        }
    )
    vdw_handler.add_parameter(
        {
            "smirks": "[#1]-[#8X2H2+0:1]-[#1]",
            "epsilon": 0.0 * openmm_unit.kilojoules_per_mole,
            "sigma": 1.0 * openmm_unit.angstrom,
        }
    )

    # Formal-charge electrostatics; the partial charges come entirely from
    # the virtual-site charge increments below.
    force_field.get_parameter_handler("Electrostatics")
    force_field.get_parameter_handler(
        "ChargeIncrementModel",
        {"version": "0.3", "partial_charge_method": "formal_charge"},
    )

    # The TIP4P "M" site: a divalent lone pair displaced from the oxygen
    # along the H-O-H bisector, drawing charge from the two hydrogens.
    virtual_site_handler = force_field.get_parameter_handler("VirtualSites")
    virtual_site_handler.add_parameter(
        {
            "smirks": "[#1:1]-[#8X2H2+0:2]-[#1:3]",
            "type": "DivalentLonePair",
            "distance": -0.15 * openmm_unit.angstrom,
            "outOfPlaneAngle": 0.0 * openmm_unit.degrees,
            "match": "once",
            "charge_increment1": 0.52 * openmm_unit.elementary_charge,
            "charge_increment2": 0.0 * openmm_unit.elementary_charge,
            "charge_increment3": 0.52 * openmm_unit.elementary_charge,
        }
    )

    # Currently required due to OpenFF issue #884
    virtual_site_handler._parameters = ParameterList(virtual_site_handler._parameters)

    return force_field
def main():
    """Run a short NPT simulation of TIP4P-like water and check that
    observables and their gradients accumulate correctly across a restart."""

    force_field = tip4p_force_field()

    # The system of interest: a box of pure water.
    substance = Substance.from_components("O")

    # Serialise the force field so the protocols below can load it from disk.
    with open("force-field.json", "w") as file:
        file.write(SmirnoffForceFieldSource.from_object(force_field).json())

    # Pack 216 water molecules into an initial box.
    build_coordinates = BuildCoordinatesPackmol("")
    build_coordinates.substance = substance
    build_coordinates.max_molecules = 216
    build_coordinates.execute("build-coords")

    # Assign the SMIRNOFF parameters to the packed system.
    apply_parameters = BuildSmirnoffSystem("")
    apply_parameters.force_field_path = "force-field.json"
    apply_parameters.coordinate_file_path = build_coordinates.coordinate_file_path
    apply_parameters.substance = substance
    apply_parameters.execute("apply-params")

    # Relax the packed coordinates before running dynamics.
    minimize = OpenMMEnergyMinimisation("")
    minimize.input_coordinate_file = build_coordinates.coordinate_file_path
    minimize.parameterized_system = apply_parameters.parameterized_system
    minimize.execute("minimize-coords")

    npt = OpenMMSimulation("")
    npt.input_coordinate_file = minimize.output_coordinate_file
    npt.parameterized_system = apply_parameters.parameterized_system
    npt.ensemble = Ensemble.NPT
    npt.thermodynamic_state = ThermodynamicState(
        temperature=298.15 * unit.kelvin, pressure=1.0 * unit.atmosphere
    )
    npt.steps_per_iteration = 500
    npt.total_number_of_iterations = 2
    # Request gradients with respect to the hydrogen LJ epsilon.
    npt.gradient_parameters = [
        ParameterGradientKey(
            tag="vdW", smirks="[#1:1]-[#8X2H2+0]-[#1]", attribute="epsilon"
        )
    ]
    npt.output_frequency = 50
    npt.execute("run-npt")

    # Snapshot the first two iterations, then extend the run to four to
    # exercise the protocol's restart / continuation behaviour.
    shutil.copytree("run-npt", "run-npt-1")

    npt.total_number_of_iterations = 4
    npt.execute("run-npt")

    # 4 iterations x 500 steps, sampled every 50 steps -> 40 samples.
    assert len(npt.observables) == 40
    assert len(npt.observables["PotentialEnergy"].gradients) == 1
    assert len(npt.observables["PotentialEnergy"].gradients[0]) == 40
# Allow this integration test to be executed directly as a script.
if __name__ == "__main__":
    main()
|
jaketanderson/openff-evaluator
|
openff/evaluator/datasets/utilities.py
|
from typing import TYPE_CHECKING, Set, Tuple
if TYPE_CHECKING:
import pandas
def reorder_data_frame(data_frame: "pandas.DataFrame") -> "pandas.DataFrame":
    """Re-order the substance columns of a data frame so that the individual
    components are alphabetically sorted.

    Parameters
    ----------
    data_frame: pandas.DataFrame
        The data frame to re-order.

    Returns
    -------
    pandas.DataFrame
        The re-ordered data frame.
    """
    import numpy
    import pandas

    min_n_components = data_frame["N Components"].min()
    max_n_components = data_frame["N Components"].max()

    # Only the one- and two-component cases have been validated.
    if max_n_components > 2:
        raise NotImplementedError(
            "Reordering more than 2 components has not yet been robustly tested."
        )

    ordered_frames = []

    # Handle each component count separately, then re-combine at the end.
    for n_components in range(min_n_components, max_n_components + 1):
        component_frame = data_frame[data_frame["N Components"] == n_components]
        ordered_frame = data_frame[data_frame["N Components"] == n_components].copy()

        component_headers = [f"Component {i + 1}" for i in range(n_components)]

        # Per-row alphabetical ordering of the component columns.
        # NOTE(review): this relies on ``numpy.argsort`` over the sub-frame
        # returning an object indexable by column name -- verify against the
        # pinned pandas version, as newer pandas may return a bare ndarray.
        component_order = numpy.argsort(component_frame[component_headers], axis=1)

        substance_headers = ["Component", "Role", "Mole Fraction", "Exact Amount"]

        # For each component slot, pull values from whichever slot sorts into
        # this position, substituting NaN where a column is absent.
        for component_index in range(n_components):
            indices = component_order[f"Component {component_index + 1}"]

            for substance_header in substance_headers:
                component_header = f"{substance_header} {component_index + 1}"

                for replacement_index in range(n_components):
                    if component_index == replacement_index:
                        continue

                    replacement_header = f"{substance_header} {replacement_index + 1}"

                    ordered_frame[component_header] = numpy.where(
                        indices == replacement_index,
                        numpy.nan
                        if replacement_header not in component_frame
                        else component_frame[replacement_header],
                        numpy.nan
                        if component_header not in component_frame
                        else component_frame[component_header],
                    )

        ordered_frames.append(ordered_frame)

    ordered_data_frame = pandas.concat(ordered_frames, ignore_index=True, sort=False)

    return ordered_data_frame
def data_frame_to_substances(data_frame: "pandas.DataFrame") -> Set[Tuple[str, ...]]:
    """Extracts all unique substances from a data frame and returns them
    as a set, where each element in the set is a tuple of smiles patterns
    which represent a single substance.

    Parameters
    ----------
    data_frame
        The data frame to extract the substances from.

    Returns
    -------
    The set of unique substances.
    """
    if len(data_frame) == 0:
        return set()

    # Sort the component columns alphabetically first so that permutations of
    # the same substance collapse onto a single tuple.
    ordered_data = reorder_data_frame(data_frame)

    unique_substances: Set[Tuple[str, ...]] = set()

    component_count_range = range(
        data_frame["N Components"].min(), data_frame["N Components"].max() + 1
    )

    for component_count in component_count_range:
        subset = ordered_data[ordered_data["N Components"] == component_count]

        smiles_columns = [
            subset[f"Component {index + 1}"] for index in range(component_count)
        ]

        unique_substances.update(zip(*smiles_columns))

    return unique_substances
|
jaketanderson/openff-evaluator
|
openff/evaluator/forcefield/__init__.py
|
from .forcefield import (
    ForceFieldSource,
    LigParGenForceFieldSource,
    SmirnoffForceFieldSource,
    TLeapForceFieldSource,
)
from .gradients import ParameterGradient, ParameterGradientKey

# ``__all__`` must contain the exported *names* as strings; listing the class
# objects themselves causes ``from openff.evaluator.forcefield import *`` to
# raise ``TypeError: Item in __all__ must be str``.
__all__ = [
    "ForceFieldSource",
    "SmirnoffForceFieldSource",
    "LigParGenForceFieldSource",
    "TLeapForceFieldSource",
    "ParameterGradient",
    "ParameterGradientKey",
]
|
jaketanderson/openff-evaluator
|
openff/evaluator/substances/components.py
|
<filename>openff/evaluator/substances/components.py
"""
An API for defining and creating substances.
"""
from enum import Enum
from openff.evaluator.attributes import UNDEFINED, Attribute, AttributeClass
class Component(AttributeClass):
    """Defines a single component in a chemical system, as well
    as it's role within the system (if any).
    """

    class Role(Enum):
        """An enum which describes the role of a component in the system,
        such as whether the component is a solvent, a solute, a receptor etc.

        These roles are mainly used by workflow to identify the correct
        species in a system, such as when doing docking or performing
        solvation free energy calculations.
        """

        Solvent = "solv"
        Solute = "sol"
        Ligand = "lig"
        Receptor = "rec"

    smiles = Attribute(
        docstring="The SMILES pattern which describes this component.",
        type_hint=str,
        read_only=True,
    )
    role = Attribute(
        docstring="The role of this component in the system.",
        type_hint=Role,
        default_value=Role.Solvent,
        read_only=True,
    )

    @property
    def identifier(self):
        """str: A unique identifier for this component, composed of the
        standardized SMILES pattern and the role's short code."""
        return f"{self.smiles}{{{self.role.value}}}"

    def __init__(self, smiles=UNDEFINED, role=Role.Solvent):
        """Constructs a new Component object.

        Parameters
        ----------
        smiles: str
            A SMILES descriptor of the component. It is canonicalized on
            construction so that equivalent patterns compare equal.
        role: Component.Role
            The role of this component in the system.
        """
        # Canonicalize up front -- equality and hashing are both based on
        # ``identifier``, which embeds this standardized pattern.
        if smiles != UNDEFINED:
            smiles = self._standardize_smiles(smiles)

        self._set_value("smiles", smiles)
        self._set_value("role", role)

    @staticmethod
    def _standardize_smiles(smiles):
        """Standardizes a SMILES pattern to be canonical (but not necessarily isomeric)
        using the `cmiles` library.

        Parameters
        ----------
        smiles: str
            The SMILES pattern to standardize.

        Returns
        -------
        The standardized SMILES pattern.
        """
        from cmiles.utils import load_molecule, mol_to_smiles

        molecule = load_molecule(smiles, toolkit="rdkit")

        try:
            # Try to make the smiles isomeric.
            smiles = mol_to_smiles(
                molecule, isomeric=True, explicit_hydrogen=False, mapped=False
            )
        except ValueError:
            # Fall-back to non-isomeric.
            smiles = mol_to_smiles(
                molecule, isomeric=False, explicit_hydrogen=False, mapped=False
            )

        return smiles

    def __str__(self):
        return self.identifier

    def __repr__(self):
        return f"<{self.__class__.__name__} {str(self)}>"

    def __hash__(self):
        # Hashing and equality are both delegated to the canonical identifier.
        return hash(self.identifier)

    def __eq__(self, other):
        # Exact type match is required -- subclasses never compare equal.
        return type(self) == type(other) and self.identifier == other.identifier

    def __ne__(self, other):
        return not (self == other)

    def __setstate__(self, state):
        # Make sure the smiles pattern is standardized.
        state["smiles"] = Component._standardize_smiles(state["smiles"])
        super(Component, self).__setstate__(state)
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_utils/test_timeseries.py
|
<gh_stars>10-100
"""
Units tests for openff.evaluator.utils.statistics
"""
import numpy as np
from pymbar.timeseries import detectEquilibration
from openff.evaluator.utils.timeseries import (
analyze_time_series,
get_uncorrelated_indices,
)
def test_analyze_time_series_std():
    """Test the ``analyze_time_series`` utility with flat data."""
    flat_series = np.ones(10)

    stats = analyze_time_series(flat_series)

    # A constant series is maximally correlated: a single effective sample,
    # equilibrated from the first frame.
    assert stats.n_total_points == 10
    assert stats.n_uncorrelated_points == 1
    assert np.isclose(stats.statistical_inefficiency, 10.0)
    assert stats.equilibration_index == 0
def test_analyze_time_series():
    """Compare the output of the ``analyze_time_series`` utility with ``pymbar``."""
    np.random.seed(4)
    series = np.random.rand(10)

    stats = analyze_time_series(series, minimum_samples=3)
    pymbar_index, pymbar_g, _ = detectEquilibration(series, fast=False)

    # The equilibration point and statistical inefficiency should agree with
    # the reference pymbar implementation.
    assert stats.equilibration_index == pymbar_index
    assert np.isclose(stats.statistical_inefficiency, pymbar_g)

    # Sanity-check the derived counts.
    assert stats.n_total_points == 10
    assert 0 < stats.n_uncorrelated_points <= 10
    assert 0 <= stats.equilibration_index < 10
def test_get_uncorrelated_indices():
    """With a statistical inefficiency of 2, every other frame is retained."""
    retained = get_uncorrelated_indices(4, 2.0)
    assert retained == [0, 2]
|
jaketanderson/openff-evaluator
|
openff/evaluator/datasets/curation/components/__init__.py
|
<gh_stars>10-100
from openff.evaluator.datasets.curation.components.components import (
    CurationComponent,
    CurationComponentSchema,
)

# ``__all__`` must contain the exported *names* as strings; listing the class
# objects themselves causes ``from ... import *`` to raise
# ``TypeError: Item in __all__ must be str``.
__all__ = ["CurationComponent", "CurationComponentSchema"]
|
jaketanderson/openff-evaluator
|
openff/evaluator/datasets/thermoml/thermoml.py
|
<filename>openff/evaluator/datasets/thermoml/thermoml.py<gh_stars>10-100
"""
An API for importing a ThermoML archive.
"""
import copy
import logging
import re
import traceback
from enum import Enum, unique
from urllib.error import HTTPError
from xml.etree import ElementTree
import numpy as np
import requests
from openff.units import unit
from openff.units.openmm import from_openmm
from openff.evaluator.datasets import (
MeasurementSource,
PhysicalPropertyDataSet,
PropertyPhase,
)
from openff.evaluator.substances import Component, MoleFraction, Substance
from openff.evaluator.thermodynamics import ThermodynamicState
logger = logging.getLogger(__name__)
def _unit_from_thermoml_string(full_string):
    """Extract the unit from a ThermoML property name.

    ThermoML property names take the form ``"<name>, <unit>"``. The text
    after the first comma (up to any second comma) is treated as the unit;
    a dimensionless unit is returned when no comma is present.

    Parameters
    ----------
    full_string: str
        The string to convert to a Unit object.

    Returns
    ----------
    openff.evaluator.unit.Unit
        The parsed unit.
    """
    parts = full_string.split(",")
    raw_unit = parts[1] if len(parts) > 1 else ""

    # Rewrite exponents such as ``dm3`` into ``dm**3`` so pint can parse them.
    normalized = re.sub(r"([a-z])([0-9]+)", r"\1**\2", raw_unit.strip())

    return unit.Unit(normalized)
def _phase_from_thermoml_string(string):
    """Converts a ThermoML string to a PropertyPhase

    Parameters
    ----------
    string: str
        The string to convert to a PropertyPhase

    Returns
    ----------
    PropertyPhase
        The converted PropertyPhase
    """
    normalized = string.strip().lower()

    # "liquid" and any kind of solution map onto the liquid phase.
    if normalized == "liquid" or "solution" in normalized:
        return PropertyPhase.Liquid

    # Crystalline phases count as solid unless they are liquid crystals.
    if "crystal" in normalized and "liquid" not in normalized:
        return PropertyPhase.Solid

    if "gas" in normalized:
        return PropertyPhase.Gas

    return PropertyPhase.Undefined
@unique
class _ConstraintType(Enum):
    """An enumeration of the ThermoML constraint types which this
    framework understands. Each member's value is the exact string
    used by the ThermoML schema.
    """

    Undefined = "Undefined"
    Temperature = "Temperature, K"
    Pressure = "Pressure, kPa"
    ComponentMoleFraction = "Mole fraction"
    ComponentMassFraction = "Mass fraction"
    ComponentMolality = "Molality, mol/kg"
    SolventMoleFraction = "Solvent: Mole fraction"
    SolventMassFraction = "Solvent: Mass fraction"
    SolventMolality = "Solvent: Molality, mol/kg"

    @staticmethod
    def from_node(node):
        """Convert either a ConstraintType or a VariableType xml node
        into a `_ConstraintType`.

        Parameters
        ----------
        node: xml.etree.Element
            The xml node to convert.

        Returns
        ----------
        _ConstraintType
            The converted constraint type, or `_ConstraintType.Undefined`
            when the node describes an unsupported type.
        """
        try:
            parsed_type = _ConstraintType(node.text)
        except (KeyError, ValueError):
            # The node's text does not match any known member value.
            parsed_type = _ConstraintType.Undefined

        if parsed_type is _ConstraintType.Undefined:
            logging.debug(f"{node.tag}->{node.text} is an unsupported constraint type.")

        return parsed_type

    def is_composition_constraint(self):
        """Checks whether the purpose of this constraint is
        to constrain the substance composition.

        Returns
        -------
        bool
            True if this is any of the component / solvent mole
            fraction, mass fraction or molality constraint types.
        """
        return self in (
            _ConstraintType.ComponentMoleFraction,
            _ConstraintType.ComponentMassFraction,
            _ConstraintType.ComponentMolality,
            _ConstraintType.SolventMoleFraction,
            _ConstraintType.SolventMassFraction,
            _ConstraintType.SolventMolality,
        )
class _Constraint:
    """A wrapper around a ThermoML `Constraint` node. A constraint
    in ThermoML encompasses such constructs as temperature, pressure
    or composition at which a measurement was recorded.
    """

    def __init__(self):

        # The type of quantity (e.g. temperature) which is constrained.
        self.type = _ConstraintType.Undefined
        # The value the quantity is constrained to.
        self.value = 0.0

        # The indices of any compounds which act as the solvent.
        self.solvents = []

        # Describes which compound the variable acts upon.
        self.compound_index = None

    @classmethod
    def from_node(cls, constraint_node, namespace):
        """Creates a _Constraint from an xml node.

        Parameters
        ----------
        constraint_node: Element
            The xml node to convert.
        namespace: dict of str and str
            The xml namespace.

        Returns
        ----------
        _Constraint, optional
            The extracted constraint if the constraint type is supported,
            otherwise `None`.
        """
        # Extract the xml nodes.
        type_node = constraint_node.find(".//ThermoML:ConstraintType/*", namespace)
        value_node = constraint_node.find("./ThermoML:nConstraintValue", namespace)

        # N.B. `findall` (rather than `find`) must be used here - `find` returns
        # only the first matching element, and iterating over a single element
        # yields its children rather than the matched nodes themselves, so the
        # solvent indices would never be extracted.
        solvent_index_nodes = constraint_node.findall(
            "./ThermoML:Solvent//ThermoML:nOrgNum", namespace
        )
        compound_index_node = constraint_node.find(
            "./ThermoML:ConstraintID/ThermoML:RegNum/*", namespace
        )

        value = float(value_node.text)

        # Determine what the default unit for this variable should be.
        unit_type = _unit_from_thermoml_string(type_node.text)

        return_value = cls()

        return_value.type = _ConstraintType.from_node(type_node)
        return_value.value = value * unit_type

        if compound_index_node is not None:
            return_value.compound_index = int(compound_index_node.text)

        # The index must be read from each node's text - the node itself is an
        # xml element and cannot be cast directly to an integer.
        for solvent_index_node in solvent_index_nodes:
            return_value.solvents.append(int(solvent_index_node.text))

        return None if return_value.type is _ConstraintType.Undefined else return_value

    @classmethod
    def from_variable(cls, variable, value):
        """Creates a _Constraint from an existing
        `_VariableDefinition` variable definition.

        Parameters
        ----------
        variable: _VariableDefinition
            The variable to convert.
        value: openff.evaluator.unit.Quantity
            The value of the constant.

        Returns
        ----------
        _Constraint
            The created constraint.
        """
        return_value = cls()

        return_value.type = variable.type
        return_value.compound_index = variable.compound_index

        return_value.solvents.extend(variable.solvents)
        return_value.value = value

        return return_value
class _VariableDefinition:
    """A wrapper around a ThermoML Variable node. A variable in
    ThermoML is essentially just the definition of a `Constraint`
    (the constraint type, the expected units, etc.) whose value is
    defined inside of another ThermoML node.
    """

    def __init__(self):

        # The index assigned to this variable by the archive.
        self.index = -1

        self.type = _ConstraintType.Undefined
        # The indices of any compounds which act as the solvent.
        self.solvents = []

        # The unit which values of this variable are expected to be in.
        self.default_unit = None

        # Describes which compound the variable acts upon.
        self.compound_index = None

    @classmethod
    def from_node(cls, variable_node, namespace):
        """Creates a `_VariableDefinition` from an xml node.

        Parameters
        ----------
        variable_node: xml.etree.Element
            The xml node to convert.
        namespace: dict of str and str
            The xml namespace.

        Returns
        ----------
        _VariableDefinition, optional
            The created variable definition if its type is supported,
            otherwise `None`.
        """
        # Extract the xml nodes.
        type_node = variable_node.find(".//ThermoML:VariableType/*", namespace)
        index_node = variable_node.find("ThermoML:nVarNumber", namespace)

        # N.B. `findall` (rather than `find`) must be used here - `find` returns
        # only the first matching element, and iterating over a single element
        # yields its children rather than the matched nodes themselves, so the
        # solvent indices would never be extracted.
        solvent_index_nodes = variable_node.findall(
            "./ThermoML:Solvent//ThermoML:nOrgNum", namespace
        )
        compound_index_node = variable_node.find(
            "./ThermoML:VariableID/ThermoML:RegNum/*", namespace
        )

        return_value = cls()

        return_value.default_unit = _unit_from_thermoml_string(type_node.text)
        return_value.index = int(index_node.text)
        return_value.type = _ConstraintType.from_node(type_node)

        if compound_index_node is not None:
            return_value.compound_index = int(compound_index_node.text)

        # The index must be read from each node's text - the node itself is an
        # xml element and cannot be cast directly to an integer.
        for solvent_index_node in solvent_index_nodes:
            return_value.solvents.append(int(solvent_index_node.text))

        return None if return_value.type is _ConstraintType.Undefined else return_value
class _PropertyUncertainty:
    """A wrapper around a ThermoML PropUncertainty node."""

    # Reusing this class (via the `prefix` hook) for both the plain and
    # the combined uncertainty nodes avoids duplicated parsing code.
    prefix = ""

    def __init__(self):

        # The index assigned to this uncertainty by the archive.
        self.index = -1
        # The factor used to scale an expanded uncertainty down to a
        # standard uncertainty.
        self.coverage_factor = None

    @classmethod
    def from_xml(cls, node, namespace):
        """Creates a _PropertyUncertainty from an xml node.

        Parameters
        ----------
        node: Element
            The xml node to convert.
        namespace: dict of str and str
            The xml namespace.

        Returns
        ----------
        _PropertyUncertainty, optional
            The created property uncertainty, or `None` when no coverage
            factor could be determined.
        """
        factor_node = node.find(f"ThermoML:n{cls.prefix}CoverageFactor", namespace)
        confidence_node = node.find(
            f"ThermoML:n{cls.prefix}UncertLevOfConfid", namespace
        )

        # As defined by https://www.nist.gov/pml/nist-technical-note-1297/nist-tn-1297-7-reporting-uncertainty
        if factor_node is not None:
            factor = float(factor_node.text)
        elif confidence_node is not None and confidence_node.text == "95":
            # A 95% level of confidence corresponds to a coverage factor of two.
            factor = 2
        else:
            return None

        index_node = node.find(f"ThermoML:n{cls.prefix}UncertAssessNum", namespace)

        parsed = cls()
        parsed.coverage_factor = factor
        parsed.index = int(index_node.text)

        return parsed
class _CombinedUncertainty(_PropertyUncertainty):
    """A wrapper around a ThermoML CombPropUncertainty node."""

    # Selects the `nComb...` xml tags when parsing via the base class.
    prefix = "Comb"
class _Compound:
    """A wrapper around a ThermoML Compound node."""

    def __init__(self):

        # The SMILES representation of this compound.
        self.smiles = None
        # The index assigned to this compound by the archive.
        self.index = -1

    @staticmethod
    def smiles_from_inchi_string(inchi_string):
        """Attempts to create a SMILES pattern from an InChI string.

        Parameters
        ----------
        inchi_string: str
            The InChI string to convert.

        Returns
        ----------
        str, optional
            None if the identifier cannot be converted, otherwise the converted SMILES pattern.
        """
        from cmiles.utils import mol_to_smiles

        # RDKit is an optional dependency - without it no conversion can
        # be attempted.
        try:
            import rdkit.Chem
        except ImportError:
            return None

        if inchi_string is None:
            raise ValueError("The InChI string cannot be `None`.")

        rdkit_molecule = rdkit.Chem.MolFromInchi(inchi_string, removeHs=False)

        if not rdkit_molecule:
            raise ValueError(f"The InchI string ({inchi_string}) could not be parsed")

        try:
            return mol_to_smiles(rdkit_molecule, explicit_hydrogen=False, mapped=False)
        except ValueError:
            return None

    @staticmethod
    def smiles_from_thermoml_smiles_string(thermoml_string):
        """Attempts to create a standardised SMILES pattern from a
        ThermoML smiles string.

        Parameters
        ----------
        thermoml_string: str
            The string to convert.

        Returns
        ----------
        str, optional
            None if the identifier cannot be converted, otherwise the converted SMILES pattern.
        """
        from cmiles.utils import load_molecule, mol_to_smiles

        if thermoml_string is None:
            raise ValueError("The string cannot be `None`.")

        loaded_molecule = load_molecule(thermoml_string, toolkit="rdkit")
        return mol_to_smiles(loaded_molecule, explicit_hydrogen=False, mapped=False)

    @staticmethod
    def smiles_from_common_name(common_name):
        """Attempts to create a SMILES pattern from an IUPAC / common name.

        Parameters
        ----------
        common_name: str
            The common name to convert.

        Returns
        ----------
        str, None
            None if the identifier cannot be converted, otherwise the converted SMILES pattern.
        """
        from cmiles.utils import load_molecule, mol_to_smiles
        from openff.toolkit.topology import Molecule
        from openff.toolkit.utils import InvalidIUPACNameError, LicenseError

        if common_name is None:
            return None

        try:
            toolkit_molecule = Molecule.from_iupac(
                common_name, allow_undefined_stereo=True
            )
            cmiles_molecule = load_molecule(
                toolkit_molecule.to_smiles(), toolkit="rdkit"
            )
            smiles = mol_to_smiles(
                cmiles_molecule, explicit_hydrogen=False, mapped=False
            )

            # Treat an empty pattern as a failed conversion.
            if smiles == "":
                smiles = None

        # Name-to-structure conversion may fail for licensing reasons or
        # simply because the name is not understood.
        except (LicenseError, ValueError, InvalidIUPACNameError):
            smiles = None

        return smiles

    @classmethod
    def from_xml_node(cls, node, namespace):
        """Creates a _Compound from an xml node.

        Parameters
        ----------
        node: Element
            The xml node to convert.
        namespace: dict of str and str
            The xml namespace.

        Returns
        ----------
        _Compound
            The created compound wrapper.
        """
        # Gather up all possible identifiers.
        inchi_nodes = node.findall("ThermoML:sStandardInChI", namespace)
        smiles_nodes = node.findall("ThermoML:sSmiles", namespace)
        name_nodes = node.findall("ThermoML:sCommonName", namespace)

        smiles = None

        # Try each identifier in turn: InChI first, then the raw SMILES
        # pattern, and finally the common name.
        if len(inchi_nodes) > 0 and inchi_nodes[0].text is not None:
            smiles = cls.smiles_from_inchi_string(inchi_nodes[0].text)

        if (
            smiles is None
            and len(smiles_nodes) > 0
            and smiles_nodes[0].text is not None
        ):
            smiles = cls.smiles_from_thermoml_smiles_string(smiles_nodes[0].text)

        if smiles is None and len(name_nodes) > 0 and name_nodes[0].text is not None:
            smiles = cls.smiles_from_common_name(name_nodes[0].text)

        if smiles is None:
            logging.debug(
                "A ThermoML:Compound node does not have a valid InChI identifier, "
                "a valid SMILES pattern, or an understandable common name."
            )
            return None

        index_node = node.find("./ThermoML:RegNum/*", namespace)

        if index_node is None:
            raise ValueError("A ThermoML:Compound does not have an index defined.")

        compound = cls()
        compound.smiles = smiles
        compound.index = int(index_node.text)

        return compound
class _PureOrMixtureData:
"""A wrapper around a ThermoML PureOrMixtureData node."""
@staticmethod
def extract_compound_indices(node, namespace, compounds):
"""Extract a list of the compound indices which a given `PureOrMixtureData`
node depends upon.
Parameters
----------
node: xml.etree.Element
The xml node to read.
namespace: dict of str and str
The xml namespace.
compounds: dict of int and _Compound
The compounds which were able to be parsed from the
parent archive file, with keys of their assigned
indices.
"""
component_nodes = node.findall("ThermoML:Component", namespace)
compound_indices = []
# Figure out which compounds are going to be associated with
# the property entries.
for component_node in component_nodes:
index_node = component_node.find("./ThermoML:RegNum/*", namespace)
compound_index = int(index_node.text)
if compound_index not in compounds:
logging.debug(
"A PureOrMixtureData entry depends on an "
"unsupported compound and has been ignored"
)
return None
if compound_index in compound_indices:
raise ValueError(
"A ThermoML:PureOrMixtureData states its dependency on the "
"same compound twice."
)
compound_indices.append(compound_index)
return compound_indices
@staticmethod
def extract_property_definitions(node, namespace, parent_phases):
    """Extract the property definitions contained in a PureOrMixtureData
    node. Only the definitions (and not the measured values) are read
    here, as the values themselves are defined elsewhere in the archive
    file.

    Parameters
    ----------
    node: xml.etree.Element
        The xml node to read.
    namespace: dict of str and str
        The xml namespace.
    parent_phases: PropertyPhase
        The phases specified by the parent PureOrMixtureData node.

    Returns
    ----------
    dict of int and ThermoMLProperty
        The extracted property definitions with keys of their
        assigned indices.
    """
    properties = {}

    for property_node in node.findall("ThermoML:Property", namespace):

        property_definition = ThermoMLProperty.from_xml_node(
            property_node, namespace, parent_phases
        )

        # Skip definitions of unsupported property types.
        if property_definition is None:
            continue

        if property_definition.index in properties:
            raise ValueError(
                "A ThermoML data set contains two properties with the same index"
            )

        properties[property_definition.index] = property_definition

    return properties
@staticmethod
def validate_constraint(constraint, compounds):
    """Validates a constraint object - this may be either
    a full `_Constraint` or just a `_VariableDefinition`.

    Parameters
    ----------
    constraint: _Constraint or _VariableDefinition
        The constraint to validate.
    compounds: dict of int and _Compound
        A dictionary of the compounds the parent PureOrMixtureData was
        measured for.

    Returns
    -------
    bool
        True if the constraint is valid, False otherwise.
    """
    # Constraints of an unsupported (or unparseable) type are invalid.
    if constraint is None or constraint.type is _ConstraintType.Undefined:

        logging.debug("An unsupported constraint has been ignored.")
        return False

    # A constraint may only act upon a compound which was itself
    # successfully parsed from the archive.
    if (
        constraint.compound_index is not None
        and constraint.compound_index not in compounds
    ):

        logging.debug(
            "A constraint exists upon a non-existent compound and will be ignored."
        )
        return False

    if (
        constraint.type.is_composition_constraint()
        and constraint.compound_index is None
    ):

        # N.B. a trailing space was added to the first string below - the two
        # implicitly concatenated literals previously ran together in the log
        # output ("...constraintsneed...").
        logging.debug(
            "An unsupported constraint has been ignored - composition constraints "
            "need to have a corresponding compound_index."
        )
        return False

    return True
@staticmethod
def extract_global_constraints(node, namespace, compounds):
    """Extract the constraints which should be applied to all of
    the properties defined in a `PureOrMixtureData` node.

    Parameters
    ----------
    node: xml.etree.Element
        The xml node to read.
    namespace: dict of str and str
        The xml namespace.
    compounds: dict of int and _Compound
        A dictionary of the compounds this PureOrMixtureData was
        measured for.

    Returns
    ----------
    list of _Constraint, optional
        The extracted constraints if all could be parsed,
        otherwise `None`.
    """
    constraints = []

    for constraint_node in node.findall("ThermoML:Constraint", namespace):

        constraint = _Constraint.from_node(constraint_node, namespace)

        # A single invalid global constraint invalidates the whole entry.
        if not _PureOrMixtureData.validate_constraint(constraint, compounds):
            return None

        constraints.append(constraint)

    return constraints
@staticmethod
def extract_variable_definitions(node, namespace, compounds):
    """Extract all of the 'variables' in a PureOrMixtureData node.
    These are simply constraints whose values are defined elsewhere
    in the archive.

    Parameters
    ----------
    node: xml.etree.Element
        The xml node to read.
    namespace: dict of str and str
        The xml namespace.
    compounds: dict of int and _Compound
        A dictionary of the compounds this PureOrMixtureData was calculated for.

    Returns
    ----------
    dict of int and _VariableDefinition
        The extracted variable definitions which could be parsed.
    """
    variables = {}

    for variable_node in node.findall("ThermoML:Variable", namespace):

        variable = _VariableDefinition.from_node(variable_node, namespace)

        # Unlike global constraints, an unsupported variable does not
        # invalidate the whole entry - it is simply skipped.
        if not _PureOrMixtureData.validate_constraint(variable, compounds):
            continue

        variables[variable.index] = variable

    return variables
@staticmethod
def extract_uncertainty(node, namespace, property_definition):
"""Extracts the uncertainties on the measured properties
contained in this `PureOrMixtureData` node.
Parameters
----------
node: xml.etree.Element
The xml node to read.
namespace: dict of str and str
The xml namespace.
property_definition: ThermoMLProperty
The property to which this uncertainty is attached.
Returns
-------
float, optional
The uncertainty in the property if it can be parsed or
if one is present, otherwise `None`.
"""
# Look for a standard uncertainty..
uncertainty_node = node.find(".//ThermoML:nCombStdUncertValue", namespace)
if uncertainty_node is None:
uncertainty_node = node.find(".//ThermoML:nStdUncertValue", namespace)
# We have found a std. uncertainty
if uncertainty_node is not None:
return float(uncertainty_node.text)
# Try to calculate uncertainty from a coverage factor if present
if (
len(property_definition.combined_uncertainty_definitions) == 0
and len(property_definition.property_uncertainty_definitions) == 0
):
return None
combined = len(property_definition.combined_uncertainty_definitions) > 0
prefix = (
_CombinedUncertainty.prefix if combined else _PropertyUncertainty.prefix
)
if combined:
index_node = node.find(
"./ThermoML:CombinedUncertainty/ThermoML:nCombUncertAssessNum",
namespace,
)
else:
index_node = node.find(
"./ThermoML:PropUncertainty/ThermoML:nUncertAssessNum", namespace
)
expanded_uncertainty_node = node.find(
".//ThermoML:n" + prefix + "ExpandUncertValue", namespace
)
if index_node is None or expanded_uncertainty_node is None:
return None
expanded_uncertainty = float(expanded_uncertainty_node.text)
index = int(index_node.text)
if (
combined
and index not in property_definition.combined_uncertainty_definitions
):
return None
if (
not combined
and index not in property_definition.property_uncertainty_definitions
):
return None
divisor = (
property_definition.combined_uncertainty_definitions[index].coverage_factor
if combined
else property_definition.property_uncertainty_definitions[
index
].coverage_factor
)
return expanded_uncertainty / divisor
@staticmethod
def _smiles_to_molecular_weight(smiles):
    """Calculates the molecular weight of a substance specified
    by a smiles string.

    Parameters
    ----------
    smiles: str
        The smiles string to calculate the molecular weight of.

    Returns
    -------
    openff.evaluator.unit.Quantity
        The molecular weight.
    """
    from openff.toolkit.topology import Molecule

    # Support both the standalone `openmm` package and the legacy
    # `simtk` namespaced one.
    try:
        from openmm import unit as openmm_unit
    except ImportError:
        from simtk.openmm import unit as openmm_unit

    try:
        molecule = Molecule.from_smiles(smiles)
    except Exception as e:
        formatted_exception = traceback.format_exception(None, e, e.__traceback__)
        raise ValueError(
            f"The toolkit raised an exception for the "
            f"{smiles} smiles pattern: {formatted_exception}"
        )

    # Sum the masses of the individual atoms.
    total_mass = sum(
        (atom.mass for atom in molecule.atoms), 0.0 * openmm_unit.dalton
    )

    return from_openmm(total_mass)
@staticmethod
def _solvent_mole_fractions_to_moles(
    solvent_mass, solvent_mole_fractions, solvent_compounds
):
    """Converts a set of solvent mole fractions to moles for a
    given mass of solvent.

    Parameters
    ----------
    solvent_mass: openff.evaluator.unit.Quantity
        The total mass of the solvent in units compatible with kg.
    solvent_mole_fractions: dict of int and float
        The mole fractions of any solvent compounds in the system.
    solvent_compounds: dict of int and float
        A dictionary of any solvent compounds in the system.

    Returns
    -------
    dict of int and openff.evaluator.unit.Quantity
        A dictionary of the moles of each solvent compound.
    """
    # Compute the mole-fraction weighted average molecular weight of
    # the solvent mixture.
    average_molecular_weight = 0.0 * unit.gram / unit.mole

    for solvent_index, solvent_compound in solvent_compounds.items():

        molecular_weight = _PureOrMixtureData._smiles_to_molecular_weight(
            solvent_compound.smiles
        )
        average_molecular_weight += (
            molecular_weight * solvent_mole_fractions[solvent_index]
        )

    total_solvent_moles = solvent_mass / average_molecular_weight

    # Distribute the total number of moles according to each compound's
    # mole fraction.
    return {
        solvent_index: solvent_mole_fractions[solvent_index] * total_solvent_moles
        for solvent_index in solvent_compounds
    }
@staticmethod
def _convert_mole_fractions(constraints, compounds, solvent_mole_fractions=None):
    """Converts a set of mole fraction `_Constraint` objects into a
    full set of mole fractions.

    Parameters
    ----------
    constraints: list of _Constraint
        The constraints to convert.
    compounds: dict of int and _Compound
        The compounds in the system.
    solvent_mole_fractions: dict of int and float
        The mole fractions of any solvent compounds in the system,
        where the total mole fraction of all solvents must be equal
        to one.

    Returns
    -------
    dict of int and float
        A dictionary of compound indices and mole fractions.
    """
    constraint_count = len(constraints)

    mole_fractions = {}
    total_mol_fraction = 0.0

    for constraint in constraints:

        fraction = constraint.value

        # Strip any dimensionless units from the raw value.
        if isinstance(fraction, unit.Quantity):
            fraction = fraction.to(unit.dimensionless).magnitude

        mole_fractions[constraint.compound_index] = fraction
        total_mol_fraction += fraction

    # With an explicit solvent every compound must be constrained; without
    # one, at most a single constraint may be omitted.
    invalid_with_solvent = (
        solvent_mole_fractions is not None and constraint_count != len(compounds)
    )
    invalid_without_solvent = (
        solvent_mole_fractions is None
        and constraint_count != len(compounds) - 1
        and constraint_count != len(compounds)
    )

    if invalid_with_solvent or invalid_without_solvent:
        raise ValueError(
            f"The number of mole fraction constraints ({constraint_count}) must be one "
            f"less than or equal to the number of compounds being constrained ({len(compounds)}) "
            f"if a solvent list is not present, otherwise there must be an equal number."
        )

    # Infer the value of a single omitted mole fraction constraint.
    if constraint_count == len(compounds) - 1:

        for compound_index in compounds:

            if compound_index in mole_fractions:
                continue

            mole_fractions[compound_index] = 1.0 - total_mol_fraction

    # Recompute the total mole fraction to be safe.
    total_mol_fraction = sum(mole_fractions.values())

    # Account for any solvent present - the remainder of the mixture is
    # split between the solvent compounds.
    if solvent_mole_fractions is not None:

        remaining_mole_fraction = 1.0 - total_mol_fraction

        for solvent_index, solvent_fraction in solvent_mole_fractions.items():
            mole_fractions[solvent_index] = (
                solvent_fraction * remaining_mole_fraction
            )

    return mole_fractions
@staticmethod
def _convert_mass_fractions(
    constraints, compounds, solvent_mole_fractions=None, solvent_compounds=None
):
    """Converts a set of mass fraction `_Constraint` objects into
    mole fractions.

    Parameters
    ----------
    constraints: list of _Constraint
        The constraints to convert.
    compounds: dict of int and _Compound
        The compounds in the system.
    solvent_mole_fractions: dict of int and float
        The mole fractions of any solvent compounds in the system,
        where the total mole fraction of all solvents must be equal
        to one.
    solvent_compounds: dict of int and float
        A dictionary of any explicitly defined solvent compounds in the
        system.

    Returns
    -------
    dict of int and float
        A dictionary of compound indices and mole fractions.
    """
    # noinspection PyTypeChecker
    number_of_constraints = len(constraints)

    mass_fractions = {}
    total_mass_fraction = 0.0

    for constraint in constraints:

        mass_fraction = constraint.value

        # Strip any dimensionless units from the raw value.
        if isinstance(mass_fraction, unit.Quantity):
            mass_fraction = mass_fraction.to(unit.dimensionless).magnitude

        mass_fractions[constraint.compound_index] = mass_fraction
        total_mass_fraction += mass_fraction

    # With an explicit solvent every compound must be constrained; without
    # one, at most a single constraint may be omitted.
    if (
        number_of_constraints != len(compounds)
        and solvent_mole_fractions is not None
    ) or (
        number_of_constraints != len(compounds) - 1
        and number_of_constraints != len(compounds)
        and solvent_mole_fractions is None
    ):
        raise ValueError(
            f"The number of mass fraction constraints ({number_of_constraints}) must be one "
            f"less than or equal to the number of compounds being constrained ({len(compounds)}) "
            f"if a solvent list is not present, otherwise there must be an equal number."
        )

    # Handle the case where a single mass fraction constraint is missing.
    if number_of_constraints == len(compounds) - 1:

        for compound_index in compounds:

            if compound_index in mass_fractions:
                continue

            mass_fractions[compound_index] = 1.0 - total_mass_fraction

            if isinstance(mass_fractions[compound_index], unit.Quantity):
                mass_fractions[compound_index] = (
                    mass_fractions[compound_index].to(unit.dimensionless).magnitude
                )

    # Assume an arbitrary total mass of one gram - its absolute value
    # cancels out when the amounts are normalised into mole fractions below.
    total_mass = 1 * unit.gram
    total_solvent_mass = total_mass

    moles = {}
    total_moles = 0.0 * unit.mole

    # Convert each compound's share of the mass into an amount in moles.
    for compound_index in compounds:

        compound_smiles = compounds[compound_index].smiles

        compound_weight = _PureOrMixtureData._smiles_to_molecular_weight(
            compound_smiles
        )

        moles[compound_index] = (
            total_mass * mass_fractions[compound_index] / compound_weight
        )
        total_moles += moles[compound_index]

        # Any mass not assigned to a compound belongs to the solvent.
        total_solvent_mass -= total_mass * mass_fractions[compound_index]

    if (
        number_of_constraints == len(compounds)
        and solvent_mole_fractions is not None
    ):

        solvent_moles = _PureOrMixtureData._solvent_mole_fractions_to_moles(
            total_solvent_mass, solvent_mole_fractions, solvent_compounds
        )

        for solvent_index in solvent_moles:

            moles[solvent_index] = solvent_moles[solvent_index]
            total_moles += solvent_moles[solvent_index]

    # Normalise the absolute amounts into mole fractions.
    mole_fractions = {}

    for compound_index in moles:

        mole_fraction = moles[compound_index] / total_moles
        mole_fractions[compound_index] = mole_fraction

    return mole_fractions
@staticmethod
def _convert_molality(
    constraints, compounds, solvent_mole_fractions=None, solvent_compounds=None
):
    """Converts a set of molality `_Constraint` objects into mole
    fractions.

    Parameters
    ----------
    constraints: list of _Constraint
        The constraints to convert.
    compounds: dict of int and _Compound
        The compounds in the system.
    solvent_mole_fractions: dict of int and float
        The mole fractions of any solvent compounds in the system,
        where the total mole fraction of all solvents must be equal
        to one.
    solvent_compounds: dict of int and float
        A dictionary of any explicitly defined solvent compounds in the
        system.

    Returns
    -------
    dict of int and float
        A dictionary of compound indices and mole fractions.
    """
    number_of_moles = {}

    # noinspection PyTypeChecker
    number_of_constraints = len(constraints)
    mole_fractions = {}

    total_number_of_moles = 0.0 * unit.moles

    # Molality is defined per kilogram of solvent, so assume exactly one
    # kilogram of solvent - the absolute value cancels out when the
    # amounts are normalised into mole fractions below.
    total_solvent_mass = 1.0 * unit.kilograms

    for constraint in constraints:

        molality = constraint.value

        # moles = molality (mol / kg) * solvent mass (kg)
        moles = molality * total_solvent_mass

        number_of_moles[constraint.compound_index] = moles
        total_number_of_moles += moles

    if (
        number_of_constraints != len(compounds) - 1
        and solvent_mole_fractions is None
    ) or (
        number_of_constraints != len(compounds)
        and solvent_mole_fractions is not None
    ):
        raise ValueError(
            f"The number of molality constraints ({number_of_constraints}) must be one "
            f"less than the number of compounds being constrained ({len(compounds)}) if a "
            f"solvent list is not present, otherwise there must be an equal number."
        )

    if (
        number_of_constraints == len(compounds) - 1
        and solvent_mole_fractions is None
    ):

        # In this case, there is no explicit solvent entry and the last component
        # whose molality has not been constrained is considered to be the 'solvent'
        for compound_index in compounds:

            if compound_index in number_of_moles:
                continue

            compound_smiles = compounds[compound_index].smiles

            compound_weight = _PureOrMixtureData._smiles_to_molecular_weight(
                compound_smiles
            )

            # The implicit solvent accounts for the full solvent mass.
            moles = total_solvent_mass / compound_weight

            number_of_moles[compound_index] = moles
            total_number_of_moles += moles

    elif (
        number_of_constraints == len(compounds)
        and solvent_mole_fractions is not None
    ):

        # Otherwise, convert the explicitly defined solvent composition
        # into absolute amounts.
        solvent_moles = _PureOrMixtureData._solvent_mole_fractions_to_moles(
            total_solvent_mass, solvent_mole_fractions, solvent_compounds
        )

        for solvent_index in solvent_moles:

            number_of_moles[solvent_index] = solvent_moles[solvent_index]
            total_number_of_moles += solvent_moles[solvent_index]

    # Normalise the absolute amounts into mole fractions.
    for compound_index in number_of_moles:

        mole_fraction = number_of_moles[compound_index] / total_number_of_moles
        mole_fractions[compound_index] = mole_fraction

    return mole_fractions
@staticmethod
def build_substance(thermoml_property, constraints, compounds):
    """Build a Substance object from the extracted constraints and compounds.

    Parameters
    ----------
    thermoml_property: ThermoMLProperty
        The property to which this mixture belongs.
    constraints: list of _Constraint
        The ThermoML constraints.
    compounds: dict of int and _Compound
        A dictionary of the compounds this PureOrMixtureData was calculated for.

    Returns
    ----------
    Substance, optional
        The constructed substance, or `None` when the composition
        constraints are inconsistent or reference unknown compounds.
    """
    # TODO: We need to take into account `thermoml_property.target_compound_index` and
    #       `thermoml_property.solute_standard_state` to properly identify infinitely
    #       diluted solutes in the system (if any). Otherwise the solute will be
    #       assigned a mole fraction of zero.
    solvent_constraint_type = _ConstraintType.Undefined
    component_constraint_type = _ConstraintType.Undefined

    # Collect the solvent indices stated directly on the property itself.
    solvent_indices = set()

    for solvent_index in thermoml_property.solvents:

        if solvent_index in solvent_indices:
            continue

        solvent_indices.add(solvent_index)

    # Determine which types of solvent and component constraints are
    # being applied.
    for constraint in constraints:

        # Make sure we hunt down solvent indices.
        for solvent_index in constraint.solvents:

            if solvent_index in solvent_indices:
                continue

            solvent_indices.add(solvent_index)

        # Only composition type restraints apply here, skip
        # the rest.
        if not constraint.type.is_composition_constraint():
            continue

        if (
            constraint.type == _ConstraintType.SolventMassFraction
            or constraint.type == _ConstraintType.SolventMoleFraction
            or constraint.type == _ConstraintType.SolventMolality
        ):

            # All solvent composition constraints must share a single type.
            if solvent_constraint_type == _ConstraintType.Undefined:
                solvent_constraint_type = constraint.type

            if solvent_constraint_type != constraint.type:

                logging.debug(
                    f"A property with different types of solvent composition constraints "
                    f"was found - {solvent_constraint_type} vs {constraint.type}). This "
                    f"is likely a bug in the ThermoML file and so this property will be "
                    f"skipped."
                )

                return None

        else:

            # Likewise, all component composition constraints must share
            # a single type.
            if component_constraint_type == _ConstraintType.Undefined:
                component_constraint_type = constraint.type

            if component_constraint_type != constraint.type:

                logging.debug(
                    f"A property with different types of composition constraints "
                    f"was found - {component_constraint_type} vs {constraint.type}). This "
                    f"is likely a bug in the ThermoML file and so this property will be "
                    f"skipped."
                )

                return None

    # If no constraint was applied, this very likely means a pure substance
    # was found.
    if (
        component_constraint_type == _ConstraintType.Undefined
        and solvent_constraint_type == _ConstraintType.Undefined
    ):
        component_constraint_type = _ConstraintType.ComponentMoleFraction

    elif (
        component_constraint_type == _ConstraintType.Undefined
        and solvent_constraint_type != _ConstraintType.Undefined
    ):

        # A solvent-only composition cannot be resolved into a substance.
        logging.debug(
            f"A property with only solvent composition "
            f"constraints {solvent_constraint_type} was found."
        )

        return None

    solvent_mole_fractions = {}

    # Split the composition constraints into those acting on the solvent...
    solvent_constraints = [
        constraint
        for constraint in constraints
        if constraint.type == solvent_constraint_type
    ]

    solvent_compounds = {}

    for solvent_index in solvent_indices:

        if solvent_index in compounds:

            solvent_compounds[solvent_index] = compounds[solvent_index]
            continue

        logging.debug(
            "The composition of a non-existent solvent was "
            "found. This usually only occurs in cases were "
            "the solvent component could not be understood "
            "by the framework."
        )

        return None

    # Make sure all of the solvents have not been removed.
    if (
        solvent_constraint_type != _ConstraintType.Undefined
        and len(solvent_indices) == 0
    ):

        logging.debug(
            "The composition of a solvent was found, however the "
            "solvent list is empty. This usually only occurs in "
            "cases were the solvent component could not be understood "
            "by the framework."
        )

        return None

    # ...and those acting on the remaining (non-solvent) components.
    remaining_constraints = [
        constraint
        for constraint in constraints
        if constraint.type == component_constraint_type
    ]

    remaining_compounds = {}

    for compound_index in compounds:

        if compound_index in solvent_indices:
            continue

        remaining_compounds[compound_index] = compounds[compound_index]

    # Determine the mole fractions of the solvent species, if any.
    if solvent_constraint_type == _ConstraintType.SolventMoleFraction:

        solvent_mole_fractions = _PureOrMixtureData._convert_mole_fractions(
            solvent_constraints, solvent_compounds
        )

    elif solvent_constraint_type == _ConstraintType.SolventMassFraction:

        solvent_mole_fractions = _PureOrMixtureData._convert_mass_fractions(
            solvent_constraints, solvent_compounds
        )

    elif solvent_constraint_type == _ConstraintType.SolventMolality:

        solvent_mole_fractions = _PureOrMixtureData._convert_molality(
            solvent_constraints, solvent_compounds
        )

    elif solvent_constraint_type == _ConstraintType.Undefined:

        # Without a solvent every compound is treated as a component.
        solvent_mole_fractions = None
        solvent_compounds = None

        remaining_compounds = compounds

    # Determine the mole fractions of the remaining compounds.
    mole_fractions = {}

    if component_constraint_type == _ConstraintType.ComponentMoleFraction:

        mole_fractions = _PureOrMixtureData._convert_mole_fractions(
            remaining_constraints, remaining_compounds, solvent_mole_fractions
        )

    elif component_constraint_type == _ConstraintType.ComponentMassFraction:

        mole_fractions = _PureOrMixtureData._convert_mass_fractions(
            remaining_constraints,
            remaining_compounds,
            solvent_mole_fractions,
            solvent_compounds,
        )

    elif component_constraint_type == _ConstraintType.ComponentMolality:

        mole_fractions = _PureOrMixtureData._convert_molality(
            remaining_constraints,
            remaining_compounds,
            solvent_mole_fractions,
            solvent_compounds,
        )

    # Every compound must have been assigned a mole fraction.
    if len(mole_fractions) != len(compounds):

        raise ValueError(
            f"The number of mole fractions ({len(mole_fractions)}) does not "
            f"equal the total number of compounds ({len(compounds)})"
        )

    # Make sure we haven't picked up a dimensionless unit by accident.
    for compound_index in mole_fractions:

        if isinstance(mole_fractions[compound_index], unit.Quantity):

            mole_fractions[compound_index] = (
                mole_fractions[compound_index].to(unit.dimensionless).magnitude
            )

    # Sanity check that the composition is normalised.
    total_mol_fraction = sum([value for value in mole_fractions.values()])

    if not np.isclose(total_mol_fraction, 1.0):

        raise ValueError(
            f"The total mole fraction {total_mol_fraction} is not equal to 1.0"
        )

    substance = Substance()

    for compound_index in compounds:

        compound = compounds[compound_index]

        # Compounds present at (effectively) zero mole fraction are omitted.
        if np.isclose(mole_fractions[compound_index], 0.0):
            continue

        substance.add_component(
            component=Component(smiles=compound.smiles),
            amount=MoleFraction(mole_fractions[compound_index]),
        )

    return substance
@staticmethod
def extract_measured_properties(
    node,
    namespace,
    property_definitions,
    global_constraints,
    variable_definitions,
    compounds,
):
    """Extract the measured properties defined by a ThermoML
    PureOrMixtureData node.

    Parameters
    ----------
    node: xml.etree.Element
        The xml node to read.
    namespace: dict of str and str
        The xml namespace.
    property_definitions: dict of int and ThermoMLProperty
        The extracted property definitions.
    global_constraints: list of _Constraint
        The extracted constraints.
    variable_definitions: dict of int and _VariableDefinition
        The extracted variable definitions.
    compounds: dict of int and _Compound
        The extracted compounds.

    Returns
    ----------
    list of ThermoMLProperty
        The extracted measured properties.
    """
    value_nodes = node.findall("ThermoML:NumValues", namespace)

    measured_properties = []

    # Each value_node corresponds to one measured property.
    for value_node in value_nodes:
        constraints = []

        # Track temperature / pressure separately so the thermodynamic
        # state can be assembled once all constraints are known.
        temperature_constraint = None
        pressure_constraint = None

        # Deep-copy the global constraints so per-entry variable values
        # appended below cannot mutate the shared definitions.
        for global_constraint in global_constraints:
            constraint = copy.deepcopy(global_constraint)
            constraints.append(constraint)

            if constraint.type == _ConstraintType.Temperature:
                temperature_constraint = constraint
            elif constraint.type == _ConstraintType.Pressure:
                pressure_constraint = constraint

        # First extract the values of any variable constraints
        variable_nodes = value_node.findall("ThermoML:VariableValue", namespace)

        skip_entry = False

        for variable_node in variable_nodes:
            variable_index = int(
                variable_node.find("./ThermoML:nVarNumber", namespace).text
            )

            if variable_index not in variable_definitions:
                # The property was constrained by an unsupported variable and
                # so will be skipped for now.
                skip_entry = True
                break

            variable_definition = variable_definitions[variable_index]

            variable_value = float(
                variable_node.find("ThermoML:nVarValue", namespace).text
            )
            # The raw value is tagged with the unit implied by the
            # variable's definition.
            value_as_quantity = unit.Quantity(
                variable_value, variable_definition.default_unit
            )

            # Convert the 'variable' into a full constraint entry
            constraint = _Constraint.from_variable(
                variable_definition, value_as_quantity
            )
            constraints.append(constraint)

            if constraint.type == _ConstraintType.Temperature:
                temperature_constraint = constraint
            elif constraint.type == _ConstraintType.Pressure:
                pressure_constraint = constraint

        if skip_entry:
            continue

        # Extract the thermodynamic state that the property was measured at.
        if temperature_constraint is None:
            logging.debug(
                "A property did not report the temperature it "
                "was measured at and will be ignored."
            )
            continue

        temperature = temperature_constraint.value
        # Pressure is optional; a missing value is represented as None.
        pressure = (
            None if pressure_constraint is None else pressure_constraint.value
        )

        thermodynamic_state = ThermodynamicState(
            temperature=temperature, pressure=pressure
        )

        # Now extract the actual values of the measured properties, and their
        # uncertainties
        property_nodes = value_node.findall("ThermoML:PropertyValue", namespace)

        for property_node in property_nodes:
            property_index = int(
                property_node.find("./ThermoML:nPropNumber", namespace).text
            )

            if property_index not in property_definitions:
                # Most likely the property was dropped earlier due to an unsupported phase / type
                continue

            property_definition = property_definitions[property_index]

            uncertainty = _PureOrMixtureData.extract_uncertainty(
                property_node, namespace, property_definition
            )

            # Copy the definition so each measurement gets its own object.
            measured_property = copy.deepcopy(property_definition)
            measured_property.thermodynamic_state = thermodynamic_state

            property_value_node = property_node.find(
                ".//ThermoML:nPropValue", namespace
            )

            measured_property.set_value(
                float(property_value_node.text),
                None if uncertainty is None else float(uncertainty),
            )

            # Build the substance from the full set of constraints; entries
            # whose composition cannot be resolved are dropped.
            mixture = _PureOrMixtureData.build_substance(
                measured_property, constraints, compounds
            )

            if mixture is None:
                continue

            measured_property.substance = mixture
            measured_properties.append(measured_property)

    # By this point we now have the measured properties and the thermodynamic state
    # they were measured at converted to standardised classes.
    return measured_properties
@staticmethod
def from_xml_node(node, namespace, compounds):
    """Extracts all of the data in a ThermoML PureOrMixtureData node.

    Parameters
    ----------
    node: xml.etree.Element
        The xml node to read.
    namespace: dict of str and str
        The xml namespace.
    compounds: dict of int and _Compound
        A list of the already extracted `_Compound`'s.

    Returns
    ----------
    list of ThermoMLProperty or None
        A list of extracted properties, or ``None`` if the node depends on
        unsupported compounds, phases or constraints.
    """
    # Figure out which compounds are going to be associated with
    # the property entries.
    compound_indices = _PureOrMixtureData.extract_compound_indices(
        node, namespace, compounds
    )

    if compound_indices is None:
        # Most likely this entry depended on a non-parsable compound
        # and will be skipped entirely
        return None

    if len(compound_indices) == 0:
        logging.debug("A PureOrMixtureData entry with no compounds was ignored.")
        return None

    phase_nodes = node.findall("./ThermoML:PhaseID/ThermoML:ePhase", namespace)

    # Combine all declared phases into a single flag value.
    all_phases = None

    for phase_node in phase_nodes:
        phase = _phase_from_thermoml_string(phase_node.text)

        if phase == PropertyPhase.Undefined:
            logging.debug(
                f"A property was measured in an unsupported phase "
                f"({phase_node.text}) and will be skipped."
            )
            return None

        all_phases = phase if all_phases is None else all_phases | phase

    # Extract property definitions - values come later!
    property_definitions = _PureOrMixtureData.extract_property_definitions(
        node, namespace, all_phases
    )

    if len(property_definitions) == 0:
        return None

    # Merge the per-property phases with the node-level phases in both
    # directions so every definition carries the full phase flag.
    for property_index in property_definitions:
        all_phases |= property_definitions[property_index].phase
        property_definitions[property_index].phase |= all_phases

    # Extract any constraints on the system e.g pressure, temperature
    global_constraints = _PureOrMixtureData.extract_global_constraints(
        node, namespace, compounds
    )

    if global_constraints is None:
        return None

    # Extract any variables set on the system e.g pressure, temperature
    # Only the definition entry and not the value of the variable is extracted
    variable_definitions = _PureOrMixtureData.extract_variable_definitions(
        node, namespace, compounds
    )

    if len(global_constraints) == 0 and len(variable_definitions) == 0:
        logging.debug("A PureOrMixtureData entry with no constraints was ignored.")
        return None

    # Restrict the compound map to those referenced by this entry.
    used_compounds = {}

    for compound_index in compounds:
        if compound_index not in compound_indices:
            continue

        used_compounds[compound_index] = compounds[compound_index]

    measured_properties = _PureOrMixtureData.extract_measured_properties(
        node,
        namespace,
        property_definitions,
        global_constraints,
        variable_definitions,
        used_compounds,
    )

    return measured_properties
class ThermoMLProperty:
    """A wrapper around a ThermoML Property node, holding both the parsed
    definition of a property (type, phase, uncertainty definitions) and,
    once set, its measured value and uncertainty."""

    class SoluteStandardState(Enum):
        """Describes the standard state of a solute."""

        # NOTE: these must be plain strings so that
        # ``SoluteStandardState(node.text)`` can match the text content of an
        # ``eStandardState`` node. They were previously one-element tuples
        # (trailing commas), which can never equal a string and so silently
        # mapped every state to ``Undefined``.
        Undefined = "Undefined"
        InfiniteDilutionSolute = "Infinite dilution solute"
        PureCompound = "Pure compound"
        PureLiquidSolute = "Pure liquid solute"
        StandardMolality = "Standard molality (1 mol/kg) solute"

        @staticmethod
        def from_node(node):
            """Converts an `eStandardState` node to a
            `ThermoMLProperty.SoluteStandardState`.

            Parameters
            ----------
            node: xml.etree.Element
                The xml node to convert.

            Returns
            ----------
            ThermoMLProperty.SoluteStandardState
                The converted state type, or ``Undefined`` when the node's
                text is not a recognised standard state.
            """
            try:
                standard_state = ThermoMLProperty.SoluteStandardState(node.text)
            except (KeyError, ValueError):
                standard_state = ThermoMLProperty.SoluteStandardState.Undefined

            # BUGFIX: this previously compared against
            # ``_ConstraintType.Undefined`` - a member of an unrelated enum -
            # so the debug message could never be emitted.
            if standard_state == ThermoMLProperty.SoluteStandardState.Undefined:
                logging.debug(
                    f"{node.tag}->{node.text} is an unsupported "
                    f"solute standard state type."
                )

            return standard_state

    def __init__(self, type_string):
        # The ThermoML name of this property type.
        self.type_string = type_string

        # Populated while parsing / after measurement extraction.
        self.thermodynamic_state = None
        self.phase = PropertyPhase.Undefined

        self.substance = None

        self.value = None
        self.uncertainty = None

        self.source = None

        # The nPropNumber index of this property within its parent node.
        self.index = None

        self.solute_standard_state = ThermoMLProperty.SoluteStandardState.Undefined
        # Indices of any compounds flagged as solvents.
        self.solvents = []

        self.property_uncertainty_definitions = {}
        self.combined_uncertainty_definitions = {}

        # The unit implied by the ThermoML property name.
        self.default_unit = None

        # Index of the compound this property refers to (e.g. the activity
        # coefficient *of compound 2*). Previously assigned twice; the
        # duplicate assignment was removed.
        self.target_compound_index = None

        # The experimental method name, populated by ``from_xml_node``.
        self.method_name = None

    @staticmethod
    def extract_uncertainty_definitions(
        node,
        namespace,
        property_uncertainty_definitions,
        combined_uncertainty_definitions,
    ):
        """Extract any property or combined uncertainties from a property xml node.

        Parameters
        ----------
        node: Element
            The xml node to convert.
        namespace: dict of str and str
            The xml namespace.
        property_uncertainty_definitions: dict of int and _PropertyUncertainty
            Filled (in place) with the extracted property uncertainties,
            keyed by their index.
        combined_uncertainty_definitions: dict of int and _CombinedUncertainty
            Filled (in place) with the extracted combined uncertainties,
            keyed by their index.
        """
        property_nodes = node.findall("ThermoML:CombinedUncertainty", namespace)

        for property_node in property_nodes:
            if property_node is None:
                continue

            uncertainty_definition = _CombinedUncertainty.from_xml(
                property_node, namespace
            )

            if uncertainty_definition is None:
                continue

            combined_uncertainty_definitions[
                uncertainty_definition.index
            ] = uncertainty_definition

        property_nodes = node.findall("ThermoML:PropUncertainty", namespace)

        for property_node in property_nodes:
            if property_node is None:
                continue

            uncertainty_definition = _PropertyUncertainty.from_xml(
                property_node, namespace
            )

            if uncertainty_definition is None:
                continue

            property_uncertainty_definitions[
                uncertainty_definition.index
            ] = uncertainty_definition

    @classmethod
    def from_xml_node(cls, node, namespace, parent_phases):
        """Creates a ThermoMLProperty from an xml node.

        Parameters
        ----------
        node: Element
            The xml node to convert.
        namespace: dict of str and str
            The xml namespace.
        parent_phases: PropertyPhase
            The phases specified in the parent PureOrMixtureData node.

        Returns
        ----------
        ThermoMLProperty or None
            The created property, or ``None`` if the property is of an
            unsupported type or phase.
        """
        # Gather up all possible identifiers
        index_node = node.find("ThermoML:nPropNumber", namespace)
        property_index = int(index_node.text)

        phase_node = node.find("./ThermoML:PropPhaseID//ThermoML:ePropPhase", namespace)

        phase = PropertyPhase.Undefined | parent_phases

        if phase_node is not None:
            phase |= _phase_from_thermoml_string(phase_node.text)

        reference_phase_node = node.find(
            "./ThermoML:RefPhaseID//ThermoML:eRefPhase", namespace
        )

        if reference_phase_node is not None:
            phase |= _phase_from_thermoml_string(reference_phase_node.text)

        if phase == PropertyPhase.Undefined:
            # BUGFIX: ``phase_node`` may legitimately be ``None`` here (the
            # phase may come only from the parent node); guard before reading
            # its text to avoid an AttributeError.
            phase_text = None if phase_node is None else phase_node.text
            logging.debug(
                f"A property was measured in an unsupported phase "
                f"({phase_text}) and will be skipped."
            )
            return None

        property_group_node = node.find(
            "./ThermoML:Property-MethodID//ThermoML:PropertyGroup//*", namespace
        )

        property_name_node = property_group_node.find("./ThermoML:ePropName", namespace)
        method_name_node = property_group_node.find("./ThermoML:eMethodName", namespace)

        if method_name_node is None:
            # Fall back to the free-form method name entry.
            method_name_node = property_group_node.find(
                "./ThermoML:sMethodName", namespace
            )

        if method_name_node is None or property_name_node is None:
            raise RuntimeError("A property does not have a name / method entry.")

        if property_name_node.text not in ThermoMLDataSet.registered_properties:
            logging.debug(
                f"An unsupported property was found "
                f"({property_name_node.text}) and will be skipped."
            )
            return None

        registered_plugin = ThermoMLDataSet.registered_properties[
            property_name_node.text
        ]

        # The plugin must support every phase flag the property carries.
        if (registered_plugin.supported_phases & phase) != phase:
            logging.debug(
                f"The {property_name_node.text} property is currently only supported "
                f"when measured in the {str(registered_plugin.supported_phases)} phase, "
                f"and not the {str(phase)} phase."
            )
            return None

        return_value = cls(property_name_node.text)

        return_value.index = property_index
        return_value.phase = phase

        return_value.default_unit = _unit_from_thermoml_string(property_name_node.text)
        return_value.method_name = method_name_node.text

        property_uncertainty_definitions = {}
        combined_uncertainty_definitions = {}

        cls.extract_uncertainty_definitions(
            node,
            namespace,
            property_uncertainty_definitions,
            combined_uncertainty_definitions,
        )

        return_value.combined_uncertainty_definitions = combined_uncertainty_definitions
        return_value.property_uncertainty_definitions = property_uncertainty_definitions

        solvent_index_nodes = node.findall(
            "./ThermoML:Solvent//ThermoML:nOrgNum", namespace
        )

        if solvent_index_nodes is not None:
            for solvent_index_node in solvent_index_nodes:
                return_value.solvents.append(int(solvent_index_node.text))

        # The solute standard state appears to describe which a solute should
        # be present in only trace amounts. It only seems to be relevant for
        # activity based properties.
        standard_state_node = node.find("./ThermoML:eStandardState", namespace)

        if standard_state_node is not None:
            return_value.solute_standard_state = (
                ThermoMLProperty.SoluteStandardState.from_node(standard_state_node)
            )

        # Property->Property-MethodID->RegNum describes which compound is referred
        # to if the property is based on one of the compounds e.g. the activity
        # coefficient of compound 2.
        target_compound_node = node.find(
            "./ThermoML:Property-MethodID/ThermoML:RegNum/ThermoML:nOrgNum", namespace
        )

        if target_compound_node is not None:
            return_value.target_compound_index = int(target_compound_node.text)

        return return_value

    def set_value(self, value, uncertainty):
        """Set the value and uncertainty of this property, adding units if necessary.

        Parameters
        ----------
        value: float or unit.Quantity
            The value of the property
        uncertainty: float or unit.Quantity, optional
            The uncertainty in the value.
        """
        value_quantity = value

        # Bare floats are tagged with the unit implied by the property name.
        if not isinstance(value_quantity, unit.Quantity):
            value_quantity = value * self.default_unit

        self.value = value_quantity

        if uncertainty is not None:
            uncertainty_quantity = uncertainty

            if not isinstance(uncertainty_quantity, unit.Quantity):
                uncertainty_quantity = uncertainty * self.default_unit

            self.uncertainty = uncertainty_quantity
class ThermoMLDataSet(PhysicalPropertyDataSet):
    """A dataset of physical property measurements created from a ThermoML dataset.

    Examples
    --------
    For example, we can use the DOI `10.1016/j.jct.2005.03.012` as a key
    for retrieving the dataset from the ThermoML Archive:

    >>> dataset = ThermoMLDataSet.from_doi('10.1016/j.jct.2005.03.012')

    You can also specify multiple ThermoML Archive keys to create a dataset
    from multiple ThermoML files:

    >>> thermoml_keys = ['<KEY>', '10.1021/acs.jced.5b00474']
    >>> dataset = ThermoMLDataSet.from_doi(*thermoml_keys)
    """

    # Maps a ThermoML property name to the plugin which can convert it.
    registered_properties = {}

    def __init__(self):
        """Constructs a new ThermoMLDataSet object."""
        super().__init__()

    @classmethod
    def from_doi(cls, *doi_list):
        """Load a ThermoML data set from a list of DOIs

        Parameters
        ----------
        doi_list: str
            The list of DOIs to pull data from

        Returns
        -------
        ThermoMLDataSet
            The loaded data set, or ``None`` if no DOI yielded any data.
        """
        return_value = None

        for doi in doi_list:
            # E.g https://trc.nist.gov/ThermoML/10.1016/j.jct.2016.12.009.xml
            doi_url = f"https://trc.nist.gov/ThermoML/{doi}.xml"

            data_set = cls._from_url(doi_url, MeasurementSource(doi=doi))

            if data_set is None or len(data_set) == 0:
                continue

            if return_value is None:
                return_value = data_set
            else:
                return_value.merge(data_set)

        return return_value

    @classmethod
    def from_url(cls, *url_list):
        """Load a ThermoML data set from a list of URLs

        Parameters
        ----------
        url_list: str
            The list of URLs to pull data from

        Returns
        -------
        ThermoMLDataSet
            The loaded data set, or ``None`` if no URL yielded any data.
        """
        return_value = None

        for url in url_list:
            data_set = cls._from_url(url)

            if data_set is None or len(data_set) == 0:
                continue

            if return_value is None:
                return_value = data_set
            else:
                return_value.merge(data_set)

        return return_value

    @classmethod
    def _from_url(cls, url, source=None):
        """Load a ThermoML data set from a given URL

        Parameters
        ----------
        url: str
            The URL to pull data from
        source: Source, optional
            An optional source which gives more information (e.g DOIs) for the url.

        Returns
        ----------
        ThermoMLDataSet
            The loaded data set, or ``None`` if the URL could not be fetched.
        """
        if source is None:
            source = MeasurementSource(reference=url)

        return_value = None

        try:
            request = requests.get(url)
            request.raise_for_status()

            # Handle the case where ThermoML returns a 404 error code, but rather
            # redirects to an error page with code 200.
            if request.text.startswith("<html>"):
                raise HTTPError(url, 404, "Not found", None, None)

            return_value = cls.from_xml(request.text, source)

        except (HTTPError, requests.exceptions.HTTPError):
            # BUGFIX: the message previously read "No ThermoML file could
            # not be found" - a double negative.
            logger.warning(f"No ThermoML file could be found at {url}")

        return return_value

    @classmethod
    def from_file(cls, *file_list):
        """Load a ThermoML data set from a list of files

        Parameters
        ----------
        file_list: str
            The list of files to pull data from

        Returns
        -------
        ThermoMLDataSet
            The loaded data set, or ``None`` if no file yielded any data.
        """
        return_value = None

        # NOTE: an unused ``counter`` variable was removed here.
        for file in file_list:
            data_set = cls._from_file(file)

            if data_set is None or len(data_set) == 0:
                continue

            if return_value is None:
                return_value = data_set
            else:
                return_value.merge(data_set)

        return return_value

    @classmethod
    def _from_file(cls, path):
        """Load a ThermoML data set from a given file

        Parameters
        ----------
        path: str
            The file path to pull data from

        Returns
        -------
        ThermoMLDataSet
            The loaded data set, or ``None`` if the file does not exist.
        """
        source = MeasurementSource(reference=path)
        return_value = None

        try:
            with open(path) as file:
                return_value = ThermoMLDataSet.from_xml(file.read(), source)

        except FileNotFoundError:
            # BUGFIX: double negative removed from the log message.
            logger.warning(f"No ThermoML file could be found at {path}")

        return return_value

    @classmethod
    def from_xml(cls, xml, default_source):
        """Load a ThermoML data set from an xml object.

        Parameters
        ----------
        xml: str
            The xml string to parse.
        default_source: Source
            The source to use if one cannot be parsed from the archive itself.

        Returns
        -------
        ThermoMLDataSet
            The loaded ThermoML data set, or ``None`` if the xml could not
            be parsed.
        """
        root_node = ElementTree.fromstring(xml)

        if root_node is None:
            logger.warning("The ThermoML XML document could not be parsed.")
            return None

        if root_node.tag.find("DataReport") < 0:
            logger.warning(
                "The ThermoML XML document does not contain the expected root node."
            )
            return None

        # Extract the namespace that will prefix all type names
        namespace_string = re.search(r"{.*\}", root_node.tag).group(0)[1:-1]
        namespace = {"ThermoML": namespace_string}

        # Attempt to find a DOI for this archive
        doi_node = root_node.find("ThermoML:Citation/ThermoML:sDOI", namespace)

        if doi_node is not None:
            source = MeasurementSource(doi=doi_node.text)
        else:
            source = default_source

        return_value = ThermoMLDataSet()
        compounds = {}

        # Extract the base compounds present in the xml file
        for node in root_node.findall("ThermoML:Compound", namespace):
            compound = _Compound.from_xml_node(node, namespace)

            if compound is None:
                continue

            if compound.index in compounds:
                raise RuntimeError(
                    "A ThermoML data set contains two compounds with the same index"
                )

            compounds[compound.index] = compound

        # Pull out any and all properties in the file.
        for node in root_node.findall("ThermoML:PureOrMixtureData", namespace):
            properties = _PureOrMixtureData.from_xml_node(node, namespace, compounds)

            if properties is None or len(properties) == 0:
                continue

            for measured_property in properties:
                # Convert the intermediate ThermoMLProperty into the final
                # registered property class via its plugin.
                registered_plugin = ThermoMLDataSet.registered_properties[
                    measured_property.type_string
                ]

                mapped_property = registered_plugin.conversion_function(
                    measured_property
                )
                mapped_property.source = source

                return_value.add_properties(mapped_property)

        return return_value
|
jaketanderson/openff-evaluator
|
openff/evaluator/layers/workflow.py
|
<filename>openff/evaluator/layers/workflow.py
"""Provides base classes for calculation layers which will
use the built-in workflow framework to estimate the set of
physical properties.
"""
import abc
import copy
import logging
import os
from openff.evaluator.attributes import UNDEFINED, Attribute
from openff.evaluator.datasets import CalculationSource
from openff.evaluator.layers import (
CalculationLayer,
CalculationLayerResult,
CalculationLayerSchema,
)
from openff.evaluator.workflow import Workflow, WorkflowGraph, WorkflowSchema
logger = logging.getLogger(__name__)
class WorkflowCalculationLayer(CalculationLayer, abc.ABC):
    """A calculation layer which uses the built-in workflow
    framework to estimate sets of physical properties.
    """

    @staticmethod
    def _get_workflow_metadata(
        working_directory,
        physical_property,
        force_field_path,
        parameter_gradient_keys,
        storage_backend,
        calculation_schema,
    ):
        """Returns the global metadata to pass to the workflow.

        Parameters
        ----------
        working_directory: str
            The local directory in which to store all local,
            temporary calculation data from this workflow.
        physical_property : PhysicalProperty
            The property that the workflow will estimate.
        force_field_path : str
            The path to the force field parameters to use in the workflow.
        parameter_gradient_keys: list of ParameterGradientKey
            A list of references to all of the parameters which all observables
            should be differentiated with respect to.
        storage_backend: StorageBackend
            The backend used to store / retrieve data from previous calculations.
        calculation_schema: WorkflowCalculationSchema
            The schema containing all of this layers options.

        Returns
        -------
        dict of str and Any, optional
            The global metadata to make available to a workflow.
            Returns `None` if the required metadata could not be
            found / assembled.
        """
        # An absolute tolerance takes precedence over a relative one.
        target_uncertainty = None

        if calculation_schema.absolute_tolerance != UNDEFINED:
            target_uncertainty = calculation_schema.absolute_tolerance
        elif calculation_schema.relative_tolerance != UNDEFINED:
            target_uncertainty = (
                physical_property.uncertainty * calculation_schema.relative_tolerance
            )

        global_metadata = Workflow.generate_default_metadata(
            physical_property,
            force_field_path,
            parameter_gradient_keys,
            target_uncertainty,
        )

        return global_metadata

    @classmethod
    def _build_workflow_graph(
        cls,
        working_directory,
        storage_backend,
        properties,
        force_field_path,
        parameter_gradient_keys,
        options,
    ):
        """Construct a graph of the protocols needed to calculate a set of
        properties.

        Parameters
        ----------
        working_directory: str
            The local directory in which to store all local,
            temporary calculation data from this graph.
        storage_backend: StorageBackend
            The backend used to store / retrieve data from previous calculations.
        properties : list of PhysicalProperty
            The properties to attempt to compute.
        force_field_path : str
            The path to the force field parameters to use in the workflow.
        parameter_gradient_keys: list of ParameterGradientKey
            A list of references to all of the parameters which all observables
            should be differentiated with respect to.
        options: RequestOptions
            The options to run the workflows with.

        Returns
        -------
        tuple of WorkflowGraph and dict of str and CalculationSource
            The built workflow graph, and the provenance of each workflow
            keyed by its uuid.
        """
        provenance = {}
        workflows = []

        for index, physical_property in enumerate(properties):
            # BUGFIX: log a one-based index so the final message reads
            # "Building workflow N of N" rather than "N - 1 of N".
            logger.info(f"Building workflow {index + 1} of {len(properties)}")

            property_type = type(physical_property).__name__

            # Make sure a schema has been defined for this class of property
            # and this layer.
            if (
                property_type not in options.calculation_schemas
                or cls.__name__ not in options.calculation_schemas[property_type]
            ):
                continue

            schema = options.calculation_schemas[property_type][cls.__name__]

            # Make sure the calculation schema is the correct type for this layer.
            assert isinstance(schema, WorkflowCalculationSchema)
            assert isinstance(schema, cls.required_schema_type())

            global_metadata = cls._get_workflow_metadata(
                working_directory,
                physical_property,
                force_field_path,
                parameter_gradient_keys,
                storage_backend,
                schema,
            )

            if global_metadata is None:
                # Make sure we have metadata returned for this
                # property, e.g. we have data to reweight if
                # required.
                continue

            workflow = Workflow(global_metadata, physical_property.id)
            workflow.schema = schema.workflow_schema

            workflows.append(workflow)

        workflow_graph = WorkflowGraph()
        workflow_graph.add_workflows(*workflows)

        # Record, per workflow, which layer and schema produced it.
        for workflow in workflows:
            provenance[workflow.uuid] = CalculationSource(
                fidelity=cls.__name__, provenance=workflow.schema.json()
            )

        return workflow_graph, provenance

    @staticmethod
    def workflow_to_layer_result(queued_properties, provenance, workflow_results, **_):
        """Converts a list of `WorkflowResult` to a list of `CalculationLayerResult`
        objects.

        Parameters
        ----------
        queued_properties: list of PhysicalProperty
            The properties being estimated by this layer
        provenance: dict of str and str
            The provenance of each property.
        workflow_results: list of WorkflowResult
            The results of each workflow.

        Returns
        -------
        list of CalculationLayerResult
            The calculation layer result objects.
        """
        properties_by_id = {x.id: x for x in queued_properties}
        results = []

        for workflow_result in workflow_results:
            calculation_result = CalculationLayerResult()
            calculation_result.exceptions.extend(workflow_result.exceptions)

            results.append(calculation_result)

            # A workflow which raised exceptions yields no property value.
            if len(calculation_result.exceptions) > 0:
                continue

            physical_property = properties_by_id[workflow_result.workflow_id]
            # Deep copy so the queued property object is never mutated.
            physical_property = copy.deepcopy(physical_property)

            physical_property.source = provenance[physical_property.id]
            physical_property.value = workflow_result.value.value
            physical_property.uncertainty = workflow_result.value.error

            if len(workflow_result.gradients) > 0:
                physical_property.gradients = workflow_result.gradients

            calculation_result.physical_property = physical_property
            calculation_result.data_to_store.extend(workflow_result.data_to_store)

        return results

    @classmethod
    def _schedule_calculation(
        cls,
        calculation_backend,
        storage_backend,
        layer_directory,
        batch,
    ):
        # Store a temporary copy of the force field for protocols to easily access.
        force_field_source = storage_backend.retrieve_force_field(batch.force_field_id)
        force_field_path = os.path.join(layer_directory, batch.force_field_id)

        with open(force_field_path, "w") as file:
            file.write(force_field_source.json())

        workflow_graph, provenance = cls._build_workflow_graph(
            layer_directory,
            storage_backend,
            batch.queued_properties,
            force_field_path,
            batch.parameter_gradient_keys,
            batch.options,
        )

        workflow_futures = workflow_graph.execute(layer_directory, calculation_backend)

        # Collate the workflow futures into layer results on the backend.
        future = calculation_backend.submit_task(
            WorkflowCalculationLayer.workflow_to_layer_result,
            batch.queued_properties,
            provenance,
            workflow_futures,
        )

        return [future]
class WorkflowCalculationSchema(CalculationLayerSchema):
    """A schema coupling a `CalculationLayer`'s options with the workflow
    schema it should execute when estimating a given class of physical
    properties via the built-in workflow framework.
    """

    workflow_schema = Attribute(
        docstring="The workflow schema to use when estimating properties.",
        type_hint=WorkflowSchema,
        default_value=UNDEFINED,
    )

    def validate(self, attribute_type=None):
        # Validate the layer options first, then the nested workflow schema.
        super().validate(attribute_type)
        self.workflow_schema.validate()
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_forcefields/test_gradients.py
|
<reponame>jaketanderson/openff-evaluator
import numpy as np
import pytest
from openff.units import unit
from openff.evaluator.forcefield import ParameterGradient, ParameterGradientKey
def test_gradient_addition():
    # Two gradients with matching keys may be summed component-wise.
    shared_key = ParameterGradientKey("vdW", "[#1:1]", "epsilon")

    first = ParameterGradient(shared_key, 1.0 * unit.kelvin)
    second = ParameterGradient(shared_key, 2.0 * unit.kelvin)

    total = first + second
    assert np.isclose(total.value.to(unit.kelvin).magnitude, 3.0)

    # Mismatched keys and non-gradient operands must raise.
    mismatched = ParameterGradient(
        ParameterGradientKey("vdW", "[#6:1]", "epsilon"), 1.0 * unit.kelvin
    )

    with pytest.raises(ValueError):
        first + mismatched

    with pytest.raises(ValueError):
        first + 1.0
def test_gradient_subtraction():
    # Subtraction is defined (and anti-symmetric) for matching keys.
    shared_key = ParameterGradientKey("vdW", "[#1:1]", "epsilon")

    first = ParameterGradient(shared_key, 1.0 * unit.kelvin)
    second = ParameterGradient(shared_key, 2.0 * unit.kelvin)

    difference = first - second
    assert np.isclose(difference.value.to(unit.kelvin).magnitude, -1.0)

    difference = second - first
    assert np.isclose(difference.value.to(unit.kelvin).magnitude, 1.0)

    # Mismatched keys and non-gradient operands must raise, in either order.
    mismatched = ParameterGradient(
        ParameterGradientKey("vdW", "[#6:1]", "epsilon"), 1.0 * unit.kelvin
    )

    with pytest.raises(ValueError):
        first - mismatched

    with pytest.raises(ValueError):
        mismatched - first

    with pytest.raises(ValueError):
        first - 1.0
def test_gradient_multiplication():
    # Scalar multiplication works from either side.
    gradient = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 1.0 * unit.kelvin
    )

    doubled = gradient * 2.0
    assert np.isclose(doubled.value.to(unit.kelvin).magnitude, 2.0)

    tripled = 3.0 * gradient
    assert np.isclose(tripled.value.to(unit.kelvin).magnitude, 3.0)

    # Gradient-by-gradient multiplication is undefined.
    other = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 1.0 * unit.kelvin
    )

    with pytest.raises(ValueError):
        gradient * other
def test_gradient_division():
    # Division by a scalar is supported.
    gradient = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 2.0 * unit.kelvin
    )

    halved = gradient / 2.0
    assert np.isclose(halved.value.to(unit.kelvin).magnitude, 1.0)

    # Gradient-by-gradient division is undefined.
    other = ParameterGradient(
        ParameterGradientKey("vdW", "[#1:1]", "epsilon"), 1.0 * unit.kelvin
    )

    with pytest.raises(ValueError):
        gradient / other
|
jaketanderson/openff-evaluator
|
integration-tests/paprika/run.py
|
<gh_stars>10-100
import os
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.units import unit
from openff import evaluator
from openff.evaluator.attributes import UNDEFINED
from openff.evaluator.backends import QueueWorkerResources
from openff.evaluator.backends.dask import DaskLSFBackend
from openff.evaluator.datasets.taproom import TaproomDataSet
from openff.evaluator.forcefield import SmirnoffForceFieldSource, TLeapForceFieldSource
from openff.evaluator.properties import HostGuestBindingAffinity
from openff.evaluator.utils import get_data_filename, setup_timestamp_logging
from openff.evaluator.utils.utils import temporarily_change_directory
from openff.evaluator.workflow import Workflow
def main():
    """Run a single host ('acd') / guest ('bam') binding affinity
    calculation with the default paprika workflow schema on an LSF
    cluster, storing the results under a version-stamped directory."""
    setup_timestamp_logging()

    # Retrieve the current version.
    version = evaluator.__version__.replace(".", "-").replace("v", "")

    if "+" in version:
        # presumably a '+' marks a development build - TODO confirm
        version = "latest"

    # Create a new directory to run the current versions results in.
    # NOTE(review): this raises if the directory already exists.
    os.makedirs(os.path.join(version, "results"))

    with temporarily_change_directory(version):
        # Load in the force field
        force_field = ForceField(
            "openff-1.2.0.offxml",
            get_data_filename("forcefield/tip3p.offxml"),
        )
        force_field_source = SmirnoffForceFieldSource.from_object(force_field)
        # Serialize the force field so the workflow protocols can load it.
        force_field_source.json("force-field.json")

        # Load in the data set, retaining only a specific host / guest pair.
        binding_affinity = TaproomDataSet(
            host_codes=["acd"],
            guest_codes=["bam"],
            default_ionic_strength=150 * unit.millimolar,
        ).properties[0]

        # Set up the calculation
        schema = HostGuestBindingAffinity.default_paprika_schema(
            n_solvent_molecules=2000
        ).workflow_schema
        # Pick the concrete build-system protocol matching the force
        # field source type.
        schema.replace_protocol_types(
            {
                "BaseBuildSystem": (
                    "BuildSmirnoffSystem"
                    if isinstance(force_field_source, SmirnoffForceFieldSource)
                    else "BuildTLeapSystem"
                    if isinstance(force_field_source, TLeapForceFieldSource)
                    else "BaseBuildSystem"
                )
            }
        )

        metadata = Workflow.generate_default_metadata(
            binding_affinity, "force-field.json", UNDEFINED
        )
        workflow = Workflow.from_schema(schema, metadata, "acd_bam")

        # Run the calculation
        with DaskLSFBackend(
            minimum_number_of_workers=1,
            maximum_number_of_workers=50,
            resources_per_worker=QueueWorkerResources(
                number_of_gpus=1,
                preferred_gpu_toolkit=QueueWorkerResources.GPUToolkit.CUDA,
                per_thread_memory_limit=5 * unit.gigabyte,
                wallclock_time_limit="05:59",
            ),
            setup_script_commands=[
                "conda activate openff-evaluator-paprika",
                "module load cuda/10.0",
            ],
            queue_name="gpuqueue",
        ) as calculation_backend:
            # Block until the workflow finishes.
            results = workflow.execute(
                root_directory="workflow", calculation_backend=calculation_backend
            ).result()

        # Save the results
        results.json("results.json", format=True)


if __name__ == "__main__":
    main()
|
jaketanderson/openff-evaluator
|
openff/evaluator/layers/plugins.py
|
"""
An API for registering new calculation layers.
Attributes
----------
registered_calculation_layers: dict of str and type of CalculationLayer
The calculation layers which have been registered as being
available to use in property estimations.
registered_calculation_schemas: dict of str and dict of str and type of CalculationLayerSchema
The default calculation schemas to use when estimating a class of properties (e.g. `Density`)
with a specific calculation layer (e.g. `SimulationLayer`).
The dictionary is of the form `registered_calculation_schemas['LayerType']['PropertyType']`
"""
from collections import defaultdict
from typing import Dict, Type
from openff.evaluator.datasets import PhysicalProperty
from openff.evaluator.layers import CalculationLayer, CalculationLayerSchema
# The calculation layers which have been registered as available for use in
# property estimations, keyed by the layer's class name.
registered_calculation_layers: Dict[str, Type[CalculationLayer]] = {}

# The default schemas to use when estimating a class of properties with a
# particular layer, accessed as
# ``registered_calculation_schemas['LayerType']['PropertyType']``.
registered_calculation_schemas: Dict[
    str, Dict[str, CalculationLayerSchema]
] = defaultdict(dict)
def register_calculation_layer(layer_class):
    """Register a class as a calculation layer which may be used
    in property calculations.

    Parameters
    ----------
    layer_class: type of CalculationLayer
        The calculation layer to register.

    Raises
    ------
    ValueError
        If a layer with the same class name has already been registered.
    """
    assert issubclass(layer_class, CalculationLayer)
    assert issubclass(layer_class.required_schema_type(), CalculationLayerSchema)

    layer_name = layer_class.__name__

    if layer_name in registered_calculation_layers:
        raise ValueError(f"The {layer_class} layer is already registered.")

    registered_calculation_layers[layer_name] = layer_class
def register_calculation_schema(property_class, layer_class, schema):
    """Register the default calculation schema to use when a class of
    properties (e.g. `Density`) is estimated with a particular calculation
    layer (e.g. the `SimulationLayer`).

    Parameters
    ----------
    property_class: type of PhysicalProperty
        The class of properties to associate with the
        specified `calculation_layer` and `property_class`.
    layer_class: type of CalculationLayer
        The calculation layer to associate the schema with.
    schema: CalculationLayerSchema or Callable[[CalculationLayerSchema], CalculationLayerSchema]
        Either the calculation schema to use, or a function which
        will create the schema from an existing CalculationLayerSchema.
    """
    assert issubclass(property_class, PhysicalProperty)
    assert issubclass(layer_class, CalculationLayer)
    assert isinstance(schema, CalculationLayerSchema) or callable(schema)

    # Only concrete subclasses may be registered, not the base classes.
    assert property_class != PhysicalProperty
    assert layer_class != CalculationLayer

    layer_schemas = registered_calculation_schemas[layer_class.__name__]
    layer_schemas[property_class.__name__] = schema
def calculation_layer():
    """A class decorator factory which registers the decorated class as a
    calculation layer available for use in property calculations.
    """

    def _wrap(layer_cls):
        # Registration happens as a side effect; the class itself is
        # returned unchanged.
        register_calculation_layer(layer_cls)
        return layer_cls

    return _wrap
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_layers/test_layers.py
|
<reponame>jaketanderson/openff-evaluator
import json
import tempfile
from os import makedirs, path
from openff.evaluator.backends.dask import DaskLocalCluster
from openff.evaluator.client import RequestOptions
from openff.evaluator.layers import (
CalculationLayer,
CalculationLayerResult,
CalculationLayerSchema,
calculation_layer,
)
from openff.evaluator.properties import Density
from openff.evaluator.server import server
from openff.evaluator.storage import LocalFileStorage
from openff.evaluator.storage.data import StoredSimulationData
from openff.evaluator.tests.utils import create_dummy_property
from openff.evaluator.utils.exceptions import EvaluatorException
from openff.evaluator.utils.observables import ObservableFrame
from openff.evaluator.utils.serialization import TypedJSONDecoder, TypedJSONEncoder
from openff.evaluator.utils.utils import temporarily_change_directory
@calculation_layer()
class DummyCalculationLayer(CalculationLayer):
    """A minimal calculation layer used to exercise the base
    calculation layer machinery in tests.
    """

    @classmethod
    def required_schema_type(cls):
        return CalculationLayerSchema

    @classmethod
    def _schedule_calculation(
        cls, calculation_backend, storage_backend, layer_directory, batch
    ):
        # Submit one task per outcome being exercised: a clean success,
        # a reported failure, and an unhandled exception.
        success_future = calculation_backend.submit_task(
            DummyCalculationLayer.process_successful_property,
            batch.queued_properties[0],
            layer_directory,
        )
        failure_future = calculation_backend.submit_task(
            DummyCalculationLayer.process_failed_property,
            batch.queued_properties[1],
        )
        exception_future = calculation_backend.submit_task(
            DummyCalculationLayer.return_bad_result,
            batch.queued_properties[0],
            layer_directory,
        )

        return [success_future, failure_future, exception_future]

    @staticmethod
    def process_successful_property(physical_property, layer_directory, **_):
        """Return a result as if the property had been successfully estimated."""
        data_directory = path.join(layer_directory, "good_dummy_data")
        makedirs(data_directory, exist_ok=True)

        stored_data = StoredSimulationData()
        stored_data.substance = physical_property.substance
        stored_data.thermodynamic_state = physical_property.thermodynamic_state
        stored_data.property_phase = physical_property.phase
        stored_data.force_field_id = ""
        stored_data.coordinate_file_name = ""
        stored_data.trajectory_file_name = ""
        stored_data.observables = ObservableFrame()
        stored_data.statistical_inefficiency = 1.0
        stored_data.number_of_molecules = 10
        stored_data.source_calculation_id = ""

        stored_data_path = path.join(layer_directory, "good_dummy_data.json")

        with open(stored_data_path, "w") as data_file:
            json.dump(stored_data, data_file, cls=TypedJSONEncoder)

        result = CalculationLayerResult()
        result.physical_property = physical_property
        result.data_to_store = [(stored_data_path, data_directory)]

        return result

    @staticmethod
    def process_failed_property(physical_property, **_):
        """Return a result as if the property could not be estimated."""
        result = CalculationLayerResult()
        result.physical_property = physical_property
        result.exceptions = [EvaluatorException(message="Failure Message")]

        return result

    @staticmethod
    def return_bad_result(physical_property, layer_directory, **_):
        """Return a result which leads to an unhandled exception."""
        data_directory = path.join(layer_directory, "bad_dummy_data")
        makedirs(data_directory, exist_ok=True)

        # Note: the stored object is left unpopulated on purpose so that
        # downstream processing of the result fails.
        stored_data = StoredSimulationData()
        stored_data_path = path.join(layer_directory, "bad_dummy_data.json")

        with open(stored_data_path, "w") as data_file:
            json.dump(stored_data, data_file, cls=TypedJSONEncoder)

        result = CalculationLayerResult()
        result.physical_property = physical_property
        result.data_to_store = [(stored_data_path, data_directory)]

        return result
def test_base_layer():
    """Exercise `schedule_calculation` on the dummy layer, expecting one
    estimated property and two exceptions back from the three dummy tasks."""
    queued_properties = [
        create_dummy_property(Density),
        create_dummy_property(Density),
    ]

    request_options = RequestOptions()

    batch = server.Batch()
    batch.queued_properties = queued_properties
    batch.options = request_options
    batch.force_field_id = ""
    batch.options.calculation_schemas = {
        "Density": {"DummyCalculationLayer": CalculationLayerSchema()}
    }

    with tempfile.TemporaryDirectory() as temporary_directory:
        with temporarily_change_directory(temporary_directory):
            # A local cluster to run the dummy tasks on.
            calculation_backend = DaskLocalCluster()
            calculation_backend.start()

            # A simple storage backend for the layer to store outputs in.
            storage_backend = LocalFileStorage()

            layer_directory = "dummy_layer"
            makedirs(layer_directory)

            def validate_callback(returned_request):
                assert len(returned_request.estimated_properties) == 1
                assert len(returned_request.exceptions) == 2

            layer = DummyCalculationLayer()
            layer.schedule_calculation(
                calculation_backend,
                storage_backend,
                layer_directory,
                batch,
                validate_callback,
                True,
            )
def test_serialize_layer_result():
    """Tests that the `CalculationLayerResult` can be properly
    serialized and deserialized."""
    original_result = CalculationLayerResult()
    original_result.physical_property = create_dummy_property(Density)
    original_result.exceptions = [EvaluatorException()]
    original_result.data_to_store = [("dummy_object_path", "dummy_directory")]

    # Round-trip through JSON and check the serialized forms agree.
    serialized = json.dumps(original_result, cls=TypedJSONEncoder)
    round_tripped = json.loads(serialized, cls=TypedJSONDecoder)

    assert json.dumps(round_tripped, cls=TypedJSONEncoder) == serialized
|
jaketanderson/openff-evaluator
|
openff/evaluator/datasets/__init__.py
|
<reponame>jaketanderson/openff-evaluator<filename>openff/evaluator/datasets/__init__.py
from .provenance import CalculationSource, MeasurementSource, Source # isort:skip
from .datasets import PhysicalProperty, PhysicalPropertyDataSet, PropertyPhase
# Names exported by ``from openff.evaluator.datasets import *``.
# ``__all__`` entries must be strings; listing the class objects themselves
# breaks star imports with ``TypeError: Item in __all__ must be str``.
__all__ = [
    "PropertyPhase",
    "PhysicalProperty",
    "PhysicalPropertyDataSet",
    "CalculationSource",
    "MeasurementSource",
    "Source",
]
|
jaketanderson/openff-evaluator
|
openff/evaluator/tests/test_layers/test_workflow_layer.py
|
<reponame>jaketanderson/openff-evaluator
import os
import tempfile
from openff.units import unit
from openff.evaluator.backends.dask import DaskLocalCluster
from openff.evaluator.client import RequestOptions
from openff.evaluator.forcefield import SmirnoffForceFieldSource
from openff.evaluator.layers.simulation import SimulationLayer, SimulationSchema
from openff.evaluator.properties import Density
from openff.evaluator.protocols.miscellaneous import DummyProtocol
from openff.evaluator.server import server
from openff.evaluator.storage import LocalFileStorage
from openff.evaluator.tests.utils import create_dummy_property
from openff.evaluator.utils.observables import Observable
from openff.evaluator.utils.utils import temporarily_change_directory
from openff.evaluator.workflow import WorkflowSchema
from openff.evaluator.workflow.utils import ProtocolPath
def test_workflow_layer():
    """Test the `WorkflowLayer` calculation layer. As the `SimulationLayer`
    is the simplest implementation of the abstract layer, we settle for
    testing this."""
    queued_properties = [
        create_dummy_property(Density),
        create_dummy_property(Density),
    ]

    # Build a trivial workflow whose single protocol just echoes a
    # placeholder value back as the final result.
    placeholder_value = Observable((1 * unit.kelvin).plus_minus(0.1 * unit.kelvin))

    echo_protocol = DummyProtocol("protocol_a")
    echo_protocol.input_value = placeholder_value

    workflow_schema = WorkflowSchema()
    workflow_schema.protocol_schemas = [echo_protocol.schema]
    workflow_schema.final_value_source = ProtocolPath("output_value", echo_protocol.id)

    simulation_schema = SimulationSchema()
    simulation_schema.workflow_schema = workflow_schema

    request_options = RequestOptions()
    request_options.add_schema("SimulationLayer", "Density", simulation_schema)

    batch = server.Batch()
    batch.queued_properties = queued_properties
    batch.options = request_options

    with tempfile.TemporaryDirectory() as directory:
        with temporarily_change_directory(directory):
            # Create a directory for the layer to work in.
            layer_directory = "simulation_layer"
            os.makedirs(layer_directory)

            # Set up a simple storage backend and add a force field to it.
            force_field = SmirnoffForceFieldSource.from_path(
                "smirnoff99Frosst-1.1.0.offxml"
            )

            storage_backend = LocalFileStorage()
            batch.force_field_id = storage_backend.store_force_field(force_field)

            # Schedule the batch on a local calculation backend.
            with DaskLocalCluster() as calculation_backend:

                def validate_callback(returned_request):
                    assert len(returned_request.estimated_properties) == 2
                    assert len(returned_request.exceptions) == 0

                simulation_layer = SimulationLayer()
                simulation_layer.schedule_calculation(
                    calculation_backend,
                    storage_backend,
                    layer_directory,
                    batch,
                    validate_callback,
                    True,
                )
|
jaketanderson/openff-evaluator
|
openff/evaluator/datasets/curation/components/thermoml.py
|
import glob
import io
import logging
import os
import tarfile
from multiprocessing import Pool
from typing import Optional, Union
import pandas
import requests
from pydantic import Field, HttpUrl
from typing_extensions import Literal
from openff.evaluator.datasets.curation.components import (
CurationComponent,
CurationComponentSchema,
)
from openff.evaluator.datasets.thermoml import ThermoMLDataSet
from openff.evaluator.utils.utils import temporarily_change_directory
logger = logging.getLogger(__name__)
class ImportThermoMLDataSchema(CurationComponentSchema):
    # Schema controlling how `ImportThermoMLData` downloads and imports the
    # NIST ThermoML archive. Field descriptions double as user documentation.

    # Discriminator used to identify this component type when deserializing.
    type: Literal["ImportThermoMLData"] = "ImportThermoMLData"

    retain_uncertainties: bool = Field(
        True,
        description="If False, all uncertainties in measured property values will be "
        "stripped from the final data set.",
    )

    # When set, the component's output is cached to / restored from this CSV
    # file so the (large) archive download can be skipped on re-runs.
    cache_file_name: Optional[str] = Field(
        None,
        description="The path to the file to store the output of this component "
        "into, and to restore the output of this component from.",
    )

    root_archive_url: HttpUrl = Field(
        default="https://data.nist.gov/od/ds/mds2-2422/ThermoML.v2020-09-30.tgz",
        description="The root url where the main ThermoML archive can be downloaded "
        "from.",
    )
class ImportThermoMLData(CurationComponent):
    """A component which will import all supported data from the
    NIST ThermoML archive for (optionally) specified journals.
    """

    @classmethod
    def _download_data(cls, schema: ImportThermoMLDataSchema):
        """Download the full ThermoML archive and extract it into the
        current working directory.

        Parameters
        ----------
        schema
            The schema providing the URL of the root archive.
        """
        # Download the archive of all properties from the journal.
        request = requests.get(schema.root_archive_url, stream=True)

        # Make sure the request went ok.
        try:
            request.raise_for_status()
        except requests.exceptions.HTTPError as error:
            print(error.response.text)
            raise

        # Unzip the files into the temporary directory.
        tar_file = tarfile.open(fileobj=io.BytesIO(request.content))
        tar_file.extractall()

    @classmethod
    def _process_archive(cls, file_path: str) -> pandas.DataFrame:
        """Convert a single ThermoML XML file into a pandas data frame,
        returning an empty frame when the file cannot be parsed or contains
        no supported properties.
        """
        logger.debug(f"Processing {file_path}")

        # noinspection PyBroadException
        try:
            data_set = ThermoMLDataSet.from_file(file_path)
        except Exception:
            # Skip (but log) unparsable archives rather than aborting the
            # whole import.
            logger.exception(
                f"An exception was raised when processing {file_path}. This file will "
                f"be skipped."
            )
            return pandas.DataFrame()

        # A data set will be none if no 'valid' properties were found
        # in the archive file.
        if data_set is None:
            return pandas.DataFrame()

        data_frame = data_set.to_pandas()
        return data_frame

    @classmethod
    def _apply(
        cls,
        data_frame: pandas.DataFrame,
        schema: ImportThermoMLDataSchema,
        n_processes,
    ) -> pandas.DataFrame:
        """Append the imported ThermoML data to ``data_frame``, using a
        process pool of size ``n_processes`` to parse the archive files.
        """
        # Re-use a previously cached output when one is available.
        if schema.cache_file_name is not None and os.path.isfile(
            schema.cache_file_name
        ):
            cached_data = pandas.read_csv(schema.cache_file_name)
            return cached_data

        with temporarily_change_directory():
            logger.debug("Downloading archive data")

            cls._download_data(schema)

            # Get the names of the extracted files
            file_names = glob.glob(os.path.join("10.*", "*.xml"))

            logger.debug("Processing archives")

            with Pool(processes=n_processes) as pool:
                data_frames = [*pool.imap(cls._process_archive, file_names)]
                # ``close`` must precede ``join`` - joining a pool that is
                # still in the RUN state raises a ``ValueError``.
                pool.close()
                pool.join()

        logger.debug("Joining archives")

        thermoml_data_frame = pandas.concat(data_frames, ignore_index=True, sort=False)

        # Optionally strip the uncertainty columns from the imported data.
        for header in thermoml_data_frame:
            if header.find(" Uncertainty ") >= 0 and not schema.retain_uncertainties:
                thermoml_data_frame = thermoml_data_frame.drop(header, axis=1)

        data_frame = pandas.concat(
            [data_frame, thermoml_data_frame], ignore_index=True, sort=False
        )

        if schema.cache_file_name is not None:
            data_frame.to_csv(schema.cache_file_name, index=False)

        return data_frame
# Union of all ThermoML curation component schemas. Currently a single
# member; kept as a Union so new components can be added without changing
# downstream annotations.
ThermoMLComponentSchema = Union[ImportThermoMLDataSchema]
|
jaketanderson/openff-evaluator
|
docs/conf.py
|
<reponame>jaketanderson/openff-evaluator
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------

project = "OpenFF Evaluator"
copyright = "2019, Open Force Field Consortium."
author = "Open Force Field Consortium"

# The short X.Y version
# (left empty on purpose - the docs do not embed a version string)
version = ""
# The full version, including alpha/beta/rc tags
release = ""

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinxcontrib.bibtex",
    "sphinx.ext.napoleon",
    "sphinx.ext.autosummary",
    "sphinx.ext.autosectionlabel",
    "sphinx.ext.doctest",
    "sphinx.ext.todo",
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "sphinx.ext.intersphinx",
    "nbsphinx",
    "openff_sphinx_theme",
]

# Autodoc settings
autosummary_generate = True
autodoc_preserve_defaults = True
autodoc_typehints_format = "short"

# Workaround for autodoc_typehints_format not working for attributes
# see https://github.com/sphinx-doc/sphinx/issues/10290#issuecomment-1079740009
python_use_unqualified_type_names = True

autodoc_default_options = {
    "members": True,
    "inherited-members": True,
    "member-order": "bysource",
}

# Modules mocked out so autodoc can import the package without the full
# simulation stack installed.
autodoc_mock_imports = [
    "dask",
    "dask_jobqueue",
    "distributed",
    "packmol",
    "pydantic",
    "pymbar",
    "scipy",
    "openmm",
    "typing_extensions",
    "yaml",
]

# Autolabel settings
autosectionlabel_maxdepth = 3
autosectionlabel_prefix_document = True

suppress_warnings = [
    "autosectionlabel.releasehistory",
]

# nbsphinx settings - notebooks are rendered as-is, never executed at build time.
nbsphinx_execute = "never"

# sphinx bibtex settings
bibtex_bibfiles = [
    os.path.join("properties", "commonworkflows.bib"),
    os.path.join("properties", "gradients.bib"),
    os.path.join("properties", "properties.bib"),
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst"]
# source_suffix = '.rst'

# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#
# NOTE: Sphinx >= 5.0 requires this to be a string; ``None`` triggers a
# warning and is coerced to the default, "en". Set it explicitly.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Set up the intersphinx mappings.
intersphinx_mapping = {
    "python": ("https://docs.python.org/", None),
    "numpy": ("https://docs.scipy.org/doc/numpy/", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
    "mdtraj": ("http://mdtraj.org/latest/", None),
    "dask": ("http://docs.dask.org/en/latest/", None),
    "dask.distributed": ("https://distributed.dask.org/en/latest/", None),
    "distributed": ("https://distributed.dask.org/en/latest/", None),
    "dask_jobqueue": ("https://jobqueue.dask.org/en/latest/", None),
    "openff.toolkit": (
        "https://open-forcefield-toolkit.readthedocs.io/en/latest/",
        None,
    ),
    "pint": ("https://pint.readthedocs.io/en/latest/", None),
}

# Set up mathjax.
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-svg.js"

# -- Options for HTML output -------------------------------------------------

html_theme = "openff_sphinx_theme"

html_sidebars = {"**": ["globaltoc.html", "localtoc.html", "searchbox.html"]}

html_theme_options = {
    # Repository integration
    # Set the repo url for the link to appear
    "repo_url": "https://github.com/openforcefield/openff-evaluator",
    # The name of the repo. It must be set if repo_url is set
    "repo_name": "openff-evaluator",
    # Must be one of github, gitlab or bitbucket
    "repo_type": "github",
    # Colour for sidebar captions and other accents. One of
    # openff-blue, openff-toolkit-blue, openff-dataset-yellow,
    # openff-evaluator-orange, aquamarine, lilac, amaranth, grape,
    # violet, pink, pale-green, green, crimson, eggplant, turquoise,
    # or a tuple of three ints in the range [0, 255] corresponding to
    # a position in RGB space.
    "color_accent": "openff-evaluator-orange",
}

html_static_path = ["_static"]

# sphinx-notfound-page
# https://github.com/readthedocs/sphinx-notfound-page
# Renders a 404 page with absolute links
from importlib.util import find_spec as find_import_spec

if find_import_spec("notfound"):
    extensions.append("notfound.extension")

notfound_urls_prefix = "/projects/evaluator/en/stable/"
notfound_context = {
    "title": "404: File Not Found",
    "body": f"""
<h1>404: File Not Found</h1>
<p>
Sorry, we couldn't find that page. This often happens as a result of
following an outdated link. Please check the
<a href="{notfound_urls_prefix}">latest stable version</a>
of the docs, unless you're sure you want an earlier version, and
try using the search box or the navigation menu on the left.
</p>
<p>
</p>
""",
}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "evaluatordoc"

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "evaluator.tex",
        "OpenFF Evaluator Documentation",
        "openff-evaluator",
        "manual",
    ),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, "openff-evaluator", "OpenFF Evaluator Documentation", [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "openff-evaluator",
        "OpenFF Evaluator Documentation",
        author,
        "openff-evaluator",
        "A physical property evaluation toolkit from the Open Forcefield Consortium.",
        "Miscellaneous",
    ),
]
# -- Extension configuration -------------------------------------------------
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.