#
# Copyright (C) 2021 Satoru SATOH <satoru.satoh@gmail.com>
# SPDX-License-Identifier: MIT
#
"""Functions to get some meta info of the test target modules."""
import inspect
import itertools
import pathlib
import typing
from . import constants, datatypes
if typing.TYPE_CHECKING:
import pytest
def get_name_from_path(path: pathlib.Path,
pattern: typing.Pattern = constants.NAME_PATTERN
) -> str:
"""Get 'name' from given ``path``."""
try:
match = pattern.match(path.name)
if match:
return match.groups()[0]
except (AttributeError, IndexError):
pass
return ''
def get_target_module_path(request: 'pytest.FixtureRequest'
) -> pathlib.Path:
"""Resovle the path of the target module."""
return pathlib.Path(inspect.getfile(request.module))
def find_parent_dirs(path: pathlib.Path, root_name: str
) -> typing.List[pathlib.Path]:
"""Find out a series of sub dirs from the root dir named ``root_name``.
>>> path = pathlib.Path('/a/b/c/e/f/g/h.py')
>>> find_parent_dirs(path, 'c') # doctest: +NORMALIZE_WHITESPACE
    [PosixPath('/a/b/c/e/f/g'), PosixPath('/a/b/c/e/f'),
     PosixPath('/a/b/c/e'), PosixPath('/a/b/c')]
"""
if root_name not in path.parts:
raise ValueError(f'Path {path!s} does not contain {root_name}')
subdirs = list(
itertools.takewhile(lambda x: x.name != root_name, path.parents)
)
if not subdirs:
return [path.parent]
return subdirs + [subdirs[-1].parent]
def get_test_data_info_for_target(request: 'pytest.FixtureRequest'
) -> datatypes.ModuleInfo:
"""Get some path info of the target module."""
root_name = request.config.getoption(constants.OPT_TEST_ROOT)
datadir_name = request.config.getoption(constants.OPT_DATADIR_NAME)
path = get_target_module_path(request)
name = get_name_from_path(path)
parents = find_parent_dirs(path, root_name)
root = parents[-1]
if len(parents) == 1: # no sub dirs.
datadir = root / datadir_name / name
else:
datadir = root / datadir_name / parents[0].relative_to(root)
return datatypes.ModuleInfo(
root=root,
datadir=datadir,
subdirs=[d.name for d in datadir.glob('*') if d.is_dir()]
)
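# A minimal usage sketch (a hypothetical conftest.py; the fixture name is
# illustrative, not part of this module):
#
#   @pytest.fixture
#   def module_info(request):
#       return get_test_data_info_for_target(request)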
# vim:sw=4:ts=4:et:
|
"""
Tests for legend.
"""
import pytest
from pygmt import Figure
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import GMTTempFile
from pygmt.helpers.testing import check_figures_equal
@pytest.mark.mpl_image_compare
def test_legend_position():
"""
Try positioning with each of the four legend coordinate systems.
"""
fig = Figure()
fig.basemap(region=[-2, 2, -2, 2], frame=True)
positions = ["jTR+jTR", "g0/1", "n0.2/0.2", "x4i/2i/2i"]
for i, position in enumerate(positions):
fig.plot(x=[0], y=[0], style="p10p", label=i)
fig.legend(position=position, box=True)
return fig
@pytest.mark.mpl_image_compare
def test_legend_default_position():
"""
Try using the default legend position.
"""
fig = Figure()
fig.basemap(region=[-1, 1, -1, 1], frame=True)
fig.plot(x=[0], y=[0], style="p10p", label="Default")
fig.legend()
return fig
@check_figures_equal()
def test_legend_entries():
"""
Test different marker types/shapes.
"""
    fig_ref, fig_test = Figure(), Figure()
    # Use single-character arguments for the reference image
fig_ref.basemap(J="x1i", R="0/7/3/7", B="")
fig_ref.plot(
data="@Table_5_11.txt",
S="c0.15i",
G="lightgreen",
W="faint",
l="Apples",
)
fig_ref.plot(data="@Table_5_11.txt", W="1.5p,gray", l='"My lines"')
fig_ref.plot(data="@Table_5_11.txt", S="t0.15i", G="orange", l="Oranges")
fig_ref.legend(D="JTR+jTR")
fig_test.basemap(projection="x1i", region=[0, 7, 3, 7], frame=True)
fig_test.plot(
data="@Table_5_11.txt",
style="c0.15i",
color="lightgreen",
pen="faint",
label="Apples",
)
fig_test.plot(data="@Table_5_11.txt", pen="1.5p,gray", label='"My lines"')
fig_test.plot(
data="@Table_5_11.txt", style="t0.15i", color="orange", label="Oranges"
)
fig_test.legend(position="JTR+jTR")
return fig_ref, fig_test
@pytest.mark.mpl_image_compare
def test_legend_specfile():
"""
Test specfile functionality.
"""
specfile_contents = """
G -0.1i
H 24 Times-Roman My Map Legend
D 0.2i 1p
N 2
V 0 1p
S 0.1i c 0.15i p300/12 0.25p 0.3i This circle is hachured
S 0.1i e 0.15i yellow 0.25p 0.3i This ellipse is yellow
S 0.1i w 0.15i green 0.25p 0.3i This wedge is green
S 0.1i f0.1i+l+t 0.25i blue 0.25p 0.3i This is a fault
S 0.1i - 0.15i - 0.25p,- 0.3i A dashed contour
S 0.1i v0.1i+a40+e 0.25i magenta 0.25p 0.3i This is a vector
S 0.1i i 0.15i cyan 0.25p 0.3i This triangle is boring
V 0 1p
D 0.2i 1p
N 1
G 0.05i
G 0.05i
G 0.05i
L 9 4 R Smith et al., @%5%J. Geophys. Res., 99@%%, 2000
G 0.1i
P
T Let us just try some simple text that can go on a few lines.
T There is no easy way to predetermine how many lines will be required,
T so we may have to adjust the box height to get the right size box.
"""
with GMTTempFile() as specfile:
with open(specfile.name, "w") as file:
file.write(specfile_contents)
fig = Figure()
fig.basemap(projection="x6i", region=[0, 1, 0, 1], frame=True)
fig.legend(specfile.name, position="JTM+jCM+w5i")
return fig
def test_legend_fails():
"""
Test legend fails with invalid spec.
"""
fig = Figure()
with pytest.raises(GMTInvalidInput):
fig.legend(spec=["@Table_5_11.txt"])
|
"""Test all the tools.
"""
from gaphas.canvas import Context
from gaphas.constraint import LineConstraint
from gaphas.tool import ConnectHandleTool
Event = Context
# Test handle connection tool glue method
def test_item_and_port_glue(simple_canvas):
"""Test glue operation to an item and its ports.
"""
ports = simple_canvas.box1.ports()
# Glue to port nw-ne
sink = simple_canvas.tool.glue(simple_canvas.line, simple_canvas.head, (120, 50))
assert sink.item == simple_canvas.box1
assert ports[0] == sink.port
# Glue to port ne-se
sink = simple_canvas.tool.glue(simple_canvas.line, simple_canvas.head, (140, 70))
assert sink.item == simple_canvas.box1
assert ports[1] == sink.port
# Glue to port se-sw
sink = simple_canvas.tool.glue(simple_canvas.line, simple_canvas.head, (120, 90))
assert sink.item == simple_canvas.box1
assert ports[2] == sink.port
# Glue to port sw-nw
sink = simple_canvas.tool.glue(simple_canvas.line, simple_canvas.head, (100, 70))
assert sink.item == simple_canvas.box1
assert ports[3] == sink.port
def test_failed_glue(simple_canvas):
"""Test glue from too far distance.
"""
sink = simple_canvas.tool.glue(simple_canvas.line, simple_canvas.head, (90, 50))
assert sink is None
def test_glue_no_port_no_can_glue(simple_canvas):
"""Test no glue with no port.
Test if glue method does not call ConnectHandleTool.can_glue method when
port is not found.
"""
class Tool(ConnectHandleTool):
def __init__(self, *args):
super(Tool, self).__init__(*args)
self._calls = 0
def can_glue(self, *args):
self._calls += 1
tool = Tool(simple_canvas.view)
# At 300, 50 there should be no item
sink = tool.glue(simple_canvas.line, simple_canvas.head, (300, 50))
assert sink is None
assert 0 == tool._calls
def test_connect(simple_canvas):
"""Test connection to an item.
"""
line, head = simple_canvas.line, simple_canvas.head
simple_canvas.tool.connect(line, head, (120, 50))
cinfo = simple_canvas.canvas.get_connection(head)
assert cinfo is not None
assert simple_canvas.box1 == cinfo.connected
assert cinfo.port is simple_canvas.box1.ports()[0], "port %s" % cinfo.port
assert isinstance(cinfo.constraint, LineConstraint)
# No default callback defined:
assert cinfo.callback is None
    # Connecting too far away should fail and leave no connection
    simple_canvas.tool.connect(line, head, (90, 50))
cinfo2 = simple_canvas.canvas.get_connection(head)
assert cinfo is not cinfo2, cinfo2
assert cinfo2 is None, cinfo2
def test_reconnect_another(simple_canvas):
"""Test reconnection to another item.
"""
line, head = simple_canvas.line, simple_canvas.head
simple_canvas.tool.connect(line, head, (120, 50))
cinfo = simple_canvas.canvas.get_connection(head)
assert cinfo is not None
item = cinfo.connected
port = cinfo.port
constraint = cinfo.constraint
assert item == simple_canvas.box1
assert port == simple_canvas.box1.ports()[0]
assert item != simple_canvas.box2
# Connect to box2, handle's connected item and connection data should
# differ
simple_canvas.tool.connect(line, head, (120, 150))
cinfo = simple_canvas.canvas.get_connection(head)
assert cinfo is not None
assert simple_canvas.box2 == cinfo.connected
assert simple_canvas.box2.ports()[0] == cinfo.port
# Old connection does not exist
assert item != cinfo.connected
assert constraint != cinfo.constraint
def test_reconnect_same(simple_canvas):
"""Test reconnection to same item.
"""
line, head = simple_canvas.line, simple_canvas.head
simple_canvas.tool.connect(line, head, (120, 50))
cinfo = simple_canvas.canvas.get_connection(head)
assert cinfo is not None
item = cinfo.connected
constraint = cinfo.constraint
assert item == simple_canvas.box1
assert item != simple_canvas.box2
# Connect to box1 again, handle's connected item and port should be the
# same but connection constraint will differ
simple_canvas.tool.connect(line, head, (120, 50))
cinfo = simple_canvas.canvas.get_connection(head)
assert cinfo is not None
assert simple_canvas.box1 == cinfo.connected
assert simple_canvas.box1.ports()[0] == cinfo.port
assert constraint != cinfo.constraint
def xtest_find_port(simple_canvas):
    """Test finding a port.

    Prefixed with ``x`` instead of ``test`` so pytest does not collect it
    (currently disabled).
    """
line, head = simple_canvas.line, simple_canvas.head
p1, p2, p3, p4 = simple_canvas.box1.ports()
head.pos = 110, 50
port = simple_canvas.tool.find_port(line, head, simple_canvas.box1)
assert p1 == port
head.pos = 140, 60
port = simple_canvas.tool.find_port(line, head, simple_canvas.box1)
assert p2 == port
head.pos = 110, 95
port = simple_canvas.tool.find_port(line, head, simple_canvas.box1)
assert p3 == port
head.pos = 100, 55
port = simple_canvas.tool.find_port(line, head, simple_canvas.box1)
assert p4 == port
|
#! /usr/bin/python3
"""
This file contains the GUI code for configuring LoRa home automation.
Developed by - SB Components
http://sb-components.co.uk
"""
import logging
import os
from tkinter import font
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
import time
import webbrowser
os_name = os.name
if os.name == "posix":
COMPORT_BASE = "/dev/"
else:
COMPORT_BASE = ""
#from serial_comm import SerialComm
from time import sleep
import ctypes
import serial
import threading
from cryptography.fernet import Fernet
key = "Be1PA8snHgb1DS6oaWek62WLE9nxipFw3o3vB4uJ8ZI=" # "secret key" This must be kept secret
cipher_suite = Fernet(key) # This class provides both encryption and decryption facilities.
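# Note: Fernet keys can be generated with Fernet.generate_key(); loading the
# key from the environment (the variable name below is illustrative) avoids
# committing secrets to source control:
#   key = os.environ.get("LORA_FERNET_KEY") or Fernet.generate_key()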
if os.name == "nt":
    # Enable high-DPI awareness so the window renders sharply on Windows
    ctypes.windll.shcore.SetProcessDpiAwareness(1)
dirName = 'imp'
if not os.path.exists(dirName):
os.mkdir(dirName)
class SerialComm(object):
"""
Low level serial operations
"""
log = logging.getLogger("serial")
log.addHandler(logging.StreamHandler())
logging.basicConfig(filename='imp/.log.log', level=logging.DEBUG)
def __init__(self, handlerNotification=None, *args, **kwargs):
self.__ser = None
self.alive = False
self.timeout = 0.01
self.rxThread = None
self.rxData = []
self._txLock = threading.Lock()
self.handlerNotification = handlerNotification
def connect_port(self, port='/dev/ttyS0', baud_rate=115200, timeout=0.5):
"""
Connects to the Comm Port
"""
try:
# open serial port
self.__ser = serial.Serial(port=port, baudrate=baud_rate,
timeout=timeout)
self.alive = True
self.rxThread = threading.Thread(target=self._readLoop)
self.rxThread.daemon = True
self.rxThread.start()
self.log.info("Connected with {} at {} "
"baudrate.".format(port, baud_rate))
return True
except serial.serialutil.SerialException:
self.alive = False
self.log.error("Couldn't connect with {}.".format(port))
return False
def disconnect(self):
"""
Stops read thread, waits for it to exit cleanly and close serial port
"""
self.alive = False
if self.rxThread:
self.rxThread.join()
self.close_port()
self.log.info("Serial Port Disconnected")
def read_port(self, n=1):
"""
Read n number of bytes from serial port
:param n: Number of bytes to read
:return: read bytes
"""
return self.__ser.read(n)
def read_line(self):
return self.__ser.readline()
# return self.__ser.readall()
def write_port(self, data):
"""
:param data: data to send to servo, type: bytearray
:return: Number of bits sent
"""
return self.__ser.write(data)
def close_port(self):
"""
Check if the port is open.
Close the Port if open
"""
if self.__ser and self._connected:
self.__ser.close()
self.alive = False
def flush_input(self):
self.__ser.reset_input_buffer()
def flush_output(self):
self.__ser.reset_output_buffer()
@property
def _connected(self):
if self.__ser:
return self.__ser.is_open
@property
def _waiting(self):
if self.__ser:
return self.__ser.inWaiting()
def _readLoop(self):
"""
Read thread main loop
"""
try:
while self.alive:
data = self.read_line()
if data != b'':
self.log.info("Serial Response: %s", data)
self.rxData.append(data)
self.update_rx_data(data)
#self.rxData = []
except serial.SerialException as SE:
self.log.error("Serial Exception: {}.".format(SE))
self.close_port()
def write(self, data):
"""
Write data to serial port
"""
with self._txLock:
self.log.info("Serial Write: {}".format(data))
self.write_port(data)
self.flush_input()
return True
def update_rx_data(self, data):
pass
class LoraHat(SerialComm):
def __init__(self):
SerialComm.__init__(self)
def connect_hat(self, port, baud_rate):
self.connect_port(port=port, baud_rate=baud_rate, timeout=0.5)
def disconnect_hat(self):
self.disconnect()
def transmit_message(self, data):
self.write(data)
def set_variables(self):
pass
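# Minimal usage sketch for LoraHat (port name, baud rate and payload are
# illustrative):
#   hat = LoraHat()
#   hat.connect_hat(port="/dev/ttyUSB0", baud_rate=9600)
#   hat.transmit_message(b"1relay1")
#   hat.disconnect_hat()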
class MainApp(tk.Tk, LoraHat):
"""
This is a class for Creating Frames and Buttons for left and top frame
"""
port = "COM19"
current_baud = 9600
def __init__(self, *args, **kwargs):
global logo, img, xy_pos
tk.Tk.__init__(self, *args, **kwargs)
LoraHat.__init__(self)
self.screen_width = tk.Tk.winfo_screenwidth(self)
self.screen_height = tk.Tk.winfo_screenheight(self)
self.app_width = 1100
self.app_height = 650
self.xpos = (self.screen_width / 2) - (self.app_width / 2)
self.ypos = (self.screen_height / 2) - (self.app_height / 2)
xy_pos = self.xpos, self.ypos
self.relay_text = tk.StringVar()
self.label_font = font.Font(family="Helvetica", size=20)
self.heading_font = font.Font(family="Helvetica", size=12)
self.geometry(
"%dx%d+%d+%d" % (self.app_width, self.app_height, self.xpos,
self.ypos))
        if self.screen_width <= self.app_width:
self.attributes('-fullscreen', True)
self.title("Lora Home Automation")
self.config(bg="gray85")
self.label_font = font.Font(family="Helvetica", size=16)
self.heading_font = font.Font(family="Helvetica", size=18)
self.LARGE_FONT = ("Verdana", 14)
img = tk.PhotoImage(file=path + '/images/smart-home.png')
logo = tk.PhotoImage(file=path + '/images/sblogo.png')
self.top_frame_color = "dimgray"
self.left_frame_color = "gray21"
self.left_frame_color_1 = "gray30"
self.middle_frame_color = "gray30"####
self.middle_frame_color1 = "gray35"####
self.right_frame_color = "gray24"
self.top_frame = tk.Frame(self, height=int(self.app_height / 9), bd=2,
width=self.app_width,
bg=self.top_frame_color)
self.top_frame.pack(side="top", fill="both")
self.left_frame = tk.Frame(self, width=int(self.app_width / 2.7),
bg=self.left_frame_color)
self.left_frame.pack(side="left", fill="both", expand="True")
self.left_frame.pack_propagate()
self.middle_frame1=tk.Frame(self.left_frame,width=400,height=340,bg=self.middle_frame_color1)
self.middle_frame1.pack(pady=10,padx=10)
self.middle_frame1.place(x=0,y=0)
self.middle_frame=tk.Frame(self.left_frame,width=352,height=340,bg=self.middle_frame_color)
self.middle_frame.pack(pady=10,padx=10)
self.middle_frame.place(x=400,y=0)
self.right_frame = tk.Frame(self, bg=self.right_frame_color)
self.right_frame.pack(side="right", fill="both", expand=True)
self.right_frame.propagate(0)
self.rtx_frame = TransceiverFrame(parent=self.right_frame,controller=self)
self.rtx_frame.tkraise()
# Top Bar
tk.Label(self.top_frame, bg="dimgray", fg="ghostwhite",text="LORA HOME AUTOMATION SYSTEM",font=font.Font(family="times new roman", size=35)).place(x=60,y=5)
tk.Label(self.middle_frame, bg="gray30", fg="ghostwhite",text="Port Configuration",font=font.Font(family="times new roman", size=25)).place(x=20,y=20)
tk.Label(self.middle_frame1, bg="gray35", fg="ghostwhite",text="Relay",font=font.Font(family="times new roman", size=25)).place(x=130,y=20)
def relay1():
self.btn1.configure(bg="yellow")
#lora.write(b'1relay1')#send "1relay1" to other lora
if self.rtx_frame.controller.alive:
with open('imp/relay1.txt','rb') as f:
new_1 = f.readlines()
rel1 = cipher_suite.decrypt(new_1[0])
rel1 = rel1.decode("utf-8")
rel_1 = rel1.rstrip()
self.rtx_frame.controller.transmit_message(rel_1.encode("utf-8"))
else:
messagebox.showerror("Port Error","Serial port not connected!")
def relay2():
self.btn2.configure(bg="yellow")
#lora.write(b'2relay2')#send "2relay2" to other lora
if self.rtx_frame.controller.alive:
with open('imp/relay2.txt','rb') as f:
new_2 = f.readlines()
rel2 = cipher_suite.decrypt(new_2[0])
rel2 = rel2.decode("utf-8")
rel_2 = rel2.rstrip()
self.rtx_frame.controller.transmit_message(rel_2.encode("utf-8"))
else:
messagebox.showerror("Port Error","Serial port not connected!")
def relay3():
self.btn3.configure(bg="yellow")
#lora.write(b'3relay3')#send "3relay3" to other lora
if self.rtx_frame.controller.alive:
with open('imp/relay3.txt','rb') as f:
new_3 = f.readlines()
rel3 = cipher_suite.decrypt(new_3[0])
rel3 = rel3.decode("utf-8")
rel_3 = rel3.rstrip()
self.rtx_frame.controller.transmit_message(rel_3.encode("utf-8"))
else:
messagebox.showerror("Port Error","Serial port not connected!")
def relay4():
self.btn4.configure(bg="yellow")
#lora.write(b'4relay4')#send "4relay4" to other lora
if self.rtx_frame.controller.alive:
with open('imp/relay4.txt','rb') as f:
new_4 = f.readlines()
rel4 = cipher_suite.decrypt(new_4[0])
rel4 = rel4.decode("utf-8")
rel_4 = rel4.rstrip()
self.rtx_frame.controller.transmit_message(rel_4.encode("utf-8"))
else:
messagebox.showerror("Port Error","Serial port not connected!")
def allRelayON_OFF():
self.btn5.configure(bg="yellow")
if self.rtx_frame.controller.alive:
lst = ['imp/relay1.txt','imp/relay2.txt','imp/relay3.txt','imp/relay4.txt']
for i in range(len(lst)):
with open(lst[i],'rb') as f:
new1_1 = f.readlines()
rel11 = cipher_suite.decrypt(new1_1[0])
rel11 = rel11.decode("utf-8")
rel_11 = rel11.rstrip()
self.rtx_frame.controller.transmit_message(rel_11.encode("utf-8"))
time.sleep(0.2)
else:
messagebox.showerror("Port Error","Serial port not connected!")
def statusRelay():
            self.btn6.configure(bg="yellow")
if self.rtx_frame.controller.alive:
msg = '12status12'
self.rtx_frame.controller.transmit_message(msg.encode("utf-8"))
time.sleep(0.2)
else:
messagebox.showerror("Port Error","Serial port not connected!")
self.btn1=tk.Button(self, text = 'Relay 1',bg='yellow', bd = '10',command = relay1,activebackground='white')
self.btn1.place(x=50,y=150)
self.btn2 = tk.Button(self, text = 'Relay 2',bg='yellow', bd = '10',command = relay2,activebackground='white')
self.btn2.place(x=230,y=150)
self.btn3 = tk.Button(self, text = 'Relay 3',bg='yellow', bd = '10',command = relay3,activebackground='white')
self.btn3.place(x=50,y=250)
self.btn4 = tk.Button(self, text = 'Relay 4',bg='yellow', bd = '10',command = relay4,activebackground='white')
self.btn4.place(x=230,y=250)
self.btn5 = tk.Button(self, text = 'All Relay ON/OFF',bg='yellow', bd = '10',command = allRelayON_OFF,activebackground='white')
self.btn5.place(x=20,y=350)
self.btn6 = tk.Button(self, text = 'Status Of Relay',bg='yellow', bd = '10',command = statusRelay,activebackground='white')
self.btn6.place(x=210,y=350)
if not os.path.isfile("imp/passd.txt"):
now = open("imp/passd.txt", "wb")
user = "admin@123"
password = "sbcomponents"
user = cipher_suite.encrypt(bytes(user, encoding='utf-8'))
password = cipher_suite.encrypt(bytes(password, encoding='utf-8'))
now.write(user)
now.write(b'\n')
now.write(password)
now.close()
if not os.path.isfile("imp/relay1.txt"):
now = open("imp/relay1.txt", "wb")
r1 = "1relay1"
r1 = cipher_suite.encrypt(bytes(r1, encoding='utf-8'))
now.write(r1)
now.close()
if not os.path.isfile("imp/relay2.txt"):
now = open("imp/relay2.txt", "wb")
r2 = "2relay2"
r2 = cipher_suite.encrypt(bytes(r2, encoding='utf-8'))
now.write(r2)
now.close()
if not os.path.isfile("imp/relay3.txt"):
now = open("imp/relay3.txt", "wb")
r3 = "3relay3"
r3 = cipher_suite.encrypt(bytes(r3, encoding='utf-8'))
now.write(r3)
now.close()
if not os.path.isfile("imp/relay4.txt"):
now = open("imp/relay4.txt", "wb")
r4 = "4relay4"
r4= cipher_suite.encrypt(bytes(r4, encoding='utf-8'))
now.write(r4)
now.close()
def forgotPassword():
with open('imp/passd.txt','rb') as f:
new = f.readlines()
user_ = cipher_suite.decrypt(new[0])
user_ = user_.decode("utf-8")
user_1 = user_.rstrip()
if self.user_entry.get() == user_1:
now = open("imp/passd.txt", "wb")
pass_1 = self.pass_entry.get()
user = cipher_suite.encrypt(bytes(user_1, encoding='utf-8'))
password = cipher_suite.encrypt(bytes(pass_1, encoding='utf-8'))
now.write(user)
now.write(b'\n')
now.write(password)
now.close()
self.user_entry.delete(0, 'end')
self.pass_entry.delete(0, 'end')
else:
messagebox.showerror("error", "wrong user name")
self.user_entry.delete(0, 'end')
self.pass_entry.delete(0, 'end')
        def change_relay_code(self):
self.relay1_var = tk.StringVar()
self.relay2_var = tk.StringVar()
self.relay3_var = tk.StringVar()
self.relay4_var = tk.StringVar()
tk.Label(self, bg="gray21", fg="ghostwhite",text="Relay 1",
font=font.Font(family="times new roman", size=20)).place(x=425,y=470)
tk.Label(self, bg="gray21", fg="ghostwhite",text="Relay 2",
font=font.Font(family="times new roman", size=20)).place(x=425,y=510)
tk.Label(self, bg="gray21", fg="ghostwhite",text="Relay 3",
font=font.Font(family="times new roman", size=20)).place(x=425,y=550)
tk.Label(self, bg="gray21", fg="ghostwhite",text="Relay 4",
font=font.Font(family="times new roman", size=20)).place(x=425,y=590)
self.relay1_entry = tk.Entry(self,textvariable = self.relay1_var, font=('calibre',16,'normal'),width=12)
self.relay1_entry.place(x=550,y=470)
self.relay2_entry = tk.Entry(self,textvariable = self.relay2_var, font=('calibre',16,'normal'),width=12)
self.relay2_entry.place(x=550,y=508)
self.relay3_entry = tk.Entry(self,textvariable = self.relay3_var, font=('calibre',16,'normal'),width=12)
self.relay3_entry.place(x=550,y=547)
self.relay4_entry = tk.Entry(self,textvariable = self.relay4_var, font=('calibre',16,'normal'),width=12)
self.relay4_entry.place(x=550,y=585)
        change_relay_code(self)
def login_in():
with open('imp/passd.txt','rb') as f:
new = f.readlines()
user_ = cipher_suite.decrypt(new[0])
user_ = user_.decode("utf-8")
user_1 = user_.rstrip()
pass_ = cipher_suite.decrypt(new[1])
pass_ = pass_.decode("utf-8")
pass_1 = pass_.rstrip()
if self.user_entry.get() == user_1 and self.pass_entry.get() == pass_1:
def save_relay_code():
d1 = self.relay1_entry.get()
d2 = self.relay2_entry.get()
d3 = self.relay3_entry.get()
d4 = self.relay4_entry.get()
if len(d1)>0:
now1 = open("imp/relay1.txt", "wb")
data1 = self.relay1_entry.get()
data1 = cipher_suite.encrypt(bytes(data1, encoding='utf-8'))
now1.write(data1)
now1.close()
if len(d2)>0:
now2 = open("imp/relay2.txt", "wb")
data2 = self.relay2_entry.get()
data2 = cipher_suite.encrypt(bytes(data2, encoding='utf-8'))
now2.write(data2)
now2.close()
if len(d3)>0:
now3 = open("imp/relay3.txt", "wb")
data3 = self.relay3_entry.get()
data3 = cipher_suite.encrypt(bytes(data3, encoding='utf-8'))
now3.write(data3)
now3.close()
if len(d4)>0:
now4 = open("imp/relay4.txt", "wb")
data4 = self.relay4_entry.get()
data4 = cipher_suite.encrypt(bytes(data4, encoding='utf-8'))
now4.write(data4)
now4.close()
self.relay1_entry.delete(0, 'end')
self.relay2_entry.delete(0, 'end')
self.relay3_entry.delete(0, 'end')
self.relay4_entry.delete(0, 'end')
save_relay_code()
self.user_entry.delete(0, 'end')
self.pass_entry.delete(0, 'end')
else:
messagebox.showerror("error", "login Failed")
self.relay1_entry.delete(0, 'end')
self.relay2_entry.delete(0, 'end')
self.relay3_entry.delete(0, 'end')
self.relay4_entry.delete(0, 'end')
self.large_font = ('times new roman', 22)
self.small_font = ('times new roman', 10)
self.user = tk.StringVar()
self.password = tk.StringVar()
tk.Label(self, bg="gray21", fg="ghostwhite",text="Relay Code Configuration",
font=font.Font(family="times new roman", size=22)).place(x=205,y=415)
tk.Label(self, bg="gray21", fg="ghostwhite",text="User",
font=font.Font(family="times new roman", size=20)).place(x=15,y=470)
tk.Label(self, bg="gray21", fg="ghostwhite",text="Password",
font=font.Font(family="times new roman", size=20)).place(x=15,y=510)
self.user_entry = tk.Entry(self,textvariable = self.user, font=('calibre',16,'normal'),width=15)
self.user_entry.place(x=150,y=470)
self.pass_entry = tk.Entry(self,textvariable = self.password, font=('calibre',16,'normal'),width=15,show='*')
self.pass_entry.place(x=150,y=510)
self.btn7 = tk.Button(self, text = 'Login / Update Relay Code',bg='yellow', bd = '8',command = login_in,activebackground='white')
self.btn7.place(x=150,y=550)
self.btn8 = tk.Button(self, text = 'Forgot Password',bg='yellow', bd = '6',command = forgotPassword,activebackground='white')
self.btn8.place(x=150,y=600)
self.left_frame_contents()
def left_frame_contents(self):
"""
This function creates the left frame widgets
"""
global logo
x_ref, y_ref = 10, 20
font_ = font.Font(family="Helvetica", size=11)
self.baud_var = tk.StringVar()
self._com_port = tk.StringVar()
self._set_baud_rate_var = tk.IntVar()
self.baud_var.set("9600")
self._com_port.set(self.port)
self._set_baud_rate_var.set(self.current_baud)
self.baud_options = ["1200", "2400", "4800", "9600", "19200", "38400",
"57600", "115200"]
self._set_baud_rate_options = [1200, 2400, 4800, 9600, 19200, 38400,
57600, 115200]
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color_1,font=self.LARGE_FONT, text="Port").place(x=x_ref + 450,y=y_ref + 120)
self.com_entry = tk.Entry(self.left_frame, fg="black",font=self.label_font, width=8,textvariable=self._com_port)
self.com_entry.place(x=x_ref + 580, y=y_ref + 120)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color_1,font=self.LARGE_FONT, text="Baudrate").place(x=x_ref + 450,y=y_ref + 160)
tk.OptionMenu(self.left_frame, self._set_baud_rate_var,*self._set_baud_rate_options).place(x=x_ref + 580,y=y_ref + 160,width=105,height=30)
self.connect_button = tk.Button(self.left_frame, text="Connect",fg="white", bg=self.left_frame_color,font=self.LARGE_FONT, width=9,bd=4,
highlightthickness=0,command=self.connect_lora_hat)
self.connect_button.place(x=x_ref + 450, y=y_ref + 200)
self.circle = tk.Canvas(self.left_frame, height=30, width=30,
bg=self.left_frame_color_1, bd=0,
highlightthickness=0)
self.indication = self.circle.create_oval(5, 5, 30, 30, fill="red")
self.circle.place(x=x_ref + 600, y=y_ref + 200)
def set_variables(self):
self._baud_rate = self.baud_options.index(self.baud_var.get())
def get_values(self, data):
self.baud_var.set(self.baud_options[data[6] >> 5])
def connect_lora_hat(self):
"""
This function connects the serial port
"""
if self.connect_button.cget(
'text') == 'Connect' and self._com_port.get():
self.connect_hat(port=COMPORT_BASE + self._com_port.get(),
baud_rate=self._set_baud_rate_var.get())
if self.alive:
self.connect_button.config(relief="sunken", text="Disconnect")
self.circle.itemconfigure(self.indication, fill="green3")
self.com_entry.config(state="readonly")
else:
messagebox.showerror("Port Error",
"Couldn't Connect with {} ".format(self._com_port.get(), self._set_baud_rate_var.get()))
elif self.connect_button.cget('text') == 'Disconnect':
self.connect_button.config(relief="raised", text="Connect")
self.circle.itemconfigure(self.indication, fill="red")
self.com_entry.config(state="normal")
self.disconnect_hat()
def update_rx_data(self, data):
try:
data = data.decode("utf-8")
self.rtx_frame.rx_text.set(data + "\n")
relay_1 = data[0]
relay_2 = data[1]
relay_3 = data[2]
relay_4 = data[3]
if relay_1 == '0':
self.btn1.configure(bg="yellow")
if relay_2 == '0':
self.btn2.configure(bg="yellow")
if relay_3 == '0':
self.btn3.configure(bg="yellow")
if relay_4 == '0':
self.btn4.configure(bg="yellow")
if relay_1 == '1':
self.btn1.configure(bg="red")
if relay_2 == '1':
self.btn2.configure(bg="red")
if relay_3 == '1':
self.btn3.configure(bg="red")
if relay_4 == '1':
self.btn4.configure(bg="red")
self.rxData = []
        except Exception:
            # Ignore malformed or too-short responses
            pass
class TransceiverFrame(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
self.LARGE_FONT = self.controller.LARGE_FONT
self.bg_color = self.controller.right_frame_color
#self.talk_var = tk.IntVar()
#self.talk_var.set(0)
self.rx_text = tk.StringVar()
logo = tk.PhotoImage(file=path + '/images/sblogo.png')
url = "https://shop.sb-components.co.uk/"
LabelButton(parent, url=url, image=logo,height=85,width = 300,
bg="white", x_pos=20, y_pos=50)
# Receiver Label Box
self.rx_label = tk.Label(parent, justify="left", anchor="nw",
wraplength=270,
bg="gray80", fg="red",
bd=2, height=4, width=37, padx=10, pady=10,
textvariable=self.rx_text)
self.rx_label.place(x=10, y=210)
tk.Label(parent, fg="white", bg=self.bg_color, font=font.Font(
family="Helvetica", size=15), text="Rx Message").place(x=10, y=175)
# Transmitter Text Box
self.tx_text = tk.Text(parent, padx=10, pady=
10, bg="gray80",
fg="red", height=4, width=30,
wrap="word",
relief="sunken", state="normal")
self.tx_text.place(x=10, y=370)
tk.Label(parent, fg="white", bg=self.bg_color, font=font.Font(family="Helvetica", size=15), text="Tx Message").place(x=10,y=335)
self.send_button = tk.Button(parent, text='Send',
fg="white", bg="gray30", relief="raised",
font=self.LARGE_FONT, bd=4,
highlightthickness=0, width=10,
command=self.send_msg)
self.send_button.place(x=10, y=478)
def send_msg(self):
if self.controller.alive:
msg = self.tx_text.get("1.0", "end")
self.controller.transmit_message(msg.encode("utf-8"))
else:
messagebox.showerror("Port Error",
"Serial port not connected!")
class LabelButton(object):
def __init__(self, master, image=None, height=40, width=250, bg="white",
url=None, x_pos=7, y_pos=700):
        global logo
        if image is None:
            image = logo
        self.url = url
        self.label = tk.Label(master, image=image, height=height,
                              width=width, bg=bg)
self.label.place(x=x_pos, y=y_pos)
self.label.bind("<Button-1>", self.open_url)
    def open_url(self, event):
        webbrowser.open(self.url, new=1)
logo = None
img = None
path = os.path.abspath(os.path.dirname(__file__))
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
app = MainApp()
app.tk.call('wm', 'iconphoto', app._w, img)
app.resizable(0, 0)
app.mainloop()
|
import os
import sys
from flask import Flask
from flask_script import Manager
from flask_seasurf import SeaSurf
from flask_collect import Collect
import gru.utils.web
import gru.utils.templates
import gru.utils.logs
import gru.utils.fs
from gru.config import settings
from gru.plugins.loader.registry import PluginRegistry
from gru.contrib.auth.sessions import ItsdangerousSessionInterface
from gru.contrib.auth.views import blueprint as auth_views
from gru.contrib.inventory.views import blueprint as inventory_views
def generate_app():
app = Flask(
__name__,
template_folder=gru.utils.fs.relative_to(__file__, 'gru/templates'),
static_folder=gru.utils.fs.relative_to(__file__, 'gru/static'))
app.debug = settings.get('flask.debug')
app.permanent_session_lifetime = settings.get('flask.session_seconds')
# Client-side sessions with signed cookies
app.secret_key = settings.get('flask.secret_key')
app.session_interface = ItsdangerousSessionInterface()
# Setup logging
gru.utils.logs.setup_logging(app, settings)
# CSRF protection
SeaSurf(app)
# Load sub-applications
app.register_blueprint(auth_views, url_prefix='/auth')
app.register_blueprint(inventory_views, url_prefix='/inventory')
# Append plugin paths to sys.path
for directory in settings.get('plugins.directories'):
sys.path.append(os.path.abspath(os.path.expanduser(directory)))
# Register plugins
root_path = os.path.realpath(__file__)
app.plugins = PluginRegistry(app, settings)
for plugin_path in settings.get('plugins.modules'):
app.plugins.register(plugin_path)
# Also add authentication backend and inventory provider
app.plugins.register(settings.get('authentication.backend'))
app.plugins.register(settings.get('inventory.provider'))
gru.utils.templates.setup(app)
gru.utils.web.setup_base_views(app, settings)
app.wsgi_app = gru.utils.web.method_rewrite_middleware(app.wsgi_app)
return app
# Entry point.
app = generate_app()
if __name__ == '__main__':
    # Wrap the app in a Flask-Script manager
manager = Manager(app)
collect = Collect()
collect.init_app(app)
collect.init_script(manager)
manager.run()
|
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from bsm.cmd import Base
from bsm.cmd import CmdError
class Config(Base):
    def execute(self, config_type='', item_list=None):
if not config_type or config_type == 'all':
return self._bsm.config_all()
if not item_list:
return self._bsm.config(config_type)
current_config = self._bsm.config(config_type)
current_item = [config_type]
for item in item_list:
if not isinstance(current_config, Mapping):
raise CmdError('Config "{0}" is not a map: {1}'.format(':'.join(current_item), type(current_config)))
current_item.append(item)
if item not in current_config:
raise CmdError('Config "{0}" not found'.format(':'.join(current_item)))
current_config = current_config[item]
return current_config
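# Usage sketch (config names are illustrative): execute('release', ['version'])
# walks config('release')['version'], raising CmdError if any key is missing
# or an intermediate value is not a mapping.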
|
from .FilesFunctions import Files
from .USBKey import USBKey
|
# Copyright 2015 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from vitrage.datasources.nagios import NAGIOS_DATASOURCE
from vitrage.datasources.nagios.transformer import NagiosTransformer
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.datasources.nova.host.transformer import HostTransformer
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.datasources.nova.instance.transformer import InstanceTransformer
from vitrage.datasources.nova.zone import NOVA_ZONE_DATASOURCE
from vitrage.datasources.nova.zone.transformer import ZoneTransformer
from vitrage.entity_graph.processor.transformer_manager import\
TransformerManager
from vitrage.opts import register_opts
from vitrage.tests import base
class TransformerManagerTest(base.BaseTest):
OPTS = [
cfg.ListOpt('types',
default=[NAGIOS_DATASOURCE,
NOVA_HOST_DATASOURCE,
NOVA_INSTANCE_DATASOURCE,
NOVA_ZONE_DATASOURCE],
help='Names of supported data sources'),
cfg.ListOpt('path',
default=['vitrage.datasources'],
help='base path for data sources')
]
# noinspection PyPep8Naming
@classmethod
def setUpClass(cls):
super(TransformerManagerTest, cls).setUpClass()
cls.conf = cfg.ConfigOpts()
cls.conf.register_opts(cls.OPTS, group='datasources')
for datasource in cls.conf.datasources.types:
register_opts(cls.conf, datasource, cls.conf.datasources.path)
cls.manager = TransformerManager(cls.conf)
    def test_transformer_registration_nagios(self):
        self.assertIsInstance(
            self.manager.get_transformer(NAGIOS_DATASOURCE),
            NagiosTransformer)
    def test_transformer_registration_nova_host(self):
        self.assertIsInstance(
            self.manager.get_transformer(NOVA_HOST_DATASOURCE),
            HostTransformer)
    def test_transformer_registration_nova_instance(self):
        self.assertIsInstance(
            self.manager.get_transformer(NOVA_INSTANCE_DATASOURCE),
            InstanceTransformer)
    def test_transformer_registration_nova_zone(self):
        self.assertIsInstance(
            self.manager.get_transformer(NOVA_ZONE_DATASOURCE),
            ZoneTransformer)
|
import random, re, argparse
parser = argparse.ArgumentParser(description='Preprocess a corpus.')
parser.add_argument('--corpus_path', help='path to the corpus', type=str, required=True)
parser.add_argument('--out_path', help='path to the output file', type=str, required=True)
parser.add_argument('--vocab_size', help='vocabulary size', type=int, default=50000)
parser.add_argument('--sample_size', help='number of sampled sentences', type=int, default=1000000)
args = parser.parse_args()
def load_corpus(path):
corpus = []
corpus_all_words = []
corpus_single_words = []
with open(path) as f:
        for line in f:
            sent = line.strip()
            words = sent.split(' ')
            if len(words) >= 15 and len(words) <= 50:
                if re.match(r'[A-Z]+', sent):  # should start with a capital letter
                    hyphens = re.findall(r'\-', sent)
                    numbers = re.findall(r'\d+[,\.\-]*\d*', sent)
                    commas = re.findall(r',', sent)
                    if len(hyphens) <= 1 and len(numbers) <= 1 and len(commas) <= 2:
                        corpus.append(sent)
                        words = list(sent.split(' '))
single_words = set()
for w in words:
if words.count(w) == 1:
single_words.add(w)
corpus_all_words.append(words)
corpus_single_words.append(single_words)
return corpus, corpus_all_words, corpus_single_words
def count_words(corpus_all_words):
dic = {}
for words in corpus_all_words:
for w in words:
if w in dic:
dic[w] += 1
else:
dic[w] = 1
sdict = dict(sorted(dic.items(), key=lambda item: item[1], reverse=True))
return sdict
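# Note: count_words is equivalent to the standard-library Counter; a more
# idiomatic sketch producing the same frequency dict:
#   from collections import Counter
#   wfreq = dict(Counter(w for words in corpus_all_words for w in words).most_common())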
def match(corpus_single_words, vocab):
mcorpus = {}
for w in vocab:
mcorpus[w] = []
for i, ws in enumerate(corpus_single_words):
        for matched_w in ws & vocab:
            mcorpus[matched_w].append(i)
sorted_mcorpus = dict(sorted(mcorpus.items(), key=lambda item: len(item[1]), reverse=True))
return sorted_mcorpus
if __name__ == '__main__':
print('Loading...')
corpus, corpus_all_words, corpus_single_words = load_corpus(args.corpus_path)
print('Done! Counting words..')
wfreq = count_words(corpus_all_words)
# Skip top-most frequent words
print('Done! Selecting vocab...')
skip = int(len(wfreq) * 0.0001)
vocab = set()
i = 0
for w, c in wfreq.items():
if i < skip:
i += 1
continue
elif len(vocab) == args.vocab_size:
break
if w.isalpha():
vocab.add(w)
# Matching
print('Matching...')
mcorpus = match(corpus_single_words, vocab)
# Sampling
print('Done! Sampling...')
out_corpus = []
used_sent_idx = set()
    while len(out_corpus) < args.sample_size:
        for w, sent_ids in mcorpus.items():
            if len(sent_ids) > 0:
                ridx = random.choice(sent_ids)
                if ridx not in used_sent_idx:
                    out_corpus.append((w, ridx))
                    if len(out_corpus) == args.sample_size:
                        break
                    used_sent_idx.add(ridx)
# Save
print('Done. Save!')
with open(args.out_path, 'w') as fw:
for item in out_corpus:
fw.write('{0}\t{1}\n'.format(item[0], corpus[item[1]]))
vocab = set()
for term, _ in out_corpus:
vocab.add(term)
with open(args.out_path+'.vocab', 'w') as fw:
for v in vocab:
fw.write(v + '\n')
|
from math import sqrt, floor
# From the math library, import sqrt and floor.
num = int(input('Enter a number: '))
raiz = sqrt(num)
# Compute the square root of num
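# Note: on Python 3.8+, math.isqrt(num) returns the floored integer square
# root directly, avoiding any float rounding:
#   from math import isqrt
#   isqrt(17)  # -> 4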
print('The square root of {} is {:.2f}'.format(num, floor(raiz)))
|
from django.apps import AppConfig
class CrosswalkConfig(AppConfig):
name = "crosswalk"
|
import numpy as np
from .lin_solve import SpookLinSolve
from .quad_program import SpookL1
import scipy.sparse as sps
from .utils import laplacian_square_S, dict_innerprod
import os
# from .utils import phspec_preproc
from pbasex import pbasex
from matplotlib import pyplot as plt
class PhotonFreqResVMI:
"""
This is a specially derived class for omega-resolved VMI
Parameters
vls_spec_dict: Dictionary of preprocessed single-shot photon spectra
    processed_quad_dict: Dictionary of preprocessed single-shot image quadrants
gData: Pre-calculated G data by pBASEX
precontractedData: A dictionary or a npz file containing the following keys
"A": VLS spectra
"AtA": pre-contracted A.T @ A
"AtQuad": pre-contracted A.T @ B where B are the flattened images
"vlsbounds" or "vlse_2bounds": boundary indices of the ROI along omega
alpha_vmi: Magnification factor of VMI
pxWeights: Weights over pixels
Keyword arguments
spook_kwargs: kwargs that will be directly passed to SpookLinSolve
"""
def __init__(self, vls_spec_dict, processed_quad_dict, gData,
precontractedData=None, alpha_vmi=1, pxWeights=None,
sparsity="L2", **spook_kwargs):
if precontractedData is not None:
dat = precontractedData
A = dat['A'] # This A is preferably in full range, but the cropped one is fine.
Na_full = A.shape[1]
keys = dat.files if isinstance(precontractedData, np.lib.npyio.NpzFile) else list(dat.keys())
if "vlsbounds" in keys:
bounds = dat['vlsbounds']
elif "vlse_2bounds" in keys:
bounds = dat['vlse_2bounds']
else:
bounds = (0,Na_full) # No cropping
# print(bounds)
AtA = dat['AtA']
AtQuad = dat['AtQuad']
if AtA.shape[0] == Na_full:
AtA = AtA[bounds[0]:bounds[1], bounds[0]:bounds[1]]
if AtQuad.shape[0] == Na_full:
AtQuad = AtQuad[bounds[0]:bounds[1],:]
elif AtQuad.shape[0] != bounds[1]-bounds[0]:
raise ValueError("Info loss in AtQuad: AtQuad.shape[0]=%d is neither ptp(bounds)=%d or A.shape[1]=%d."%(AtQuad.shape[0],bounds[1]-bounds[0],Na_full)
+"Please verify precontractedData[vlsbounds] is properly set.")
if AtA.shape[0] != AtQuad.shape[0]: # redo AtA contraction
A1 = A[:,bounds[0]:bounds[1]]
AtA = A1.T @ A1
else:
print("#Photon spectra:", len(vls_spec_dict.keys()), ". #VMI images", len(processed_quad_dict.keys()))
BIDs = list(vls_spec_dict.keys())
A = np.asarray([vls_spec_dict[b] for b in BIDs])
Amean = A.mean(axis=0)
bounds = np.argwhere(Amean > Amean.max() * np.exp(-2))
bounds = (int(bounds.min()), int(bounds.max())+1)
# cropped = True
A1 = A[:,bounds[0]:bounds[1]]
AtA = A1.T @ A1
AtQuad = dict_innerprod(vls_spec_dict, processed_quad_dict, bounds)
npz_fname = "precontracted.npz"
if os.path.exists(npz_fname):
print("Overwriting", npz_fname)
np.savez_compressed(npz_fname, A=A, BIDs=BIDs, AtA=AtA, AtQuad=AtQuad, vlse_2bounds=bounds)#, cropped=cropped)
# if not cropped:
# AtA = AtA[bounds[0]:bounds[1], bounds[0]:bounds[1]]
# AtQuad = AtQuad[bounds[0]:bounds[1]]
# cropped = True
if pxWeights is None:
GtG = gData['V'] @ np.diag(gData['S']**2) @ gData['V'].T
AtBG = AtQuad @ (gData['V'] @ np.diag(gData['S']) @ gData['Up']).T
else:
w = pxWeights.ravel()
tmp = gData['V'] @ np.diag(gData['S'])
tmp2 = tmp @ (gData['Up'] * w)
# calculating in this order is efficient when a lot of pixels are 0 weighted
GtG = (tmp2 @ gData['Up'].T) @ tmp.T
AtBG = AtQuad @ tmp2.T
rsmoother = gData['frk'].T @ laplacian_square_S(gData['x'].size, True) @ gData['frk']
rsmoother = sps.kron(sps.eye(gData['nl']), rsmoother)
print(r"Tensor shapes: (A \otimes G).T@B, AtA, GtG, rsmoother")
print(AtBG.shape, AtA.shape, GtG.shape, rsmoother.shape)
if sparsity == 'L2':
SpkCls = SpookLinSolve
elif sparsity == 'L1':
SpkCls = SpookL1
else:
raise ValueError("Unrecognized sparisty input: %s"%sparsity)
self.__spook = SpkCls(AtBG, AtA, 'contracted', GtG, Bsmoother=rsmoother, **spook_kwargs)
self.__gData = gData
self._alpha = alpha_vmi
self.__vlsAxisInPX = np.arange(A.shape[1])[bounds[0]:bounds[1]]
def getXopt_Ewl(self, l=None, **spook_kwargs):
Xo = self.__spook.getXopt(**spook_kwargs)
gData = self.__gData
Xo_wlk = Xo.reshape(Xo.shape[0], gData['nl'], -1)
ret = (Xo_wlk @ gData['frk'].T).transpose((2,0,1))
self.Xo_Ewl = ((0.5/self._alpha) * gData['x'][:,None,None]) * ret
if l is not None:
return self.Xo_Ewl[:,l//2]
return self.Xo_Ewl
def show_res(self, l=0, ax=None):
if ax is None:
ax = plt.subplot(111)
gData = self.__gData
ax.pcolormesh(self._alpha*(gData['x']**2), self.__vlsAxisInPX,
self.Xo_Ewl[:,:,l//2].T, shading='nearest')
ax.set_ylabel("VLS pixel")
ax.set_xlabel("E" + ("[px^2]" if self._alpha==1 else "[eV]"))
return ax
@property
def keAxis(self):
return self._alpha*(self.__gData['x']**2)
@property
def vlsAxis_px(self):
return self.__vlsAxisInPX
def getspook(self):
return self.__spook
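# Usage sketch (file name and arguments are illustrative; passing None for the
# shot dictionaries is valid when precontractedData is supplied):
#   dat = np.load("precontracted.npz")
#   solver = PhotonFreqResVMI(None, None, gData, precontractedData=dat)
#   Xo = solver.getXopt_Ewl(l=0)
#   solver.show_res(l=0)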
|
import os, glob, shutil
from PIL import Image
import numpy as np
import cv2
with open('mapFromADE.txt', 'r') as f:
data = f.readlines()
CATE_TO_ADE = {} # label 15,36,89 maps to two ADE categories
for line in data:
semantic_id, ade_id = map(int, line.strip().split())
CATE_TO_ADE.setdefault(semantic_id, []).append(ade_id)
LABEL_DICT = {
1: [4], # 29
2: [1],
3: [15, 59],
4: [9, 64],
5: [19],
6: [23, 101],
7: [28, 131, 142],
8: [6],
9: [140],
10: [8], # 40, 58, 82, 107, 132
11: [16, 34, 57, 65],
12: [11, 25, 36, 45, 63],
13: [20, 31, 32, 70, 76, 98, 111],
14: [24],
15: [37, 83, 86, 135],
16: [38, 46, 48, 50, 54, 66, 72, 74, 100, 118],
17: [51, 75, 90, 108, 119, 125, 130, 144, 147],
18: [13],
19: [],
20: [],
21: [5, 18, 67, 73]
}
CATE_DICT = {}
for key, values in LABEL_DICT.items():
for v in values:
CATE_DICT[v] = key
def label_convert(filename,
semantic_dir,
ade_dir,
save_dir):
if filename.split('_')[1] == 'train':
semantic_mask = np.array(Image.open(
os.path.join(semantic_dir, 'training', filename + '.png')))
else:
semantic_mask = np.array(Image.open(
os.path.join(semantic_dir, 'validation', filename + '.png')))
h, w = semantic_mask.shape
    # Use nearest-neighbour resampling so label values are not interpolated
    ade_mask = np.array(Image.open(os.path.join(ade_dir, filename + '_seg.png')).resize((w, h), Image.NEAREST))
    # ADE20K encodes the class index as (R // 10) * 256 + G
    ade_classes = ade_mask[:, :, 0].astype(np.int32) // 10 * 256 + ade_mask[:, :, 1]
ade_instance = ade_mask[:, :, 2]
semantic_new = semantic_mask.copy()
semantic_id_list = list(np.unique(semantic_mask)[1:])
instance_mask = np.zeros((h, w), dtype=np.uint8)
for semantic_id in sorted(semantic_id_list):
ade_id = CATE_TO_ADE[semantic_id]
if len(ade_id) == 1:
instances = ade_instance[ade_classes == ade_id[0]]
else:
instances = ade_instance[(ade_classes == ade_id[0]) | (ade_classes == ade_id[1])]
for ins_id in np.unique(instances):
ins_mask = (ade_instance == ins_id).astype(np.uint8) * ins_id
instance_mask += ins_mask
for semantic_id in semantic_id_list:
if semantic_id in CATE_DICT.keys():
semantic_new[semantic_mask == semantic_id] = CATE_DICT[semantic_id]
else:
semantic_new[semantic_mask == semantic_id] = semantic_id
new_semantic_id_list = list(np.unique(semantic_new)[1:])
print('class id before merge:', new_semantic_id_list)
for label in new_semantic_id_list:
        # TODO: merge rugs into the adjacent floor
if label == 29:
rugs = instance_mask[semantic_new == label]
ins_mask = instance_mask.copy()
for i in np.unique(rugs):
rug_flag = False
rug = (ins_mask == i).astype(np.uint8)
rug_dilate = cv2.dilate(rug, kernel=(3, 3))
if 1 in new_semantic_id_list:
floors = ins_mask[semantic_new == 1]
for ins_id in np.unique(floors):
floor = (ins_mask == ins_id).astype(np.uint8)
if np.sum(rug_dilate * floor) > 0:
semantic_new[ins_mask == i] = 1
instance_mask[ins_mask == i] = ins_id
floor = (instance_mask == ins_id).astype(np.uint8)
Image.fromarray(floor * 255).save(os.path.join(save_dir, 'mask_floor_{}.png'.format(ins_id)))
rug_flag = True
break
if not rug_flag or 1 not in new_semantic_id_list:
semantic_new[ins_mask == i] = 0
instance_mask[ins_mask == i] = 0
        # TODO: decide whether a cushion belongs to a bed, a chair/stool, or a sofa
elif label in [40, 58, 82, 107, 132]:
cushions = instance_mask[semantic_new == label]
ins_mask = instance_mask.copy()
for i in np.unique(cushions):
cushion_flag = False
cushion = (ins_mask == i).astype(np.uint8)
cushion_dilate = cv2.dilate(cushion, kernel=(3, 3))
if 10 in new_semantic_id_list:
beds = ins_mask[semantic_new == 10]
for ins_id in np.unique(beds):
bed = (ins_mask == ins_id).astype(np.uint8)
if np.sum(cushion_dilate * bed) > 0:
semantic_new[ins_mask == i] = 10
instance_mask[ins_mask == i] = ins_id
bed = (instance_mask == ins_id).astype(np.uint8)
Image.fromarray(bed * 255).save(os.path.join(save_dir, 'mask_bed_{}.png'.format(ins_id)))
cushion_flag = True
break
if 13 in new_semantic_id_list:
chairs = ins_mask[semantic_new == 13]
for ins_id in np.unique(chairs):
chair = (ins_mask == ins_id).astype(np.uint8)
if np.sum(cushion_dilate * chair) > 0:
semantic_new[ins_mask == i] = 13
instance_mask[ins_mask == i] = ins_id
chair = (instance_mask == ins_id).astype(np.uint8)
Image.fromarray(chair * 255).save(os.path.join(save_dir, 'mask_chair_{}.png'.format(ins_id)))
cushion_flag = True
break
if 14 in new_semantic_id_list:
sofas = ins_mask[semantic_new == 14]
for ins_id in np.unique(sofas):
sofa = (ins_mask == ins_id).astype(np.uint8)
if np.sum(cushion_dilate * sofa) > 0:
semantic_new[ins_mask == i] = 14
instance_mask[ins_mask == i] = ins_id
sofa = (instance_mask == ins_id).astype(np.uint8)
Image.fromarray(sofa * 255).save(os.path.join(save_dir, 'mask_sofa_{}.png'.format(ins_id)))
cushion_flag = True
break
if not cushion_flag or (10 not in new_semantic_id_list and
13 not in new_semantic_id_list and 14 not in new_semantic_id_list):
semantic_new[ins_mask == i] = 0
instance_mask[ins_mask == i] = 0
        # TODO: decide whether a countertop belongs to a table or a cabinet
elif label == 71:
countertops = instance_mask[semantic_new == label]
ins_mask = instance_mask.copy()
for i in np.unique(countertops):
countertop_flag = False
countertop = (ins_mask == i).astype(np.uint8)
countertop_dilate = cv2.dilate(countertop, kernel=(3, 3))
if 11 in new_semantic_id_list:
desks = ins_mask[semantic_new == 11]
for ins_id in np.unique(desks):
desk = (ins_mask == ins_id).astype(np.uint8)
if np.sum(countertop_dilate * desk) > 0:
semantic_new[ins_mask == i] = 11
instance_mask[ins_mask == i] = ins_id
desk = (instance_mask == ins_id).astype(np.uint8)
Image.fromarray(desk * 255).save(os.path.join(save_dir, 'mask_desk_{}.png'.format(ins_id)))
countertop_flag = True
break
if 12 in new_semantic_id_list:
cabinets = ins_mask[semantic_new == 12]
for ins_id in np.unique(cabinets):
cabinet = (ins_mask == ins_id).astype(np.uint8)
if np.sum(countertop_dilate * cabinet) > 0:
semantic_new[ins_mask == i] = 12
instance_mask[ins_mask == i] = ins_id
cabinet = (instance_mask == ins_id).astype(np.uint8)
Image.fromarray(cabinet * 255).save(os.path.join(save_dir, 'mask_cabinet_{}.png'.format(ins_id)))
countertop_flag = True
break
if not countertop_flag or (11 not in new_semantic_id_list and 12 not in new_semantic_id_list):
semantic_new[ins_mask == i] = 0
instance_mask[ins_mask == i] = 0
        # TODO: assign books to a bed, table, cabinet, chair/stool, or sofa
elif label == 68:
books = instance_mask[semantic_new == label]
ins_mask = instance_mask.copy()
for i in np.unique(books):
book_flag = False
book = (ins_mask == i).astype(np.uint8)
book_dilate = cv2.dilate(book, kernel=(3, 3))
for cls in [10, 11, 12, 13, 14]:
if cls in new_semantic_id_list:
ms = ins_mask[semantic_new == cls]
for ins_id in np.unique(ms):
m = (ins_mask == ins_id).astype(np.uint8)
if np.sum(book_dilate * m) > 0 and not book_flag:
semantic_new[ins_mask == i] = cls
instance_mask[ins_mask == i] = ins_id
m = (instance_mask == ins_id).astype(np.uint8)
Image.fromarray(m * 255).save(os.path.join(save_dir, 'mask_book_{}.png'.format(ins_id)))
book_flag = True
break
if not book_flag or (10 not in new_semantic_id_list and 11 not in new_semantic_id_list and
12 not in new_semantic_id_list and 13 not in new_semantic_id_list and
14 not in new_semantic_id_list):
semantic_new[ins_mask == i] = 0
instance_mask[ins_mask == i] = 0
elif label not in LABEL_DICT.keys():
mask = (semantic_new == label).astype(np.uint8) * 255
Image.fromarray(mask).save(os.path.join(save_dir, '{}.png'.format(label)))
semantic_new[semantic_new == label] = 0
instance_mask[semantic_new == label] = 0
print('class id after merge:', np.unique(semantic_new)[1:])
assert max(np.unique(semantic_new)) < 22
for label in np.unique(semantic_new)[1:]:
mask = (semantic_new == label).astype(np.uint8) * 255
Image.fromarray(mask).save(os.path.join(save_dir, 'label_{}.png'.format(label)))
final_mask = np.stack([semantic_new, instance_mask], axis=-1)
Image.fromarray(final_mask).save(os.path.join(save_dir, 'final_mask.png'))
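    # Note: Pillow interprets an (H, W, 2) uint8 array as mode "LA"
    # (luminance + alpha), so the semantic channel is stored in L and the
    # instance channel in A of the saved PNG.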
if __name__ == '__main__':
root = '/Users/dyy/Desktop/ADE20k'
semantic_dir = os.path.join(root, 'ADEChallengeData2016/annotations')
ade_dir = os.path.join(root, 'raw_seg2')
# if not os.path.exists(ade_dir):
# os.makedirs(ade_dir)
with open('indoor_images2.txt', 'r') as f:
images = f.readlines()
# images = [name.strip() for name in images]
images = ['ADE_val_00001372']
# ade_masks = glob.glob('/Users/dyy/Desktop/datasets/ADE20k/ADE20K_2016_07_26/images/*/*/*/*_seg.png')
# # ade_masks = glob.glob('/Users/dyy/Desktop/datasets/ADE20k/ADE20K_2016_07_26/images/*/*/*/*/*_seg.png')
# for path in ade_masks:
# print(path)
# name = path.split('/')[-1]
# if name.replace('_seg.png', '') in images:
# shutil.copy(path, os.path.join(ade_dir, name))
    for i, filename in enumerate(images):
        print(i, filename)
        save_dir = os.path.join(root, 'indoor2', filename)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        label_convert(filename, semantic_dir, ade_dir, save_dir)
# try:
# label_convert(filaname, semantic_dir, ade_dir, save_dir)
# except Exception as e:
# print(e)
|
# 2.8 Matching multiline patterns
import re
comment = re.compile(r'/\*(.*?)\*/')
text1 = '/* this is a comment */'
text2 = ''' /* this is a
multiline comment */
'''
print(comment.findall(text1))
print(comment.findall(text2))
comment = re.compile(r'/\*((?:.|\n)*?)\*/')
print(comment.findall(text2))
comment = re.compile(r'/\*(.*?)\*/', re.DOTALL)
print(comment.findall(text2))
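# Note: re.DOTALL only changes what '.' matches (it now includes newlines);
# it is unrelated to re.MULTILINE, which changes '^' and '$'. For example:
#   re.findall(r'^\w+', 'foo\nbar')                # ['foo']
#   re.findall(r'^\w+', 'foo\nbar', re.MULTILINE)  # ['foo', 'bar']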
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""ComboBox dropped height Test
**What is checked**
It is ensured that the height of the list displayed when the combobox is
dropped down matches the height of the reference.
**How is it checked**
The value for the dropped rectangle can be retrieved from windows. The height
of this rectangle is calculated and compared against the reference height.
**When is a bug reported**
If the height of the dropped rectangle for the combobox being checked differs
from the height of the reference one then a bug is reported.
**Bug Extra Information**
There is no extra information associated with this bug type
**Is Reference dialog needed**
The reference dialog is necessary for this test.
**False positive bug reports**
No false bugs should be reported. If the font of the localised control has a
smaller height than the reference then it is possible that the dropped
rectangle could be of a different size.
**Test Identifier**
The identifier for this test/bug is "ComboBoxDroppedHeight"
"""
testname = "ComboBoxDroppedHeight"
def ComboBoxDroppedHeightTest(windows):
"""Check if each combobox height is the same as the reference"""
bugs = []
for win in windows:
if not win.ref:
continue
if win.class_name() != "ComboBox" or win.ref.class_name() != "ComboBox":
continue
if win.dropped_rect().height() != win.ref.dropped_rect().height():
bugs.append((
[win, ],
{},
testname,
0,)
)
return bugs
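# Each reported bug follows the tuple convention used above:
# ([affected_windows], {extra_info}, testname, severity). A sketch of
# consuming the result (method names are standard pywinauto wrappers):
#   for wins, extra, name, severity in ComboBoxDroppedHeightTest(windows):
#       print(name, [w.window_text() for w in wins])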
|
#importing the Kratos Library
from Kratos import *
from KratosULFApplication import *
from KratosStructuralApplication import *
from KratosMeshingApplication import *
#import time
def AddVariables(model_part):
model_part.AddNodalSolutionStepVariable(PRESSURE);
model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
model_part.AddNodalSolutionStepVariable(VELOCITY);
model_part.AddNodalSolutionStepVariable(ACCELERATION);
model_part.AddNodalSolutionStepVariable(POSITIVE_FACE_PRESSURE);
model_part.AddNodalSolutionStepVariable(DENSITY);
model_part.AddNodalSolutionStepVariable(VISCOSITY);
model_part.AddNodalSolutionStepVariable(NODAL_AREA);
model_part.AddNodalSolutionStepVariable(BODY_FORCE);
model_part.AddNodalSolutionStepVariable(FORCE);
model_part.AddNodalSolutionStepVariable(IS_FLUID);
model_part.AddNodalSolutionStepVariable(IS_INTERFACE);
model_part.AddNodalSolutionStepVariable(IS_STRUCTURE);
model_part.AddNodalSolutionStepVariable(IS_BOUNDARY);
model_part.AddNodalSolutionStepVariable(IS_FREE_SURFACE);
model_part.AddNodalSolutionStepVariable(IS_LAGRANGIAN_INLET);
model_part.AddNodalSolutionStepVariable(BULK_MODULUS);
model_part.AddNodalSolutionStepVariable(NODAL_H);
model_part.AddNodalSolutionStepVariable(NORMAL);
def AddDofs(model_part):
for node in model_part.Nodes:
#adding dofs
node.AddDof(DISPLACEMENT_X);
node.AddDof(DISPLACEMENT_Y);
node.AddDof(DISPLACEMENT_Z);
node.AddDof(IS_STRUCTURE);
class ULF_FSISolver:
def __init__(self, fluid_model_part, structure_model_part, combined_model_part, box_corner1,box_corner2, domain_size, gid_io):
self.domain_size=domain_size;
self.echo_level = 0
self.gid_io=gid_io
self.tttt=0
#saving the different model parts
self.combined_model_part = combined_model_part; #contains both structure and fluid
self.fluid_model_part = fluid_model_part; #contains only fluid elements
self.structure_model_part = structure_model_part; #contains only structural elements
#time integration scheme
damp_factor = -0.3
self.time_scheme = ResidualBasedPredictorCorrectorBossakScheme(damp_factor)
#definition of the solvers
# self.model_linear_solver = SkylineLUFactorizationSolver()
pDiagPrecond = DiagonalPreconditioner()
self.model_linear_solver = BICGSTABSolver(1e-8, 5000,pDiagPrecond)
#definition of the convergence criteria
self.conv_criteria = DisplacementCriteria(1e-6,1e-9)
self.pressure_calculate_process = PressureCalculateProcess(fluid_model_part,domain_size);
self.ulf_apply_bc_process = UlfApplyBCProcess(fluid_model_part);
self.ulf_time_step_dec_process = UlfTimeStepDecProcess(fluid_model_part);
self.mark_fluid_process = MarkFluidProcess(fluid_model_part);
self.mark_close_nodes_process = MarkCloseNodesProcess(fluid_model_part);
self.mark_outer_nodes_process = MarkOuterNodesProcess(fluid_model_part);
self.node_erase_process = NodeEraseProcess(fluid_model_part);
#tools to save and merge the structural contributions
self.save_structure_model_part_process = SaveStructureModelPartProcess();
self.save_structure_conditions_process = SaveStructureConditionsProcess();
self.merge_model_parts_process = MergeModelPartsProcess();
###temporary ... i need it to calculate the nodal area
self.UlfUtils = UlfUtils()
#self.save_structural_elements
self.alpha_shape = 1.5;
self.h_multiplier = 0.3
##saving the limits of the box (all the nodes external to this will be erased)
self.box_corner1 = box_corner1
self.box_corner2 = box_corner2
if(domain_size == 2):
self.Mesher = TriGenPFEMModeler()
self.combined_neigh_finder = FindNodalNeighboursProcess(combined_model_part,9,18)
self.fluid_neigh_finder = FindNodalNeighboursProcess(fluid_model_part,9,18)
elif (domain_size == 3):
#self.Mesher = TetGenModeler()
#improved mesher
self.Mesher = TetGenPfemModeler()
self.combined_neigh_finder = FindNodalNeighboursProcess(combined_model_part,20,30)
self.fluid_neigh_finder = FindNodalNeighboursProcess(fluid_model_part,20,30)
print "after reading all the model contains:"
print self.fluid_model_part
#detect initial size distribution - note that initially the fluid model part contains
#all the elements of both structure and fluid ... this is only true after reading the input
(self.fluid_neigh_finder).Execute();
Hfinder = FindNodalHProcess(fluid_model_part);
Hfinder.Execute();
#######################################################################
#delta time estimation based on the non-negativity of the jacobian
def EstimateDeltaTime(self,max_dt,domain_size):
#return (self.UlfUtils).EstimateDeltaTime(min_dt,max_dt,self.combined_model_part)
return (self.ulf_time_step_dec_process).EstimateDeltaTime(max_dt,domain_size)
#######################################################################
def Initialize(self):
#creating the solution strategy
CalculateReactionFlag = False
ReformDofSetAtEachStep = True
MoveMeshFlag = True
import ulf_strategy_python
self.solver = ulf_strategy_python.ULFStrategyPython(self.combined_model_part,self.time_scheme,self.model_linear_solver,self.conv_criteria,CalculateReactionFlag,ReformDofSetAtEachStep,MoveMeshFlag,self.domain_size)
print "self.echo_level = " , self.echo_level
(self.solver).SetEchoLevel(self.echo_level)
print "finished initialization of the fluid strategy"
#saving the structural elements
        (self.mark_fluid_process).Execute(); #we need this before saving the structural elements
print "Saving STRUCTURE"
(self.save_structure_model_part_process).SaveStructure(self.fluid_model_part, self.structure_model_part, self.domain_size);
(self.save_structure_conditions_process).SaveStructureConditions(self.fluid_model_part, self.structure_model_part, self.domain_size);
#marking the fluid
(self.fluid_neigh_finder).Execute();
(self.ulf_apply_bc_process).Execute();
(self.mark_fluid_process).Execute();
#remeshing before the first solution
self.Remesh();
######################################################################
def CheckForInvertedElements(self):
#volume = (self.UlfUtils).CalculateVolume(self.combined_model_part,self.domain_size)
volume = (self.UlfUtils).CalculateVolume(self.fluid_model_part,self.domain_size)
inverted_elements = False
if(volume < 0.0):
volume = - volume
inverted_elements = True
return [inverted_elements,volume]
#######################################################################
def Solve(self):
print "solving the fluid problem"
inverted_elements = (self.solver).Solve(self.domain_size,self.UlfUtils)
print "succesful solution of the fluid "
reduction_factor = 0.5
max_reduction_steps = 5
time_reduction_step = 0
while(inverted_elements == True and time_reduction_step <= max_reduction_steps):
print " *************************************************** "
print "inverted element found ... reducing the time step"
(self.UlfUtils).ReduceTimeStep(self.combined_model_part,reduction_factor);
(self.UlfUtils).ReduceTimeStep(self.fluid_model_part,reduction_factor);
(self.UlfUtils).ReduceTimeStep(self.structure_model_part,reduction_factor);
print "reduction_step = ", time_reduction_step
time_reduction_step = time_reduction_step + 1
#copying vars from the old step
## for node in (self.combined_model_part).Nodes:
## pold = node.GetSolutionStepValue(PRESSURE,1);
## dispold = node.GetSolutionStepValue(DISPLACEMENT,1);
## velold = node.GetSolutionStepValue(VELOCITY,1);
## accold = node.GetSolutionStepValue(ACCELERATION,1);
##
## node.SetSolutionStepValue(PRESSURE,0,pold);
## node.SetSolutionStepValue(DISPLACEMENT,0,dispold);
## node.SetSolutionStepValue(VELOCITY,0,velold);
## node.SetSolutionStepValue(ACCELERATION,0,accold);
self.solver.MoveMesh()
print "time step reduction completed"
print " *************************************************** "
(self.solver).Solve(self.domain_size,self.UlfUtils)
[inverted_elements,vol] = self.CheckForInvertedElements()
if(inverted_elements == True):
print "***********************************************************************"
print "***********************************************************************"
print "CRITICAL: ... element is still inverted after reducing the time step"
print "***********************************************************************"
print "***********************************************************************"
factor = 2.0**5 #this is the original time step
(self.UlfUtils).ReduceTimeStep(self.combined_model_part,factor);
(self.UlfUtils).ReduceTimeStep(self.fluid_model_part,factor);
(self.UlfUtils).ReduceTimeStep(self.structure_model_part,factor);
## for node in (self.combined_model_part).Nodes:
## pold = node.GetSolutionStepValue(PRESSURE,1);
## dispold = node.GetSolutionStepValue(DISPLACEMENT,1);
## velold = node.GetSolutionStepValue(VELOCITY,1);
## accold = node.GetSolutionStepValue(ACCELERATION,1);
##
## node.SetSolutionStepValue(PRESSURE,0,pold);
## node.SetSolutionStepValue(DISPLACEMENT,0,dispold);
## node.SetSolutionStepValue(VELOCITY,0,velold);
## node.SetSolutionStepValue(ACCELERATION,0,accold);
self.solver.MoveMesh()
print "advancing in time without doing anything..."
(self.solver).PredictionStep(self.domain_size,self.UlfUtils)
#print "pressure contribution process" - to be executed using exclusively fluid elements
#and neighbouring relationships
(self.fluid_neigh_finder).Execute();
(self.UlfUtils).CalculateNodalArea(self.fluid_model_part,self.domain_size);
(self.pressure_calculate_process).Execute();
#print "remeshing"
self.Remesh();
######################################################################
def Remesh(self):
#(self.UlfUtils).MarkNodesCloseToFS(self.fluid_model_part, 2)
##erase all conditions and elements prior to remeshing
((self.combined_model_part).Elements).clear();
((self.combined_model_part).Conditions).clear();
((self.combined_model_part).Nodes).clear();
((self.fluid_model_part).Elements).clear();
((self.fluid_model_part).Conditions).clear();
#and erase bad nodes
#(self.mark_close_nodes_process).MarkCloseNodes(self.h_multiplier);
#(self.mark_outer_nodes_process).MarkOuterNodes(self.box_corner1, self.box_corner2);
#(self.node_erase_process).Execute();
##remesh CHECK for 3D or 2D
if (self.domain_size == 2):
(self.Mesher).ReGenerateUpdatedLagrangian(self.fluid_model_part, self.node_erase_process, self.alpha_shape)
#(self.Mesher).ReGenerateUpdatedLagrangian(self.fluid_model_part,self.alpha_shape)
elif (self.domain_size == 3):
#(self.Mesher).ReGenerateUpdatedLagrangian3D(self.fluid_model_part,self.alpha_shape)
            #improved quality mesher
(self.Mesher).ReGenerateMeshPfemUlf3D(self.fluid_model_part,self.alpha_shape)
##calculating fluid neighbours before applying boundary conditions
(self.fluid_neigh_finder).Execute();
## (self.UlfUtils).CalculateNodalArea(self.fluid_model_part,self.domain_size);
#print "marking fluid" and applying fluid boundary conditions
(self.ulf_apply_bc_process).Execute();
(self.mark_fluid_process).Execute();
#merging the structural elements back (they are saved in the Initialize)
(self.merge_model_parts_process).MergeParts(self.fluid_model_part, self.structure_model_part, self.combined_model_part);
#calculating the neighbours for the overall model
(self.combined_neigh_finder).Execute();
#(self.UlfUtils).CalculateNodalArea(self.fluid_model_part,self.domain_size);
## for elem in self.combined_model_part.Elements:
## print elem
print "end of remesh fucntion"
######################################################################
    def FindNeighbours(self):
        #recompute the neighbours for the overall (combined) model
        (self.combined_neigh_finder).Execute();
|
import os
import json
import time
import requests
import datetime
import numpy as np
from PIL import Image
from io import BytesIO
import tensorflow as tf
# azureml imports
from azureml.core.model import Model
def init():
global model, image_size, index, categories
try:
path = Model.get_model_path('seer')
    except Exception:
path = 'data/model'
model_path = os.path.join(path, 'model.hdf5')
meta_path = os.path.join(path, 'metadata.json')
print('Loading {}'.format(meta_path))
with open(meta_path) as f:
metadata = json.load(f)
for i in metadata:
print('{} => {}'.format(i, metadata[i]))
image_size = metadata['image_size']
index = metadata['index']
categories = metadata['categories']
print('Attempting to load model')
model = tf.keras.models.load_model(model_path)
model.summary()
print('Done!')
print('Initialized model "{}" at {}'.format(model_path, datetime.datetime.now()))
def process_image(path):
global image_size
# Extract image (from web or path)
if(path.startswith('http')):
response = requests.get(path)
img = np.array(Image.open(BytesIO(response.content)))
else:
img = np.array(Image.open(path))
img_tensor = tf.convert_to_tensor(img, dtype=tf.float32)
img_final = tf.image.resize(img_tensor, [image_size, image_size]) / 255
return img_final
def run(raw_data):
global model, image_size, index, categories
prev_time = time.time()
post = json.loads(raw_data)
# get image
img_path = post['image']
tensor = process_image(img_path)
t = tf.reshape(tensor, [-1, image_size, image_size, 3])
# predict with model (there's only one)
pred = model.predict(t, steps=1)[0]
print(pred)
current_time = time.time()
inference_time = datetime.timedelta(seconds=current_time - prev_time)
predictions = {}
for i in range(len(pred)):
predictions[categories[i]] = str(pred[i])
payload = {
'time': str(inference_time.total_seconds()),
'prediction': categories[int(np.argmax(pred))],
'scores': predictions
}
print('Input ({}),\nPrediction ({})'.format(post['image'], payload))
return payload
if __name__ == '__main__':
import pprint
init()
tacos = 'https://lh3.googleusercontent.com/-UT5H8nPflkQ/T4tqueyhb_I/AAAAAAAAGl8/1FP7G__Zuys/s640/Lentil+Tacos+close.jpg'
burrito = 'https://www.exploreveg.org/files/2015/05/sofritas-burrito.jpeg'
print('\n---------------------\nInference with tacos:')
t = run(json.dumps({'image': tacos}))
json.dumps(t)
print('\n=======================')
pprint.pprint(t)
print('\n\n\nInference with burrito:')
b = run(json.dumps({'image': burrito}))
print('\n=======================')
pprint.pprint(b)
|
#
# lajollaS
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.99983, 0.99974, 0.79991],
[0.10023, 0.10091, 0.0037913],
[0.86973, 0.45582, 0.31068],
[0.94802, 0.76354, 0.37498],
[0.49809, 0.23315, 0.20576],
[0.98278, 0.91253, 0.57381],
[0.91381, 0.60672, 0.32439],
[0.28496, 0.16607, 0.1061],
[0.72238, 0.30998, 0.27958],
[0.1874, 0.13354, 0.062323],
[0.9931, 0.95921, 0.68789],
[0.96754, 0.84738, 0.4604],
[0.9298, 0.68157, 0.33699],
[0.61029, 0.26633, 0.24848],
[0.389, 0.1997, 0.1555],
[0.89674, 0.53312, 0.31778],
[0.81242, 0.37438, 0.29885],
[0.55391, 0.24948, 0.22849],
[0.14225, 0.11735, 0.03474],
[0.90574, 0.57009, 0.32091],
[0.95797, 0.80656, 0.41183],
[0.44303, 0.2166, 0.18105],
[0.97589, 0.88298, 0.51602],
[0.93853, 0.72144, 0.35096],
[0.88549, 0.4952, 0.31451],
[0.66685, 0.28556, 0.26546],
[0.8463, 0.41502, 0.30567],
[0.99693, 0.97973, 0.74399],
[0.98842, 0.93731, 0.63126],
[0.23526, 0.1497, 0.083951],
[0.33625, 0.18278, 0.13021],
[0.92169, 0.64362, 0.32917],
[0.77392, 0.34151, 0.29098],
[0.83077, 0.3945, 0.30251],
[0.47045, 0.2249, 0.1936],
[0.93407, 0.70122, 0.34297],
[0.89151, 0.51432, 0.3162],
[0.9719, 0.86595, 0.48769],
[0.16445, 0.12548, 0.04964],
[0.87836, 0.47572, 0.31269],
[0.85918, 0.43556, 0.30835],
[0.96286, 0.82747, 0.4349],
[0.99087, 0.94851, 0.65967],
[0.99512, 0.96958, 0.71599],
[0.31043, 0.17441, 0.11793],
[0.41587, 0.20818, 0.16831],
[0.95298, 0.7851, 0.39176],
[0.211, 0.14161, 0.073581],
[0.63859, 0.2755, 0.25734],
[0.69488, 0.29697, 0.27286],
[0.12086, 0.1092, 0.019283],
[0.99851, 0.98977, 0.77195],
[0.90983, 0.58841, 0.32257],
[0.90141, 0.55169, 0.31934],
[0.74891, 0.32481, 0.28559],
[0.98573, 0.92539, 0.60264],
[0.52592, 0.24134, 0.21742],
[0.97951, 0.89846, 0.54486],
[0.58204, 0.25775, 0.23883],
[0.36245, 0.19124, 0.14274],
[0.92569, 0.66242, 0.33255],
[0.94319, 0.74225, 0.36147],
[0.91775, 0.62509, 0.32653],
[0.25991, 0.15783, 0.094764],
[0.79686, 0.35981, 0.2957],
[0.95049, 0.7743, 0.38296],
[0.70873, 0.30323, 0.27631],
[0.86473, 0.44572, 0.30956],
[0.90781, 0.57926, 0.32172],
[0.96523, 0.83758, 0.44738],
[0.29764, 0.17023, 0.11196],
[0.27237, 0.16197, 0.10032],
[0.94083, 0.73178, 0.35587],
[0.7358, 0.31716, 0.28268],
[0.42941, 0.2124, 0.17473],
[0.15323, 0.12139, 0.042482],
[0.22307, 0.14566, 0.078687],
[0.91971, 0.63434, 0.32777],
[0.98119, 0.90566, 0.55934],
[0.4567, 0.22079, 0.18738],
[0.98967, 0.94298, 0.64549],
[0.97775, 0.89091, 0.53041],
[0.91577, 0.61589, 0.32541],
[0.34931, 0.18702, 0.13639],
[0.82195, 0.38436, 0.30075],
[0.78568, 0.35049, 0.29341],
[0.19909, 0.13759, 0.068084],
[0.85306, 0.42531, 0.30705],
[0.99921, 0.99476, 0.78593],
[0.48423, 0.22906, 0.1997],
[0.76164, 0.33295, 0.28837],
[0.89913, 0.54243, 0.31856],
[0.95548, 0.79587, 0.40139],
[0.94558, 0.75284, 0.36782],
[0.88862, 0.50481, 0.31537],
[0.96976, 0.85685, 0.47387],
[0.99606, 0.97467, 0.73],
[0.97394, 0.87466, 0.50176],
[0.93191, 0.69133, 0.33977],
[0.96044, 0.81711, 0.42301]]
lajollaS_map = LinearSegmentedColormap.from_list('lajollaS', cm_data)
# For use of "viscm view"
test_cm = lajollaS_map
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(lajollaS_map)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=lajollaS_map)
plt.show()
|
from .base import BaseRunner
from rlutils.replay_buffers import GAEBuffer
import rlutils.infra as rl_infra
class OnPolicyRunner(BaseRunner):
def setup_logger(self, config, tensorboard=False):
super(OnPolicyRunner, self).setup_logger(config=config, tensorboard=tensorboard)
self.sampler.set_logger(self.logger)
self.updater.set_logger(self.logger)
def setup_replay_buffer(self, max_length, gamma, lam):
self.replay_buffer = GAEBuffer.from_vec_env(self.env, max_length=max_length, gamma=gamma, lam=lam)
def setup_sampler(self, num_steps):
self.num_steps = num_steps
self.sampler = rl_infra.samplers.TrajectorySampler(env=self.env)
def setup_updater(self):
self.updater = rl_infra.OnPolicyUpdater(agent=self.agent, replay_buffer=self.replay_buffer)
def run_one_step(self, t):
self.sampler.sample(num_steps=self.num_steps,
collect_fn=(self.agent.act_batch, self.agent.value_net.predict),
replay_buffer=self.replay_buffer)
self.updater.update(self.global_step)
def on_epoch_end(self, epoch):
self.logger.log_tabular('Epoch', epoch)
self.logger.dump_tabular()
def on_train_begin(self):
self.sampler.reset()
self.updater.reset()
self.timer.start()
@classmethod
def main(cls, env_name, env_fn=None, seed=0, num_parallel_envs=5, agent_cls=None, agent_kwargs={},
batch_size=5000, epochs=200, gamma=0.99, lam=0.97, logger_path: str = None):
# Instantiate environment
assert batch_size % num_parallel_envs == 0
num_steps_per_sample = batch_size // num_parallel_envs
config = locals()
runner = cls(seed=seed, steps_per_epoch=1,
epochs=epochs, exp_name=None, logger_path=logger_path)
runner.setup_env(env_name=env_name, env_fn=env_fn, num_parallel_env=num_parallel_envs,
asynchronous=False, num_test_episodes=None)
runner.setup_agent(agent_cls=agent_cls, **agent_kwargs)
runner.setup_replay_buffer(max_length=num_steps_per_sample, gamma=gamma, lam=lam)
runner.setup_sampler(num_steps=num_steps_per_sample)
runner.setup_updater()
runner.setup_logger(config)
runner.run()
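# A hypothetical entry point (the agent class and its import path are
# assumptions, not part of this module): pick an agent and let main() wire
# the env, buffer, sampler, updater and logger together.
#
#     if __name__ == '__main__':
#         from rlutils.algos.ppo import PPOAgent  # assumed import path
#         OnPolicyRunner.main(env_name='CartPole-v1', agent_cls=PPOAgent)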
|
from os import environ as env
from .utils import DOES_NOT_EXIST
API_REPLAY_RETRIES = int(env.get('API_REPLAY_RETRIES', 10))
API_REPLAY_BASE = int(env.get('API_REPLAY_BASE', 2))
DEBUG = env.get('DEBUG', 'True').lower() in ('true', '1')
REDIS_URL = env.get('REDIS_URL', DOES_NOT_EXIST)
RQ_DEFAULT_URL = env.get('RQ_DEFAULT_URL', DOES_NOT_EXIST)
SECRET_KEY = env.get('SECRET_KEY', DOES_NOT_EXIST)
|
"""uuid format to Meeting.id
Revision ID: e541ec33fc8b
Revises: 8005dddb1ef3
Create Date: 2021-07-11 11:26:54.447335
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'e541ec33fc8b'
down_revision = '8005dddb1ef3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
#!/usr/bin/env python
# Copyright (c) 2017, Analog Devices Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Author: Ryosuke Tajima
import math
import unittest
import rospy
import rostest
import time
from collections import deque
from sensor_msgs.msg import Imu
def imu_get_min(imu, a, b):
imu.angular_velocity.x = min(a.angular_velocity.x, b.angular_velocity.x)
imu.angular_velocity.y = min(a.angular_velocity.y, b.angular_velocity.y)
imu.angular_velocity.z = min(a.angular_velocity.z, b.angular_velocity.z)
imu.linear_acceleration.x = min(a.linear_acceleration.x, b.linear_acceleration.x)
imu.linear_acceleration.y = min(a.linear_acceleration.y, b.linear_acceleration.y)
imu.linear_acceleration.z = min(a.linear_acceleration.z, b.linear_acceleration.z)
def imu_get_max(imu, a, b):
imu.angular_velocity.x = max(a.angular_velocity.x, b.angular_velocity.x)
imu.angular_velocity.y = max(a.angular_velocity.y, b.angular_velocity.y)
imu.angular_velocity.z = max(a.angular_velocity.z, b.angular_velocity.z)
imu.linear_acceleration.x = max(a.linear_acceleration.x, b.linear_acceleration.x)
imu.linear_acceleration.y = max(a.linear_acceleration.y, b.linear_acceleration.y)
imu.linear_acceleration.z = max(a.linear_acceleration.z, b.linear_acceleration.z)
class TestImu(unittest.TestCase):
@classmethod
def setUpClass(cls):
rospy.init_node('test_imu')
def setUp(self):
self.imu_raw_count = 0
self.imu_count = 0
self.imu_raw_data = deque(maxlen=100)
self.imu_data = deque(maxlen=100)
rospy.Subscriber('/imu/data_raw', Imu, self.cb_imu_raw, queue_size=1000)
rospy.Subscriber('/imu/data', Imu, self.cb_imu, queue_size=1000)
def cb_imu_raw(self, msg):
self.imu_raw_count += 1
self.imu_raw_data.append(msg)
def cb_imu(self, msg):
self.imu_count += 1
self.imu_data.append(msg)
def test_imu_raw(self):
time.sleep(1.0)
# Check data count
self.assertTrue(self.imu_raw_count>0, 'No data received from /imu/data_raw')
# Check orientation
for imu in self.imu_raw_data:
self.assertAlmostEqual(imu.orientation.x, 0)
self.assertAlmostEqual(imu.orientation.y, 0)
self.assertAlmostEqual(imu.orientation.z, 0)
self.assertAlmostEqual(imu.orientation.w, 1)
for cov in imu.orientation_covariance:
self.assertAlmostEqual(cov, 0)
# Check angular velocity
for imu in self.imu_raw_data:
            self.assertTrue(abs(imu.angular_velocity.x) < 0.1)
            self.assertTrue(abs(imu.angular_velocity.y) < 0.1)
            self.assertTrue(abs(imu.angular_velocity.z) < 0.1)
for cov in imu.angular_velocity_covariance:
self.assertAlmostEqual(cov, 0)
# Check linear_acceleration with gravity (CAUTION: test will fail in space)
for imu in self.imu_raw_data:
accl = math.sqrt(imu.linear_acceleration.x**2 + imu.linear_acceleration.y**2 + imu.linear_acceleration.z**2)
self.assertTrue(accl > 8.0)
self.assertTrue(accl < 11.0)
def test_imu(self):
time.sleep(1.0)
# Check data count
self.assertTrue(self.imu_count>0, 'No data received from /imu/data')
# Check orientation
for imu in self.imu_data:
quat = math.sqrt(imu.orientation.x**2 + imu.orientation.y**2 + imu.orientation.z**2 + imu.orientation.w**2)
self.assertAlmostEqual(quat, 1, delta=0.001)
# Check angular velocity
for imu in self.imu_data:
            self.assertTrue(abs(imu.angular_velocity.x) < 0.1)
            self.assertTrue(abs(imu.angular_velocity.y) < 0.1)
            self.assertTrue(abs(imu.angular_velocity.z) < 0.1)
for cov in imu.angular_velocity_covariance:
self.assertAlmostEqual(cov, 0)
# Check linear_acceleration with gravity (CAUTION: test will fail in space)
for imu in self.imu_data:
accl = math.sqrt(imu.linear_acceleration.x**2 + imu.linear_acceleration.y**2 + imu.linear_acceleration.z**2)
self.assertTrue(accl > 8.0)
self.assertTrue(accl < 11.0)
if __name__ == '__main__':
rostest.rosrun('adi_driver', 'test_adi_driver', TestImu)
|
"""Basic key mapping.
Originally written by lunixbochs, version taken from the knausj_talon repo:
https://github.com/knausj85/knausj_talon/blob/d330a6eb1fbfcc13f99a732a097f220fd0c10950/code/keys.py
"""
from typing import Set, List
from talon import Module, Context, actions
from user.utils import dictify, multi_map, spoken_form
insert = actions.insert
default_alphabet = "air bat cap drum each fine gust harp sit jury crunch look made near odd pit quench red sun trap urge vest whale plex yank zip"
# My setup has trouble with some words. Probably my accent.
modified_alphabet = (
default_alphabet
#
# .replace("air", "arch")
.replace("air", "aim")
#
# .replace("bat", "batch")
#
.replace("harp", "hip")
#
# .replace("zip", "zen")
)
# chosen_alphabet = default_alphabet.split(" ")
chosen_alphabet = modified_alphabet.split(" ")
letters_string = "abcdefghijklmnopqrstuvwxyz"
# TODO: Use digits in number.py?
default_digits = "zero one two three four five six seven eight nine".split(" ")
ints = [str(i) for i in range(10)]
mod = Module()
mod.list("letter", desc="The spoken phonetic alphabet")
mod.list("symbol", desc="All symbols from the keyboard")
mod.list("arrow", desc="All arrow keys")
mod.list("standalone_arrow", desc="Arrow keys that can be spoken on their own")
mod.list("number", desc="All number keys")
mod.list("modifier", desc="All modifier keys")
mod.list("special", desc="All special keys")
mod.list("complex_symbol", desc="Symbols that take multiple characters, e.g. := or ->")
ctx = Context()
ctx.lists["self.modifier"] = {
"command": "cmd",
"control": "ctrl",
"troll": "ctrl",
"shift": "shift",
"schiff": "shift",
"ship": "shift",
"alt": "alt",
"option": "alt",
"windows": "super",
"super": "super",
}
ctx.lists["self.letter"] = dict(zip(chosen_alphabet, letters_string))
symbols = multi_map(
{
("back tick", "grave"): "`",
("comma", "camma"): ",",
("dot", "period", "full stop", "stop"): ".",
("semicolon", "semi"): ";",
("apostrophe", "post", "poess"): "'",
(
"speech mark",
"speech",
# Quote now starts a dictated quote
# "quote",
): '"',
# FIXME: slash and blash recognition conflicts
("forward slash", "slash"): "/",
("backslash", "blash"): "\\",
("minus", "dash"): "-",
(
"equals",
# "eek",
"quals",
"quills", # w2l
"qual",
): "=",
"plus": "+",
("question mark", "question", "quest"): "?",
"tilde": "~",
("exclamation", "bang"): "!",
("dollar sign", "dollar"): "$",
("underscore", "score"): "_",
("colon", "coal"): ":",
("asterisk", "star"): "*",
# "pound": "#",
"hash": "#",
"percent": "%",
"caret": "^",
"at sign": "@",
("ampersand", "amper"): "&",
"pipe": "|",
# Currency
"dollar": "$",
"pound": "£",
"euro": "€", # FIXME: comes out as "4"
# Brackets
("left square", "lack"): "[",
("right square", "rack"): "]",
("left paren", "lub"): "(",
("right paren", "rub"): ")",
("left brace", "lace"): "{",
("right brace", "race"): "}",
("left angle", "langle"): "<",
("right angle", "rangle"): ">",
("space", "gap", "pat"): " ",
# Special
"new line": "\n",
# Multi-char Symbols
# TODO: Extract these into a separate list
"quiver": "= =", # E[quiva]lent
"trip eek": "= = =",
("walrus", "wally"): ": =",
"rittoe": "- >",
"leffoe": "< -",
"riteek": "= >",
"leffeek": "< =",
# TODO: Probably a better name for this
"box": ": :",
}
)
# Spaced versions, e.g. "coalgap", "camgap"
symbols.update({f"{key}gap": f"{val} space" for key, val in symbols.items()})
ctx.lists["self.symbol"] = symbols
ctx.lists["self.number"] = dict(zip(default_digits, ints))
basic_arrows = {
"left": "left",
# Allow dropping the "t" in left
"leff": "left",
"right": "right",
"down": "down",
}
ctx.lists["self.arrow"] = {
#
**basic_arrows,
"up": "up",
}
ctx.lists["self.standalone_arrow"] = {
#
**basic_arrows,
"pup": "up",
}
# TODO: Merge these into one dictionary, use multi map
simple_keys = dictify(
[
#
"tab",
"escape",
# "enter",
"pageup",
"pagedown",
"backspace",
"delete",
"home",
# Interferes with dictation
# "end",
]
)
alternate_keys = multi_map(
{
# b[ackward k]ill
("bill", "bin"): "backspace",
# f[orward k]ill
("fill", "fin"): "delete",
"scape": "escape",
"knock": "end",
# "home" is unreliable and requires a lot of "h" sound - tiring
"con": "home",
# Don't use "return" because it's a common programming keyword.
("slap", "lip"): "enter",
# TODO: Extract compound keys, shouldn't really be here
"squares": "[ ] left",
"parens": "( ) left",
"braces": "{ } left",
"angles": "< > left",
# TODO: Audit this with w2l once off Dragon. "loon" may be better
# "loon": "end enter",
"break": "end enter",
"backtab": "shift-tab",
}
)
f_keys = {
    # Auto-generate 1-9
    **{spoken_form(f"F {i}"): f"f{i}" for i in range(1, 10)},
    "F ten": "f10",
    "F eleven": "f11",
    "F twelve": "f12",
}
keys = {**simple_keys, **alternate_keys, **f_keys}
ctx.lists["self.special"] = keys
complex_symbols = multi_map(
{
("walrus", "wally"): ":=",
"rittoe": "->",
"leffoe": "<-",
"riteek": "=>",
"leffeek": "<=",
}
)
ctx.lists["self.complex_symbol"] = complex_symbols
@mod.capture(rule="{self.modifier}+")
def modifiers(m) -> Set[str]:
"""One or more modifier keys"""
return set(m["modifier_list"])
@mod.capture(rule="{self.arrow}")
def arrow(m) -> str:
"""One directional arrow key"""
return m.arrow
@mod.capture(rule="<self.arrow>+")
def arrows(m) -> str:
"""One or more arrows separate by a space"""
return m.arrow_list
@mod.capture(rule="{self.standalone_arrow}")
def standalone_arrow(m) -> str:
"""One arrow that can be spoken on its own (without modifiers).
Standalone arrows are separated to avoid "up" being misrecognized.
"""
return m.standalone_arrow
@mod.capture(rule="(numb | num) <digits>")
def number_key(m) -> str:
"""One number key"""
return str(m.digits)
@mod.capture(rule="(numb | num) <number>")
def number_keys(m) -> str:
"""Multiple number keys"""
return str(m.number)
@mod.capture(rule="{self.letter}")
def letter(m) -> str:
"""One letter key"""
return m.letter
@mod.capture(rule="{self.letter}+ | sky {self.letter}+ [ship]")
def letters(m) -> str:
"""Multiple letter keys, as one string (no spaces)."""
string = "".join(m.letter_list)
if m[0] == "sky":
string = string.upper()
return string
@mod.capture(rule="{self.symbol}")
def symbol(m) -> str:
"""One symbol key"""
return m.symbol
@mod.capture(rule="{self.special}")
def special(m) -> str:
"""One special key"""
return m.special
@mod.capture(
rule="(<self.arrow> | <self.digit> | <self.letter> | <self.special> | <self.symbol>)"
)
def any_key(m) -> str:
"""Any single key"""
return str(m[0])
@mod.capture(rule="{self.modifier}+ <self.any_key>")
def keychord(m) -> str:
"""A single key with modifiers"""
return "-".join(m.modifier_list + [m.any_key])
@mod.capture(rule="(<self.letter> | <self.symbol> | <self.number_key>)")
def character(m) -> str:
"""Any key that can be typed as a character."""
return m[0]
@mod.capture(rule="<user.character> | {self.complex_symbol} | <self.letters>")
def insertable(m) -> str:
"""A char, or a complex insert."""
return m[0]
@mod.action_class
class Actions:
def modifier_key(modifier: str, key: str):
"""(TEMPORARY) Presses the modifier plus supplied number"""
res = "-".join([modifier, str(key)])
actions.key(res)
# TODO: Switch to many_keys
def many(keys: List[str]):
"""Press a list of keys in sequence."""
for key in keys:
actions.key(key)
def type_number(number: float):
"""Press each key in a number"""
# TODO: Allow leading zeros
for char in str(number):
actions.key(char)
# TODO: Remove (edit: no longer used?)
def insert_padded(string: str) -> None:
"""Insert a string with padding on each side."""
around = actions.user.surrounding_text()
if around:
if around.char_before != " ":
insert(" ")
insert(string)
if around.char_after != " ":
insert(" ")
else:
insert(f" {string} ")
def insert_key_padded(key: str) -> None:
"""Press a key to insert a char, but add spaces before and after.
e.g. "pad equals" -> insert(" = ")
"""
around = actions.user.surrounding_text()
if not around or around.char_before != " ":
insert(" ")
try:
actions.key(key)
except ValueError:
# HACK: To insert "keys" like `->`
actions.insert(key)
if not around or around.char_after != " ":
insert(" ")
|
def QuestionsMarks(str):
is_true = "false"
for index, char in enumerate(str):
if char == "?":
            try:
                if (str[index + 1] == "?") and (str[index + 2] == "?"):
                    is_true = "true"
            except IndexError:
                pass
# code goes here
return is_true
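# A quick behaviour check (hypothetical inputs):
#   QuestionsMarks("ab???cd") -> "true"   (three consecutive question marks)
#   QuestionsMarks("a?b?c")   -> "false"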
# keep this function call here
print QuestionsMarks(raw_input()) |
import logging
import requests
from requests.exceptions import RequestException
from geo_bot.models import Result, SearchArea, TelegramUser
def get_count_response(chat_id):
"""
Количество поисковых запросов по chat_id.
"""
message = []
user = TelegramUser.objects.filter(user_id=chat_id).order_by("-id")
if user:
message += f"Вы совершили {user.count()} поисковых запросов.\n\n"
message += [f"{response.result}\n" for response in user[:5]]
else:
message += ["Вы еще не сделали ни одного запроса."]
return message
def get_addresses(location):
"""
Получить ближайшие адреса по указанной локации.
"""
response = get_response_geo(location)
return response_processing(response)
def get_response_geo(location):
"""
Получение ответа по запросу поиска до ближайшего адреса указанной локации.
"""
from django_tg_bot.settings import API_GEO_TOKEN
try:
response = requests.get(
f"https://geocode-maps.yandex.ru/1.x?geocode={location}&apikey={API_GEO_TOKEN}&format=json&results=100",
)
    except RequestException:
        logging.exception("Could not get a response from the geocoder.")
        return None
return response
def response_processing(response):
"""
Выделение подходящих адресов из области поиска.
"""
authorized_areas = [area.title.lower() for area in SearchArea.objects.all()]
all_address = response.json()["response"]["GeoObjectCollection"]["featureMember"]
for address in all_address:
area = address["GeoObject"]["metaDataProperty"]["GeocoderMetaData"]["Address"][
"Components"
]
for component in area:
if component["name"].lower() in authorized_areas:
find_address = address["GeoObject"]["metaDataProperty"][
"GeocoderMetaData"
]["text"]
return find_address
def save_db(query, result, chat_id):
"""
Сохранение адреса в БД.
"""
result, _ = Result.objects.get_or_create(query=query, result=result)
TelegramUser.objects.get_or_create(user_id=chat_id, result=result)
|
# from gpcharts import figure
#
# #simple line graph, as described in the readme.
# fig1 = figure()
# fig1.plot([8,7,6,5,4])
#
# #another line graph, but with two data types. Also adding title
# fig2 = figure(title='Two lines',xlabel='Days',ylabel='Count',height=600,width=600)
# xVals = ['Mon','Tues','Wed','Thurs','Fri']
# yVals = [[5,4],[8,7],[4,8],[10,10],[3,12]]
# fig2.plot(xVals,yVals)
import random
# squares = []
# for x in range(10):
# squares.append(x**2)
#
# print(squares)
# squares = [x**2 for x in range(10) if x>4]
#
# # print(squares)
# # lst=[(x, x**2, x**3) for x in range(6)]
# # print(lst)
# z = sum([x for x in range(1,101) if x%2==0])
# print(z)
import requests
x = requests.get('http://news.am/arm')
start = 1329
end = 1423
# iterate over the decoded text, not the Response object (which yields bytes)
for symbol in x.text:
    print(symbol, ord(symbol))
#
# for i in range(start, end + 1):
# print(format(i, 'X'), end=' ') #hex
# print(i, end=' ') #dec
# print(chr(i)) #letter
# x = requests.get('http://news.am/')
# print(x.text)
|
import labscript_utils.h5_lock
import h5py
import numpy as np
import labscript_utils.properties as properties
from labscript_utils import dedent
class NI_DAQmxParser(object):
def __init__(self, path, device):
self.path = path
self.name = device.name
self.device = device
def get_traces(self, add_trace, clock=None):
with h5py.File(self.path, 'r') as f:
group = f['devices/' + self.name]
if 'AO' in group:
AO_table = group['AO'][:]
else:
AO_table = None
if 'DO' in f['devices/%s' % self.name]:
DO_table = group['DO'][:]
else:
DO_table = None
props = properties.get(f, self.name, 'connection_table_properties')
version = props.get('__version__', None)
if version is None:
msg = """Shot was compiled with the old version of the NI_DAQmx device
class. The new runviewer parser is not backward compatible with old
shot files. Either downgrade labscript_devices to 2.2.0 or less, or
recompile the shot with labscript_devices 2.3.0 or greater."""
raise RuntimeError(dedent(msg))
ports = props['ports']
static_AO = props['static_AO']
static_DO = props['static_DO']
times, clock_value = clock[0], clock[1]
clock_indices = np.where((clock_value[1:] - clock_value[:-1]) == 1)[0] + 1
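        # Worked example: clock_value = [0, 1, 1, 0, 1] gives differences
        # [1, 0, -1, 1], so np.where(... == 1) picks offsets [0, 3] and the
        # +1 shift yields rising-edge indices [1, 4].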
# If initial clock value is 1, then this counts as a rising edge (clock should
# be 0 before experiment) but this is not picked up by the above code. So we
# insert it!
if clock_value[0] == 1:
clock_indices = np.insert(clock_indices, 0, 0)
clock_ticks = times[clock_indices]
traces = {}
if DO_table is not None:
ports_in_use = DO_table.dtype.names
for port_str in ports_in_use:
for line in range(ports[port_str]["num_lines"]):
# Extract each digital value from the packed bits:
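                    # e.g. a packed port value of 0b0110 unpacks to 1.0 on
                    # lines 1 and 2 and to 0.0 on lines 0 and 3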
line_vals = (((1 << line) & DO_table[port_str]) != 0).astype(float)
if static_DO:
line_vals = np.full(len(clock_ticks), line_vals[0])
traces['%s/line%d' % (port_str, line)] = (clock_ticks, line_vals)
if AO_table is not None:
for chan in AO_table.dtype.names:
vals = AO_table[chan]
if static_AO:
vals = np.full(len(clock_ticks), vals[0])
traces[chan] = (clock_ticks, vals)
triggers = {}
for channel_name, channel in self.device.child_list.items():
if channel.parent_port in traces:
trace = traces[channel.parent_port]
if channel.device_class == 'Trigger':
triggers[channel_name] = trace
add_trace(channel_name, trace, self.name, channel.parent_port)
return triggers
|
import tensorflow as tf
PATCH_SIZE = 96
LR_SCALE = 4
BATCH_SIZE = 16
buffer_size = 1024
patch_per_image = 128
LOG_STEP=1000
log_dir = r'logs\ESRGan'
model_type='SRGAN_MSE'
FP16=False
image_dtype=tf.float32
use_div2k=True
use_div8k=False
blur_detection=True
MSE_after_bicubic=False
use_noise=True
progressive_training=False
espcn_growing=True
lr_reference=False
plot_PSNR=True
plot_LPIPS=True
init = tf.keras.initializers.GlorotUniform()  # Glorot (Xavier) uniform initialization
if FP16:
image_dtype=tf.float16
tf.keras.mixed_precision.set_global_policy('mixed_float16') #<-- Not much benefit for Tesla T4 (7.5 TFLOP) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PublicMsgKeyword import PublicMsgKeyword
class AlipayOpenPublicTemplateMessageAddModel(object):
def __init__(self):
self._keyword_list = None
self._lib_code = None
self._opt_list = None
@property
def keyword_list(self):
return self._keyword_list
@keyword_list.setter
def keyword_list(self, value):
if isinstance(value, list):
self._keyword_list = list()
for i in value:
if isinstance(i, PublicMsgKeyword):
self._keyword_list.append(i)
else:
self._keyword_list.append(PublicMsgKeyword.from_alipay_dict(i))
@property
def lib_code(self):
return self._lib_code
@lib_code.setter
def lib_code(self, value):
self._lib_code = value
@property
def opt_list(self):
return self._opt_list
@opt_list.setter
def opt_list(self, value):
if isinstance(value, list):
self._opt_list = list()
for i in value:
if isinstance(i, PublicMsgKeyword):
self._opt_list.append(i)
else:
self._opt_list.append(PublicMsgKeyword.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.keyword_list:
if isinstance(self.keyword_list, list):
for i in range(0, len(self.keyword_list)):
element = self.keyword_list[i]
if hasattr(element, 'to_alipay_dict'):
self.keyword_list[i] = element.to_alipay_dict()
if hasattr(self.keyword_list, 'to_alipay_dict'):
params['keyword_list'] = self.keyword_list.to_alipay_dict()
else:
params['keyword_list'] = self.keyword_list
if self.lib_code:
if hasattr(self.lib_code, 'to_alipay_dict'):
params['lib_code'] = self.lib_code.to_alipay_dict()
else:
params['lib_code'] = self.lib_code
if self.opt_list:
if isinstance(self.opt_list, list):
for i in range(0, len(self.opt_list)):
element = self.opt_list[i]
if hasattr(element, 'to_alipay_dict'):
self.opt_list[i] = element.to_alipay_dict()
if hasattr(self.opt_list, 'to_alipay_dict'):
params['opt_list'] = self.opt_list.to_alipay_dict()
else:
params['opt_list'] = self.opt_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenPublicTemplateMessageAddModel()
if 'keyword_list' in d:
o.keyword_list = d['keyword_list']
if 'lib_code' in d:
o.lib_code = d['lib_code']
if 'opt_list' in d:
o.opt_list = d['opt_list']
return o
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.password_service, name="password_service"),
]
|
"""
This file contains methods for managing files and other helper classes, like formatters.
"""
import glob
import os
import shutil
import subprocess
from datetime import datetime
class FileOperations:
""" FileOperations class is used for handling file operations """
def __init__(self):
pass
@staticmethod
def construct_fullpath_raw(user, *args):
""" returns full path without any validation """
# fullpath = f"/home/{user[0]}/{user}"
fullpath = "/Users/ojarva/src/github-cleanup/filetmp/test"
for arg in args:
if len(arg) > 0:
fullpath = f"{fullpath}/{arg}"
return fullpath
@staticmethod
def construct_fullpath(user, *args):
""" constructs full path from args. Returns path if path doesn't
contain symbolic links or None if something fails.
"""
path = FileOperations.construct_fullpath_raw(user, *args)
""" As all file operations run using separate filemanager
user, symbolic links (and hard links too) are too dangerous
Resolving realpath and comparing it to original path
shows symbolic links, but doesn't help against hardlinks.
"""
fullpath = os.path.realpath(path)
if path != fullpath:
return None
return fullpath
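    # Worked example (hypothetical layout): if "evil" is a symlink to /etc,
    # then os.path.realpath(".../files/evil/passwd") resolves to
    # "/etc/passwd", which differs from the constructed path, so
    # construct_fullpath returns None and the operation is refused.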
@staticmethod
def valid_file(user, *args):
""" Return True if file exists and path doesn't contain any symbolic links,
False otherwise.
"""
fullpath = FileOperations.construct_fullpath(user, *args)
if not fullpath or not os.path.exists(fullpath):
return False
return True
@staticmethod
def save_permissions(user, folder, path, settings):
fullpath = FileOperations.construct_fullpath(user, folder, path)
if not fullpath:
return {"success": False, "status": "No symlinks are allowed"}
if not os.path.isdir(fullpath):
return {"success": False, "status": "Path is not directory"}
mode = settings.get("mode", "public")
if mode == "public":
# Clean up authentication configurations
            try:
                os.remove(fullpath+"/.htaccess")
            except OSError:
                pass
            try:
                os.remove(fullpath+"/.htpasswd")
            except OSError:
                pass
return {"success": True}
if mode == "sso":
contents = """Order deny,allow
Deny from all
Satisfy any
AuthType mod_auth_pubtkt
TKTAuthLoginURL https://login.futurice.com/
TKTAuthTimeoutURL https://login.futurice.com/?timeout=1
TKTAuthUnauthURL https://login.futurice.com/?unauth=1
TKTAuthToken "futu"
TKTAuthToken "ext"
Require valid-user"""
open(fullpath+"/.htaccess", "w").write(contents)
FileOperations.chown(user, fullpath+"/.htaccess")
return {"success": True}
if mode == "basicauth":
contents = """AuthType Basic
AuthName "Protected files"
AuthUserFile %s
Require valid-user""" % (fullpath+"/.htpasswd")
open(fullpath+"/.htaccess", "w").write(contents)
FileOperations.chown(user, fullpath+"/.htaccess")
p = subprocess.Popen(["htpasswd", "-cbs", fullpath+"/.htpasswd", settings.get("username"), settings.get("password")])
p.wait()
return {"success": True}
return {"success": False, "status": "Invalid method"}
@staticmethod
def get_file(user, *args):
""" Get details for single file """
fullpath = FileOperations.construct_fullpath(user, *args)
if not fullpath:
return {}
return FileOperations.get_file_raw(fullpath)
@staticmethod
def get_file_raw(file):
""" Get details for single file. Argument is filename. """
type = "unknown"
if os.path.isfile(file):
type = "file"
elif os.path.isdir(file):
type = "dir"
elif os.path.islink(file):
type = "link"
size = os.path.getsize(file)
mtime = os.path.getmtime(file)
mtime_readable = pretty_date(mtime)
return {"full_path": file, "filename": os.path.basename(file), "size": size, "type": type, "mtime": mtime, "mtime_readable": mtime_readable}
@staticmethod
def get_files(user, folder, path):
""" Get details for files inside folder """
fullpath = FileOperations.construct_fullpath(user, folder, path)
if not fullpath:
return []
if not os.path.isdir(fullpath):
return []
list_of_files = glob.glob("%s/*" % fullpath)
list_of_files.sort()
files_final = []
for file in list_of_files:
files_final.append(FileOperations.get_file_raw(file))
return files_final
@staticmethod
def chown(user, fullpath):
""" Change ownership for single file or folder (non-recursive). """
p = subprocess.Popen(["sudo", "/root/safe_chown.py", user, fullpath])
p.wait()
@staticmethod
def mkdir(user, folder, path, foldername):
""" Create new directory """
fullpath = FileOperations.construct_fullpath(user, folder, path, foldername)
if not fullpath:
return {"success": False, "status": "Invalid path (contains symbolic links)"}
if os.path.exists(fullpath):
return {"success": False, "status": "File already exists"}
os.mkdir(fullpath, 0o771)
FileOperations.chown(user, fullpath)
return {"success": True}
@staticmethod
def delete_file(user, folder, path):
fullpath = FileOperations.construct_fullpath(user, folder, path)
if fullpath and os.path.exists(fullpath):
try:
if os.path.isdir(fullpath):
shutil.rmtree(fullpath)
else:
os.remove(fullpath)
return {"success": True}
except Exception as e:
return {"success": False, "status": "rm failed: %s" % fullpath, "err": str(e)}
return {"success": False, "status": "No such file or directory"}
@staticmethod
def upload_file(f, user, folder, path):
""" Upload new file """
fullpath = FileOperations.construct_fullpath(user, folder, path, f.name)
if not fullpath:
return False
destination = open(fullpath, "wb")
for chunk in f.chunks():
destination.write(chunk)
destination.close()
FileOperations.chown(user, fullpath)
# from http://stackoverflow.com/a/1551394/592174
def pretty_date(time=False):
"""
Get a datetime object or a int() Epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc
"""
now = datetime.now()
if type(time) is int or type(time) is float:
diff = now - datetime.fromtimestamp(time)
elif isinstance(time,datetime):
diff = now - time
elif not time:
diff = now - now
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 10:
return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
if day_diff == 1:
return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff // 30) + " months ago"
    return str(day_diff // 365) + " years ago"
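# Example outputs (assuming `import time`; values are for illustration):
#   pretty_date(time.time() - 30)    -> "30 seconds ago"
#   pretty_date(time.time() - 7200)  -> "2 hours ago"
#   pretty_date(datetime.now())      -> "just now"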
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import unittest
from azure.storage.blob import (
BlobServiceClient,
ContainerClient,
BlobClient,
StandardBlobTier
)
from testcase import (
StorageTestCase,
record,
)
# ------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'blob'
# ------------------------------------------------------------------------------
class BlobStorageAccountTest(StorageTestCase):
def setUp(self):
super(BlobStorageAccountTest, self).setUp()
url = self._get_account_url()
credential = self._get_shared_key_credential()
self.bsc = BlobServiceClient(url, credential=credential)
self.container_name = self.get_resource_name('utcontainer')
if not self.is_playback():
self.bsc.create_container(self.container_name)
def tearDown(self):
if not self.is_playback():
try:
self.bsc.delete_container(self.container_name)
            except Exception:
pass
return super(BlobStorageAccountTest, self).tearDown()
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
blob_name = self.get_resource_name(TEST_BLOB_PREFIX)
return self.bsc.get_blob_client(self.container_name, blob_name)
def _create_blob(self):
blob = self._get_blob_reference()
blob.upload_blob(b'')
return blob
def assertBlobEqual(self, container_name, blob_name, expected_data):
blob = self.bsc.get_blob_client(container_name, blob_name)
actual_data = blob.download_blob().content_as_bytes()
self.assertEqual(actual_data, expected_data)
# --Tests specific to Blob Storage Accounts (not general purpose)------------
@record
def test_standard_blob_tier_set_tier_api(self):
container = self.bsc.get_container_client(self.container_name)
tiers = [StandardBlobTier.Archive, StandardBlobTier.Cool, StandardBlobTier.Hot]
for tier in tiers:
blob = self._get_blob_reference()
data = b'hello world'
blob.upload_blob(data)
blob_ref = blob.get_blob_properties()
self.assertIsNotNone(blob_ref.blob_tier)
self.assertTrue(blob_ref.blob_tier_inferred)
self.assertIsNone(blob_ref.blob_tier_change_time)
blobs = list(container.list_blobs())
# Assert
self.assertIsNotNone(blobs)
self.assertGreaterEqual(len(blobs), 1)
self.assertIsNotNone(blobs[0])
self.assertNamedItemInContainer(blobs, blob.blob_name)
self.assertIsNotNone(blobs[0].blob_tier)
self.assertTrue(blobs[0].blob_tier_inferred)
self.assertIsNone(blobs[0].blob_tier_change_time)
blob.set_standard_blob_tier(tier)
blob_ref2 = blob.get_blob_properties()
self.assertEqual(tier, blob_ref2.blob_tier)
self.assertFalse(blob_ref2.blob_tier_inferred)
self.assertIsNotNone(blob_ref2.blob_tier_change_time)
blobs = list(container.list_blobs())
# Assert
self.assertIsNotNone(blobs)
self.assertGreaterEqual(len(blobs), 1)
self.assertIsNotNone(blobs[0])
self.assertNamedItemInContainer(blobs, blob.blob_name)
self.assertEqual(blobs[0].blob_tier, tier)
self.assertFalse(blobs[0].blob_tier_inferred)
self.assertIsNotNone(blobs[0].blob_tier_change_time)
blob.delete_blob()
@record
def test_rehydration_status(self):
blob_name = 'rehydration_test_blob_1'
blob_name2 = 'rehydration_test_blob_2'
container = self.bsc.get_container_client(self.container_name)
data = b'hello world'
blob = container.upload_blob(blob_name, data)
blob.set_standard_blob_tier(StandardBlobTier.Archive)
blob.set_standard_blob_tier(StandardBlobTier.Cool)
blob_ref = blob.get_blob_properties()
self.assertEqual(StandardBlobTier.Archive, blob_ref.blob_tier)
self.assertEqual("rehydrate-pending-to-cool", blob_ref.archive_status)
self.assertFalse(blob_ref.blob_tier_inferred)
blobs = list(container.list_blobs())
blob.delete_blob()
# Assert
self.assertIsNotNone(blobs)
self.assertGreaterEqual(len(blobs), 1)
self.assertIsNotNone(blobs[0])
self.assertNamedItemInContainer(blobs, blob.blob_name)
self.assertEqual(StandardBlobTier.Archive, blobs[0].blob_tier)
self.assertEqual("rehydrate-pending-to-cool", blobs[0].archive_status)
self.assertFalse(blobs[0].blob_tier_inferred)
blob2 = container.upload_blob(blob_name2, data)
blob2.set_standard_blob_tier(StandardBlobTier.Archive)
blob2.set_standard_blob_tier(StandardBlobTier.Hot)
blob_ref2 = blob2.get_blob_properties()
self.assertEqual(StandardBlobTier.Archive, blob_ref2.blob_tier)
self.assertEqual("rehydrate-pending-to-hot", blob_ref2.archive_status)
self.assertFalse(blob_ref2.blob_tier_inferred)
blobs = list(container.list_blobs())
# Assert
self.assertIsNotNone(blobs)
self.assertGreaterEqual(len(blobs), 1)
self.assertIsNotNone(blobs[0])
self.assertNamedItemInContainer(blobs, blob2.blob_name)
self.assertEqual(StandardBlobTier.Archive, blobs[0].blob_tier)
self.assertEqual("rehydrate-pending-to-hot", blobs[0].archive_status)
self.assertFalse(blobs[0].blob_tier_inferred)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
# Copyright 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from ironic_lib import utils
from oslo_log import log
from ironic_python_agent import errors
from ironic_python_agent import hardware
LOG = log.getLogger()
class CustomCleaningHardwareManager(hardware.HardwareManager):
# All hardware managers have a name and a version.
# Version should be bumped anytime a change is introduced. This will
# signal to Ironic that if automatic node cleaning is in progress to
# restart it from the beginning, to ensure consistency. The value can
# be anything; it's checked for equality against previously seen
# name:manager pairs.
HARDWARE_MANAGER_NAME = 'custom_cleaning_manager'
HARDWARE_MANAGER_VERSION = '1.0'
def evaluate_hardware_support(self):
"""Declare level of hardware support provided.
Since this example is explicitly about enforcing business logic during
cleaning, we want to return a static value.
:returns: HardwareSupport level for this manager.
"""
return hardware.HardwareSupport.SERVICE_PROVIDER
def get_clean_steps(self, node, ports):
return [
{
'step': 'erase_devices',
'priority': 0,
'interface': 'deploy',
'reboot_requested': False,
'abortable': False
},
{
'step': 'erase_devices_metadata',
'priority': 0,
'interface': 'deploy',
'reboot_requested': False,
'abortable': False
},
{
'step': 'get_root_disks',
'priority': 90,
'interface': 'deploy',
# If you need Ironic to coordinate a reboot after this step
# runs, but before continuing cleaning, this should be true.
'reboot_requested': False,
# If it's safe for Ironic to abort cleaning while this step
# runs, this should be true.
'abortable': False
},
{
'step': 'erase_root_disks',
'priority': 80,
'interface': 'deploy',
'reboot_requested': False,
'abortable': False
},
]
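        # With these priorities Ironic runs get_root_disks (90) before
        # erase_root_disks (80); priority 0 disables the two in-tree erase
        # steps so they never run during automated cleaning.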
    def get_root_disks(self, node, ports):
        """Find all devices that match the root device hints.

        Reference: https://github.com/openstack/ironic-lib/blob/6c9d5dc58b976c30be95340b8799daf485203994/ironic_lib/utils.py#L329

        Try to find devices that match the root device hints. In order
        for a device to be matched it needs to satisfy all the given hints.

        :param root_device_hints: A dictionary with the root device hints.
        :param devices: A list of dictionaries representing the devices
            containing one or more of the following keys:

            :name: (String) The device name, e.g /dev/sda
            :size: (Integer) Size of the device in *bytes*
            :model: (String) Device model
            :vendor: (String) Device vendor name
            :serial: (String) Device serial number
            :wwn: (String) Unique storage identifier
            :wwn_with_extension: (String): Unique storage identifier with
                the vendor extension appended
            :wwn_vendor_extension: (String): United vendor storage identifier
            :rotational: (Boolean) Whether it's a rotational device or
                not. Useful to distinguish HDDs (rotational) and SSDs
                (not rotational).
            :hctl: (String): The SCSI address: Host, channel, target and lun.
                For example: '1:0:0:0'.
            :by_path: (String): The alternative device name,
                e.g. /dev/disk/by-path/pci-0000:00

        :raises: ValueError, if some information is invalid.
        :returns: A generator with all matching devices as dictionaries.
        """
        # Find the list of root disks via ironic_lib. `devices` and
        # `root_device_hints` are placeholders here: in a real manager they
        # would come from hardware inspection and the node's properties.
        root_disks = utils.find_devices_by_hints(devices, root_device_hints)
        return root_disks
def erase_root_disks(self, node, ports):
""" Add logic to wipe out the root disks"""
return True |
"""
WSGI config for project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
import platform
from django.core.wsgi import get_wsgi_application
from django.core.management import execute_from_command_line
is_local = False
# check if the app is running on OpenShift
if not os.environ.get('OPENSHIFT_BUILD_NAMESPACE', False):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edivorce.settings.local")
is_local = True
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edivorce.settings.openshift")
if os.environ.get('POD_INIT_COMPLETE', "") != "True":
        # gunicorn starts multiple workers and imports wsgi.py once in each of
        # them. We only want these commands to run ONCE.
os.environ["POD_INIT_COMPLETE"] = "True"
# compress the static assets
execute_from_command_line(['manage.py', 'compress', '--force'])
question_fixture_path = '/opt/app-root/src/edivorce/fixtures/Question.json'
platform_name = platform.system()
if platform_name == "Windows":
question_fixture_path = os.path.realpath("./edivorce/fixtures/Question.json")
# load the Question fixture
if not is_local:
execute_from_command_line(['manage.py', 'loaddata', question_fixture_path])
application = get_wsgi_application()
|
# This config is more for testing than for demonstration
import os
import sys
import getpass
import tempfile
joinpath = os.path.join
#username = getpass.getuser() # portable way to get user name
#tmpdir = tempfile.gettempdir() # portable way to get temp directory
iswin32 = os.sep == '\\' or sys.platform == 'win32' or os.name == 'nt'
#realbuildroot = joinpath(tmpdir, username, 'projects', 'complex-unittest', 'build')
project = {
'name' : 'zm-complex-unittest',
}
LS_CMD = 'dir /B' if iswin32 else 'ls'
EXE = 'exe'
def somefunc(args):
print("somefunc: buildtype = %r" % args['buildtype'])
tasks = {
'shlib' : {
'features' : 'cxxshlib',
'source' : 'shlib/**/*.cpp',
'includes' : '.',
'run' : "echo 'This is runcmd in task \"shlib\"'",
#'configure' : [
# dict(do = 'check-headers', names = 'iostream'),
#],
# testing of 'configure.select' feature
'configure.select' : {
'default' : [
dict(do = 'check-headers', names = 'iostream'),
],
'linux' : [
dict(do = 'check-headers', names = 'iostream cstdio'),
]
}
},
'stlib' : {
'features' : 'cxxstlib',
'source' : 'stlib/**/*.cpp',
'includes' : '.',
'configure' : [
dict(do = 'check-headers', names = 'cstdio'),
],
},
'shlibmain' : {
'features' : 'cxxshlib',
'source' : 'shlibmain/**/*.cpp',
'includes' : '.',
'use' : 'shlib stlib ls',
},
'complex' : {
'features' : 'cxxprogram runcmd',
'source' : 'prog/**/*.cpp',
'includes' : '.',
'use' : 'shlibmain',
'run' : "echo 'This is runcmd in task \"complex\"'",
'install-path' : '$(prefix)/${EXE}',
},
'echo' : {
'run' : {
'cmd' : "echo say hello",
'repeat' : 2,
},
'use' : 'shlibmain',
'target' : '',
},
'ls' : {
'run' : {
'cmd' : '${LS_CMD}',
# a different way for the same result
#'cmd' : iswin32 and "dir /B" or "ls",
'cwd' : '.',
},
'target' : '',
},
'test.py' : {
'run' : {
'cmd' : '${PYTHON} tests/test.py',
'cwd' : '.',
'env' : { 'JUST_ENV_VAR' : 'qwerty', },
'shell' : False,
},
'use' : 'shlibmain',
'configure' : [ dict(do = 'find-program', names = 'python python3'), ],
'target' : '',
},
'altscript' : {
'run' : { 'cmd' : '"alt script.py"', 'cwd' : '.' },
'target' : '',
},
'pyfunc' : {
'run': somefunc,
'target' : '',
},
#### tasks for build/run tests
'stlib-test' : {
'features' : 'cxxprogram test',
'source' : 'tests/test_stlib.cpp',
'use' : 'stlib testcmn',
},
'test from script' : {
'features' : 'test',
'run' : {
'cmd' : 'tests/test.py',
#'cmd' : '${PYTHON} tests/test.py',
'cwd' : '.',
'shell' : False,
},
'use' : 'complex',
'configure' : [ dict(do = 'find-program', names = 'python python3'), ]
},
'testcmn' : {
'features' : 'cxxshlib test',
'source' : 'tests/common.cpp',
'includes' : '.',
},
'shlib-test' : {
'features' : 'cxxprogram test',
'source' : 'tests/test_shlib.cpp',
'use' : 'shlib testcmn',
'run' : {
'cmd' : '$(tgt) a b c',
#'cwd' : '.', # can be path relative to current project root path
#'cwd' : '.1',
'env' : { 'AZ' : '111', 'BROKEN_TEST' : 'false'},
'repeat' : 2,
'timeout' : 10, # in seconds, Python 3 only
'shell' : False,
},
'configure' : [ dict(do = 'check-headers', names = 'vector'), ]
},
'shlibmain-test' : {
'features' : 'cxxprogram test',
'source' : 'tests/test_shlibmain.cpp',
'use' : 'shlibmain testcmn',
},
#### these tasks are always failed but they're disabled: it's to check the 'enabled' param
'always-failed' : {
'run': "asdfghjklzxcvb",
'enabled' : False,
},
'always-failed2' : {
'run': "asdfghjklzxcvb2",
'enabled.select' : { 'default': False }
},
}
buildtypes = {
# -fPIC is necessary to compile static lib
'debug' : {
'toolchain.select' : {
'default': 'g++',
'macos' : 'clang++',
'windows': 'msvc',
},
'cxxflags.select' : {
'g++ or clang++' : '-fPIC -O0 -g',
'msvc' : '/Od /EHsc',
},
'linkflags.select' : {
'g++': '-Wl,--as-needed',
},
},
'release' : {
'toolchain.select' : {
'default': 'g++',
'macos' : 'clang++',
'windows': 'msvc',
},
'cxxflags.select' : {
'g++ or clang++' : '-fPIC -O2',
'msvc' : '/O2 /EHsc',
},
'linkflags.select' : {
'g++': '-Wl,--as-needed',
},
},
'default' : 'debug',
}
byfilter = [
#{ 'for' : 'all', 'set' : { 'rpath' : '.', } },
]
|
"""test start module."""
from itertools import product
try: # py3
from unittest import mock
except ImportError: # py2
import mock
import pytest
M_TEXT = 'query_text'
NOT_WORKING_ON_FULL_TEST = "Not working on full test."
@pytest.mark.xfail(reason=NOT_WORKING_ON_FULL_TEST)
def test_import():
"""test import.
error is raised
because profile populator run and raise error when testing.
"""
with pytest.raises(IOError):
from start import main # NOQA
@pytest.mark.xfail(reason=NOT_WORKING_ON_FULL_TEST)
def test_mock_pp():
"""test with mocked populator."""
with mock.patch('melissa.profile_populator.profile_populator'):
with pytest.raises(IOError):
from start import main # NOQA
@pytest.mark.xfail(reason=NOT_WORKING_ON_FULL_TEST)
def test_import_module():
"""test simple import."""
with pytest.raises(IOError):
import start # NOQA
@pytest.mark.parametrize(
'platform, m_stt_side_effect',
product(
['linux', 'win32', 'darwin', 'random'],
[
KeyboardInterrupt,
[None, KeyboardInterrupt],
[M_TEXT, KeyboardInterrupt],
]
)
)
def test_run_main(platform, m_stt_side_effect):
"""test run main func."""
with mock.patch('melissa.profile_loader.load_profile'):
with mock.patch('start.tts') as m_tts, \
mock.patch('start.subprocess') as m_subprocess, \
mock.patch('start.stt') as m_stt, \
mock.patch('start.load_profile') as m_load_profile, \
mock.patch('start.sys') as m_sys, \
mock.patch('start.query') as m_query:
# pre run
m_stt.side_effect = m_stt_side_effect
m_sys.platform = platform
# run
import start
with pytest.raises(KeyboardInterrupt):
start.main()
# test
m_load_profile.assert_called_once_with(True)
if platform.startswith('linux') or platform == 'win32':
m_subprocess.call.assert_called_with(
['mpg123', 'data/snowboy_resources/ding.wav'])
elif platform == 'darwin':
m_subprocess.call.assert_called_with(
['afplay', 'data/snowboy_resources/ding.wav'])
else:
m_subprocess.call.assert_not_called()
if m_stt_side_effect != KeyboardInterrupt:
if m_stt_side_effect[0] == M_TEXT:
m_query.assert_called_once_with(M_TEXT)
assert m_stt.call_count == 2
else:
m_query.assert_not_called()
m_stt.assert_called_once_with()
m_tts.assert_called()
|
from cycler import cycler
from matplotlib import rc_context
def supermongo():
    """Return a context manager that emulates the style of the SuperMongo
    plotting library. The font is still not quite there yet.
Usage:
x = <data>
y = <other data>
with supermongo():
fig, ax = plt.subplots()
ax.plot(x, y)
fig.savefig('filename')
"""
return rc_context({
'axes.facecolor': 'none',
'axes.labelpad': 18.0,
'axes.labelsize': 'large',
'axes.linewidth': 0.6,
'axes.prop_cycle': cycler(color=[
'#FF0000', # red
'#00FF00', # green
'#0000FF', # blue
'#00FFFF', # cyan
'#FF00FF', # magenta
'#FFFF00', # yellow
'#000000', # black
]),
'backend': 'GTK3AGG',
'figure.autolayout': True,
'figure.edgecolor': 'none',
'figure.facecolor': 'none',
'figure.figsize': (6, 6),
'font.family': 'serif',
'font.size': 12,
'font.stretch': 'extra-expanded', # not implemented in matplotlib
'font.weight': 'bold',
'legend.frameon': False,
'legend.handleheight': 0.1,
'legend.handlelength': 2,
'legend.handletextpad': 0.3,
'legend.numpoints': 1,
'legend.scatterpoints': 1,
'lines.linewidth': 0.5,
'lines.markeredgewidth': 0.2,
'lines.markersize': 4,
'mathtext.cal': 'cursive',
'mathtext.fontset': 'cm',
'savefig.dpi': 300,
'savefig.edgecolor': 'none',
'savefig.facecolor': 'none',
'text.usetex': True,
'xtick.bottom': True,
'xtick.top': True,
'xtick.direction': 'in',
'xtick.labelsize': 'medium',
'xtick.major.pad': 5,
'xtick.major.size': 5,
'xtick.major.width': 0.6,
'xtick.minor.size': 2,
'xtick.minor.visible': True,
'xtick.minor.width': 0.6,
'ytick.left': True,
'ytick.right': True,
'ytick.direction': 'in',
'ytick.labelsize': 'medium',
'ytick.major.pad': 5,
'ytick.major.size': 5,
'ytick.major.width': 0.6,
'ytick.minor.size': 2,
'ytick.minor.visible': True,
'ytick.minor.width': 0.6,
})
|
import collections
from functools import reduce
from datetime import datetime
def clean_user(user):
'''
arg:
user: reddit.redditor('<username>')
'''
out = collections.OrderedDict()
out['id'] = user.id
out['name'] = user.name
# out['icon_img'] = user.icon_img
out['pull_ts'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
out['created_utc'] = datetime.utcfromtimestamp(user.created_utc).strftime('%Y-%m-%d %H:%M:%S')
out['link_karma'] = user.link_karma
out['comment_karma'] = user.comment_karma
out['is_employee'] = user.is_employee
out['is_mod'] = user.is_mod
out['verified'] = user.verified
return out
def getattr_deep(obj, attr, default=''):
if obj is None:
return default
attr_list = attr.split('.')
if attr_list[0] in vars(obj).keys():
obj = getattr(obj, attr_list[0])
else:
return default
if len(attr_list) == 1:
return obj
    else:
        # propagate the default so deep lookups fall back correctly
        return getattr_deep(obj, '.'.join(attr_list[1:]), default)
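# e.g. getattr_deep(comment, 'author.name') walks comment.author.name and
# returns the default ('' here) if any link in the chain is missing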
def clean_comment(comment):
out = collections.OrderedDict()
def parse_author_id(comment):
obj = getattr(comment, 'author', None)
if obj is None:
return ''
if 'id' in vars(obj).keys():
return getattr(obj, 'id', '')
return ''
def parse_author_name(comment):
obj = getattr(comment, 'author', None)
if obj is None:
return ''
if 'id' in vars(obj).keys():
return getattr(obj, 'name', '')
return ''
def deepgetattr(obj, attr):
"""Recurses through an attribute chain to get the ultimate value."""
return reduce(getattr, attr.split('.'), obj)
out['id'] = comment.id
out['author'] = getattr_deep(comment, 'author.name')
out['author_id'] = getattr_deep(comment, 'author.id')
out['name'] = getattr_deep(comment, 'name', '')
out['parent_id'] = getattr_deep(comment, 'parent_id', '')
out['link_id'] = getattr_deep(comment, 'link_id', '')
# out['subreddit'] = comment.subreddit.display_name
out['subreddit_id'] = getattr_deep(comment, 'subreddit.id', '')
# out['permalink'] = comment.permalink
out['pull_ts'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
out['created_utc'] = datetime.utcfromtimestamp(comment.created_utc).strftime('%Y-%m-%d %H:%M:%S')
out['depth'] = getattr_deep(comment, 'depth', 0)
out['edited'] = getattr_deep(comment, 'edited', False)
out['gilded'] = getattr_deep(comment, 'gilded', False)
out['score'] = getattr_deep(comment, 'score', 0)
out['ups'] = getattr_deep(comment, 'ups', 0)
out['downs'] = getattr_deep(comment, 'downs', 0)
out['controversiality'] = getattr_deep(comment, 'controversiality', 0)
out['score_hidden'] = getattr_deep(comment, 'score_hidden', False)
out['collapsed'] = getattr_deep(comment, 'collapsed', False)
out['body'] = getattr_deep(comment, 'body')
return out
if __name__ == '__main__':
v = ",".join(["%s"] * 3)
print(v)
v = v.split(',')
print(v)
print(len(v))
print(v[1:])
print(len(v[1:]))
print('.'.join(v[1:])) |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'crash_dbg_form.ui'
#
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from sgtk.platform.qt import QtCore, QtGui
class Ui_CrashDbgForm(object):
def setupUi(self, CrashDbgForm):
CrashDbgForm.setObjectName("CrashDbgForm")
CrashDbgForm.resize(503, 395)
self.verticalLayout = QtGui.QVBoxLayout(CrashDbgForm)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.tree_view = QtGui.QTreeView(CrashDbgForm)
self.tree_view.setObjectName("tree_view")
self.horizontalLayout.addWidget(self.tree_view)
self.list_view = QtGui.QListView(CrashDbgForm)
self.list_view.setObjectName("list_view")
self.horizontalLayout.addWidget(self.list_view)
self.verticalLayout.addLayout(self.horizontalLayout)
self.retranslateUi(CrashDbgForm)
QtCore.QMetaObject.connectSlotsByName(CrashDbgForm)
def retranslateUi(self, CrashDbgForm):
CrashDbgForm.setWindowTitle(QtGui.QApplication.translate("CrashDbgForm", "Form", None, QtGui.QApplication.UnicodeUTF8))
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# addressbook
# https://github.com/tgorka/addressbook
#
# Copyright (C) 2016 Tomasz Gorka <http://tomasz.gorka.org.pl>
#
import uuid
import datetime
from addressbook import exception, trie
class AddressBook:
"""
Address Book structure contains groups and persons.
"""
def __init__(self):
"""
Initialize address book with all the needed structures
"""
# initialize groups and persons
self.groups = set()
self.persons = set()
# hash maps for searching over first and the last person name
self.first_names = {}
self.last_names = {}
# hash maps for searching over email and trie with emails
self.emails = {}
self.emailsTrie = trie.Trie()
def add_person(self, person):
"""
Add a person to the address book.
:param person: to add
:raises AlreadyPresentException: person is already added
"""
if person in self.persons:
raise exception.AlreadyPresentException(
'Person %s has been already added.' % person.name)
self.persons.add(person)
for group in person.groups:
# double check if group is not already in the address book
if group not in self.groups:
self.groups.add(group)
# add index for first and the last name for searching
self.first_names.setdefault(person.first_name.lower(), set()).add(person)
self.last_names.setdefault(person.last_name.lower(), set()).add(person)
# add index for email for searching
for email in person.emails:
self.emails.setdefault(email.lower(), set()).add(person)
self.emailsTrie.add(email.lower())
def add_group(self, group):
"""
Add a group to the address book.
:param group: to add
:raises AlreadyPresentException: group is already added
"""
if group in self.groups:
raise exception.AlreadyPresentException(
'Group %s has been already added.' % group.name)
self.groups.add(group)
for person in group.persons:
# double check if group is set for the person
if group not in person.groups:
person.groups.add(group)
if person not in self.persons:
self.add_person(person)
def find_persons_by_group(self, group):
"""
Given a group we want to easily find its members.
:param group: to filter
:return: set of persons
"""
return group.persons
def find_groups_by_person(self, person):
"""
Given a person we want to easily find the groups the person belongs to.
:param person: to filter
:return: set of groups
"""
return person.groups
def find_persons_by_name(self, first_name=None, last_name=None):
"""
Find person by name (can supply either first name, last name, or both).
:param first_name: first name of the person
:param last_name: last name of the person
:return: set of persons
"""
# get the sets of persons by first name and last name
persons_by_first_name = self.first_names.get(first_name.lower(), set()) \
if first_name is not None else self.persons
persons_by_last_name = self.last_names.get(last_name.lower(), set()) \
if last_name is not None else self.persons
        # return the intersection of these 2 sets
return persons_by_first_name & persons_by_last_name
def find_persons_by_email(self, email_prefix):
"""
Find person by email address (can supply either the exact string or
        a prefix string, i.e. both "alexander@company.com" and "alex").
:param email_prefix: to find the person
:return: set of persons
"""
persons = set()
for email in self.emailsTrie.get(email_prefix.lower()):
persons |= self.emails.get(email, set())
return persons
class Person:
"""
Person structure:
- A person has a first name and a last name.
- A person has one or more street addresses.
- A person has one or more email addresses.
- A person has one or more phone numbers.
- A person can be a member of one or more groups.
"""
def __init__(self, first_name, last_name, id=None):
"""
Initialize Person with all needed structures.
        If id is None, a default one will be generated using the UUID algorithm.
        IMPORTANT: recognize a person by id, not by name, because several
        people may share the same first and last name.
:param first_name: of the person.
:param last_name: of the person.
:param id: identifier of the person. None by default.
"""
self.__id = id if id is not None else uuid.uuid4()
self.creation_date = datetime.datetime.now()
self.first_name = first_name
self.last_name = last_name
self.emails = set()
self.phones = set()
self.addresses = set()
self.groups = set()
@property
def name(self):
"""
        Get the simplified name based on the first and last name.
:return: full name
"""
return "%s %s" % (self.first_name, self.last_name)
@property
def id(self):
return self.__id
@id.setter
def id(self, id):
raise Exception("Can't change the ID, once it has ben set in "
+ "the constructor.")
    def __eq__(self, other):
        """
        Equality method.
        Compare 2 persons by id, so equality works even if a person has
        changed their name and only one object represents the current state.
        :param other: object to compare
        :return: is the same
        """
        if isinstance(other, self.__class__):
            return self.id == other.id
        return False
    def __ne__(self, other):
        """
        Inequality method.
        Compare 2 persons by id, so inequality works even if a person has
        changed their name and only one object represents the current state.
        :param other: object to compare
        :return: is different
        """
        if isinstance(other, self.__class__):
            return self.id != other.id
        return True
def __hash__(self):
"""
Hash method
Use hash of the id property.
:return: hash code
"""
return hash(self.id)
class Address:
"""
Address structure.
"""
def __init__(self, *street_address_lines):
"""
Initialize Address with all needed structures.
:param street_address_lines: all the lines needed to be in the envelope.
"""
self.street_address_lines = street_address_lines
self.creation_date = datetime.datetime.now()
class Group:
"""
Group structure.
"""
def __init__(self, name):
"""
Initialize Group with all needed structures.
        The set of persons connected to the group will be empty at the beginning.
:param name: of the group.
"""
self.name = name
self.creation_date = datetime.datetime.now()
self.persons = set()
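
# A minimal usage sketch (hypothetical data, not part of the library); it
# assumes trie.Trie exposes get(prefix) returning the matching emails, as
# find_persons_by_email above already relies on.
if __name__ == '__main__':
    book = AddressBook()
    friends = Group('friends')
    alex = Person('Alexander', 'Smith')
    alex.emails.add('alexander@company.com')
    alex.groups.add(friends)
    friends.persons.add(alex)
    book.add_group(friends)
    print(book.find_persons_by_name(first_name='Alexander'))
    print(book.find_persons_by_email('alex'))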
|
# -*- coding: utf-8 -*-
# Copyright (c) 2007-2011 NovaReto GmbH
# cklinger@novareto.de
import grok
import zope.component
import uvcsite.plugins
from ukh.adhoc.interfaces import IUKHAdHocApp
from ukh.adhoc.auth import UserAuthenticatorPlugin
from zope.pluggableauth import PluggableAuthentication
from zope.pluggableauth.interfaces import IAuthenticatorPlugin
from grokcore.registries import create_components_registry
from zope.authentication.interfaces import IAuthentication
adhocRegistry = create_components_registry(
name="ukhadhocRegistry",
bases=(zope.component.globalSiteManager,)
)
def setup_pau(PAU):
PAU.authenticatorPlugins = ("users",)
PAU.credentialsPlugins = (
"ukh_cookies",
"Zope Realm Basic-Auth",
"No Challenge if Authenticated",
)
class UKHAdHocApp(grok.Application, grok.Container):
grok.implements(IUKHAdHocApp)
grok.local_utility(
PluggableAuthentication,
IAuthentication,
public=True,
setup=setup_pau
)
grok.local_utility(
UserAuthenticatorPlugin,
provides=IAuthenticatorPlugin,
name="users"
)
def getSiteManager(self):
current = super(UKHAdHocApp, self).getSiteManager()
if adhocRegistry not in current.__bases__:
adhocRegistry.__bases__ = tuple([
x for x in adhocRegistry.__bases__
if x.__hash__() != zope.component.globalSiteManager.__hash__()
])
current.__bases__ = (adhocRegistry,) + current.__bases__
else:
if current.__bases__.index(adhocRegistry) == 1:
current.__bases__ = current.__bases__[::-1]
return current
|
#!/usr/bin/python
txt_dir = '/home/TestData/DPI_DATA/guangzhou_LTE/DPI_test_data/2'
xml_dir = '/home/ligang/xml'
xml_template = '/home/test_template.xml'
exclude = ['pcapstc.txt', 'badfile.txt']
#------------------------------------------------------------------------------------------
import os
#find all txt files
txt_files = []
if not os.path.exists(txt_dir):
    print('ERROR: %s does not exist!' % txt_dir)
exit()
for file in os.listdir(txt_dir):
full_name = os.path.join(txt_dir, file)
if os.path.isfile(full_name) and file.split('.')[-1] == 'txt' and file not in exclude:
txt_files.append(full_name)
txt_files.sort()
print('INFO: found %d txt files in %s' % (len(txt_files), txt_dir))
#read all txt files
all_data = {}
for txt in txt_files:
try:
with open(txt) as f:
lines = f.readlines()[4:]
all_data.setdefault(txt, lines)
    except (IOError, OSError):
        print('ERROR: failed to read %s' % txt)
        continue
#generate xml files
# it's hard to parse these as XML because of the GB2312 encoding and Chinese characters
if not os.path.exists(xml_dir):
os.mkdir(xml_dir)
with open(xml_template) as template:
template_content = template.read()
for key in all_data:
file = os.path.split(key)[-1]
short_name = file.split('.')[0].split('_')
service_type = short_name[0]
short_name.remove(service_type)
lines = all_data[key]
print('%s' % file)
for i in range(0, len(lines)):
try:
words = lines[i].split()
xml_file_name = service_type + '_' + words[3] + '_' + '_'.join(short_name) + '_' + str(i+1) + '.xml'
xml_file_fullname = os.path.join(xml_dir, xml_file_name)
with open(xml_file_fullname, 'w') as xml_file:
template_content_copy = template_content
template_content_copy = template_content_copy.replace('template_datasource', file.split('.')[0])
template_content_copy = template_content_copy.replace('template_sgsndataip', words[0])
template_content_copy = template_content_copy.replace('template_ggsndataip', words[1])
template_content_copy = template_content_copy.replace('template_srcip', words[2])
template_content_copy = template_content_copy.replace('template_dstip', words[3])
template_content_copy = template_content_copy.replace('template_val', words[6]+','+words[7]+','+words[8]+','+words[9])
xml_file.write(template_content_copy)
print(' %s' % xml_file_name)
        except Exception:
            print('ERROR: %s format error' % file)
            break
print('INFO: finished generating xml files in %s' % xml_dir)
|
values = [1, 2, 3, 4, 5]
# You can combine two membership checks with `or`;
# if either check evaluates to True, the whole expression will be True
statement = 1 in values or 6 in values
print(f"{statement}: either 1 or 6 is in 'values'.")
# Note that you can't shorten this to:
#   statement = 1 or 6 in values
# That parses as `1 or (6 in values)`, which is always truthy because of
# short-circuiting on the literal 1 -- you have to do 2 whole checks
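# A minimal demonstration (hypothetical) of the pitfall above:
wrong = 1 or 6 in values  # parsed as 1 or (6 in values) -> always the truthy 1
print(f"{wrong}: not a membership test at all, just the literal 1.")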
|
from __future__ import absolute_import, division, print_function
import logging
import math
import iotbx.phil
from cctbx.array_family import flex
from cctbx import miller
from dials.util import Sorry
from scitbx import lbfgs
from dials.algorithms.scaling.scaling_library import determine_best_unit_cell
from dials.util.batch_handling import (
calculate_batch_offsets,
assign_batches_to_reflections,
)
from dials.util.filter_reflections import filter_reflection_table
logger = logging.getLogger(__name__)
def poly_residual(xp, y, params):
"""Compute the residual between the observations y[i] and sum_j
params[j] x[i]^j. For efficiency, x[i]^j are pre-calculated in xp."""
c = len(y)
e = flex.double([flex.sum(xp[j] * params) for j in range(c)])
return flex.sum(flex.pow2(y - e))
def poly_gradients(xp, y, params):
    """Compute the gradient of the residual w.r.t. the parameters. N.B.
    this is performed using a finite difference method, though it should
    be trivial to do algebraically."""
eps = 1.0e-6
g = flex.double()
n = len(params)
for j in range(n):
rs = []
for signed_eps in [-eps, eps]:
params_eps = params[:]
params_eps[j] += signed_eps
rs.append(poly_residual(xp, y, params_eps))
g.append((rs[1] - rs[0]) / (2 * eps))
return g
class poly_fitter(object):
"""A class to do the polynomial fit. This will fit observations y
at points x with a polynomial of order n."""
def __init__(self, points, values, order):
self.x = flex.double([1.0 for j in range(order)])
self._x = flex.double(points)
self._y = flex.double(values)
# precalculate x[j]^[0-(n - 1)] values
self._xp = [
flex.double([math.pow(x, j) for j in range(order)]) for x in self._x
]
def refine(self):
"""Actually perform the parameter refinement."""
tp = lbfgs.termination_parameters(max_iterations=1000)
r = lbfgs.run(target_evaluator=self, termination_params=tp)
return r
def compute_functional_and_gradients(self):
return (
poly_residual(self._xp, self._y, self.x),
poly_gradients(self._xp, self._y, self.x),
)
def evaluate(self, x):
"""Evaluate the resulting fit at point x."""
return sum(math.pow(x, k) * sxk for k, sxk in enumerate(self.x))
def fit(x, y, order):
"""Fit the values y(x) then return this fit. x, y should
be iterables containing floats of the same size. The order is the order
of polynomial to use for this fit. This will be useful for e.g. I/sigma."""
logger.debug("fitter: %s %s %s", (x, y, order))
pf = poly_fitter(x, y, order)
logger.debug("fitter: refine")
pf.refine()
logger.debug("fitter: done")
return [pf.evaluate(_x) for _x in x]
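# A minimal sketch (hypothetical values): smooth merged I/sigma measured in
# four 1/d^2 bins with a 3-coefficient polynomial before interpolating a cutoff:
#   smoothed = fit([0.1, 0.2, 0.3, 0.4], [12.0, 8.5, 4.2, 1.1], 3)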
def tanh_fit(x, y, iqr_multiplier=None):
from scitbx.math import curve_fitting
tf = curve_fitting.tanh_fit(x, y)
f = curve_fitting.tanh(*tf.params)
if iqr_multiplier is not None:
assert iqr_multiplier > 0
yc = f(x)
dy = y - yc
from scitbx.math import five_number_summary
min_x, q1_x, med_x, q3_x, max_x = five_number_summary(dy)
iqr_x = q3_x - q1_x
cut_x = iqr_multiplier * iqr_x
outliers = (dy > q3_x + cut_x) | (dy < q1_x - cut_x)
if outliers.count(True) > 0:
xo = x.select(~outliers)
yo = y.select(~outliers)
tf = curve_fitting.tanh_fit(xo, yo)
f = curve_fitting.tanh(*tf.params)
return f(x)
def log_fit(x, y, order):
"""Fit the values log(y(x)) then return exp() to this fit. x, y should
be iterables containing floats of the same size. The order is the order
of polynomial to use for this fit. This will be useful for e.g. I/sigma."""
ly = [math.log(_y) for _y in y]
pf = poly_fitter(x, ly, order)
pf.refine()
return [math.exp(pf.evaluate(_x)) for _x in x]
def log_inv_fit(x, y, order):
"""Fit the values log(1 / y(x)) then return the inverse of this fit.
x, y should be iterables, the order of the polynomial for the transformed
fit needs to be specified. This will be useful for e.g. Rmerge."""
ly = [math.log(1.0 / _y) for _y in y]
pf = poly_fitter(x, ly, order)
pf.refine()
return [(1.0 / math.exp(pf.evaluate(_x))) for _x in x]
def interpolate_value(x, y, t):
"""Find the value of x: y(x) = t."""
if t > max(y) or t < min(y):
raise RuntimeError("t outside of [%f, %f]" % (min(y), max(y)))
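    # scan consecutive samples and linearly interpolate within the first
    # interval that brackets t (implicitly returns None if no interval does)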
for j in range(1, len(x)):
x0 = x[j - 1]
y0 = y[j - 1]
x1 = x[j]
y1 = y[j]
if (y0 - t) * (y1 - t) < 0:
return x0 + (t - y0) * (x1 - x0) / (y1 - y0)
def miller_array_from_mtz(unmerged_mtz, params):
from iotbx import reflection_file_reader
hkl_in = reflection_file_reader.any_reflection_file(unmerged_mtz)
miller_arrays = hkl_in.as_miller_arrays(
merge_equivalents=False, anomalous=params.anomalous
)
i_obs = None
batches = None
all_i_obs = []
for array in miller_arrays:
labels = array.info().label_string()
if array.is_xray_intensity_array():
all_i_obs.append(array)
if labels == "BATCH":
assert batches is None
batches = array
if i_obs is None:
if len(all_i_obs) == 0:
raise Sorry("No intensities found")
elif len(all_i_obs) > 1:
if params.labels is not None:
from iotbx.reflection_file_utils import label_table
lab_tab = label_table(all_i_obs)
i_obs = lab_tab.select_array(
label=params.labels[0], command_line_switch="labels"
)
if i_obs is None:
raise Sorry(
"Multiple intensity arrays - please specify one:\n%s"
% "\n".join(
[" labels=%s" % a.info().label_string() for a in all_i_obs]
)
)
else:
i_obs = all_i_obs[0]
if hkl_in.file_type() == "ccp4_mtz":
# need original miller indices otherwise we don't get correct anomalous
# merging statistics
mtz_object = hkl_in.file_content()
if "M_ISYM" in mtz_object.column_labels():
indices = mtz_object.extract_original_index_miller_indices()
i_obs = i_obs.customized_copy(indices=indices, info=i_obs.info())
return i_obs, batches
phil_str = """
rmerge = None
.type = float(value_min=0)
.help = "Maximum value of Rmerge in the outer resolution shell"
.short_caption = "Outer shell Rmerge"
.expert_level = 1
completeness = None
.type = float(value_min=0)
.help = "Minimum completeness in the outer resolution shell"
.short_caption = "Outer shell completeness"
.expert_level = 1
cc_ref = 0.1
.type = float(value_min=0)
.help = "Minimum value of CC vs reference dataset in the outer resolution shell"
.short_caption = "Outer shell CCref"
.expert_level = 1
cc_half = 0.3
.type = float(value_min=0)
.help = "Minimum value of CC1/2 in the outer resolution shell"
.short_caption = "Outer shell CC1/2"
.expert_level = 1
cc_half_method = *half_dataset sigma_tau
.type = choice
cc_half_significance_level = 0.1
.type = float(value_min=0, value_max=1)
.expert_level = 1
cc_half_fit = polynomial *tanh
.type = choice
.expert_level = 1
isigma = 0.25
.type = float(value_min=0)
.help = "Minimum value of the unmerged <I/sigI> in the outer resolution shell"
.short_caption = "Outer shell unmerged <I/sigI>"
.expert_level = 1
misigma = 1.0
.type = float(value_min=0)
.help = "Minimum value of the merged <I/sigI> in the outer resolution shell"
.short_caption = "Outer shell merged <I/sigI>"
.expert_level = 1
i_mean_over_sigma_mean = None
.type = float(value_min=0)
.help = "Minimum value of the unmerged <I>/<sigI> in the outer resolution shell"
.short_caption = "Outer shell unmerged <I>/<sigI>"
.expert_level = 2
nbins = 100
.type = int
.help = "Number of resolution bins to use for estimation of resolution limit."
.short_caption = "Number of resolution bins."
.expert_level = 1
binning_method = *counting_sorted volume
.type = choice
.help = "Use equal-volume bins or bins with approximately equal numbers of reflections per bin."
.short_caption = "Equal-volume or equal #ref binning."
.expert_level = 1
anomalous = False
.type = bool
.short_caption = "Keep anomalous pairs separate in merging statistics"
.expert_level = 1
labels = None
.type = strings
space_group = None
.type = space_group
.expert_level = 1
reference = None
.type = path
"""
phil_defaults = iotbx.phil.parse(
"""
resolutionizer {
%s
batch_range = None
.type = ints(size=2, value_min=0)
plot = False
.type = bool
.expert_level = 2
}
"""
% phil_str
)
class resolution_plot(object):
def __init__(self, ylabel):
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
pyplot.style.use("ggplot")
self.ylabel = ylabel
self.fig = pyplot.figure()
self.ax = self.fig.add_subplot(111)
def plot(self, d_star_sq, values, label):
self.ax.plot(d_star_sq, values, label=label)
if label.startswith("CC"):
ylim = self.ax.get_ylim()
self.ax.set_ylim(0, max(ylim[1], 1.05))
def plot_resolution_limit(self, d):
from cctbx import uctbx
d_star_sq = uctbx.d_as_d_star_sq(d)
self.ax.plot([d_star_sq, d_star_sq], self.ax.get_ylim(), linestyle="--")
def savefig(self, filename):
from cctbx import uctbx
xticks = self.ax.get_xticks()
xticks_d = [
"%.2f" % uctbx.d_star_sq_as_d(ds2) if ds2 > 0 else 0 for ds2 in xticks
]
self.ax.set_xticklabels(xticks_d)
self.ax.set_xlabel("Resolution (A)")
self.ax.set_ylabel(self.ylabel)
self.ax.legend(loc="best")
self.fig.savefig(filename)
class Resolutionizer(object):
"""A class to calculate things from merging reflections."""
def __init__(self, i_obs, params, batches=None, reference=None):
self._params = params
self._reference = reference
if self._reference is not None:
self._reference = self._reference.merge_equivalents(
use_internal_variance=False
).array()
i_obs = i_obs.customized_copy(
anomalous_flag=params.anomalous, info=i_obs.info()
)
if self._params.batch_range is not None and batches is not None:
batch_min, batch_max = self._params.batch_range
assert batches is not None
sel = (batches.data() >= batch_min) & (batches.data() <= batch_max)
i_obs = i_obs.select(sel).set_info(i_obs.info())
if self._params.space_group is not None:
i_obs = i_obs.customized_copy(
space_group_info=self._params.space_group, info=i_obs.info()
)
self._intensities = i_obs
import iotbx.merging_statistics
self._merging_statistics = iotbx.merging_statistics.dataset_statistics(
i_obs=i_obs,
n_bins=self._params.nbins,
cc_one_half_significance_level=self._params.cc_half_significance_level,
cc_one_half_method=self._params.cc_half_method,
binning_method=self._params.binning_method,
anomalous=params.anomalous,
use_internal_variance=False,
eliminate_sys_absent=False,
assert_is_not_unique_set_under_symmetry=False,
)
@classmethod
def from_unmerged_mtz(cls, scaled_unmerged, params):
"""Construct the resolutionizer from an mtz file."""
i_obs, batches = miller_array_from_mtz(scaled_unmerged, params)
if params.reference is not None:
reference, _ = miller_array_from_mtz(params.reference, params)
else:
reference = None
return cls(i_obs, params, batches=batches, reference=reference)
@classmethod
def from_reflections_and_experiments(cls, reflection_tables, experiments, params):
"""Construct the resolutionizer from native dials datatypes."""
# add some assertions about data
# do batch assignment (same functions as in dials.export)
offsets = calculate_batch_offsets(experiments)
reflection_tables = assign_batches_to_reflections(reflection_tables, offsets)
batches = flex.int()
intensities = flex.double()
indices = flex.miller_index()
variances = flex.double()
for table in reflection_tables:
table = filter_reflection_table(table, ["scale"], partiality_threshold=0.4)
batches.extend(table["batch"])
intensities.extend(table["intensity.scale.value"])
indices.extend(table["miller_index"])
variances.extend(table["intensity.scale.variance"])
crystal_symmetry = miller.crystal.symmetry(
unit_cell=determine_best_unit_cell(experiments),
space_group=experiments[0].crystal.get_space_group(),
assert_is_compatible_unit_cell=False,
)
miller_set = miller.set(crystal_symmetry, indices, anomalous_flag=False)
i_obs = miller.array(miller_set, data=intensities, sigmas=flex.sqrt(variances))
i_obs.set_observation_type_xray_intensity()
i_obs.set_info(miller.array_info(source="DIALS", source_type="refl"))
ms = i_obs.customized_copy()
batch_array = miller.array(ms, data=batches)
if params.reference is not None:
reference, _ = miller_array_from_mtz(params.reference, params)
else:
reference = None
return cls(i_obs, params, batches=batch_array, reference=reference)
def resolution_auto(self):
"""Compute resolution limits based on the current self._params set."""
if self._params.rmerge:
logger.info("Resolution rmerge: %.2f", self.resolution_rmerge())
if self._params.completeness:
logger.info("Resolution completeness: %.2f", self.resolution_completeness())
if self._params.cc_half:
logger.info("Resolution cc_half: %.2f", self.resolution_cc_half())
if self._params.cc_ref and self._reference is not None:
logger.info("Resolution cc_ref: %.2f", self.resolution_cc_ref())
if self._params.isigma:
logger.info(
"Resolution I/sig: %.2f", self.resolution_unmerged_isigma()
)
if self._params.misigma:
logger.info(
"Resolution Mn(I/sig): %.2f", self.resolution_merged_isigma()
)
if self._params.i_mean_over_sigma_mean:
logger.info(
"Resolution Mn(I)/Mn(sig): %.2f",
self.resolution_i_mean_over_sigma_mean(),
)
def resolution_rmerge(self, limit=None):
"""Compute a resolution limit where either rmerge = 1.0 (limit if
set) or the full extent of the data. N.B. this fit is only meaningful
for positive values."""
if limit is None:
limit = self._params.rmerge
rmerge_s = flex.double(
[b.r_merge for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
sel = rmerge_s > 0
rmerge_s = rmerge_s.select(sel)
s_s = s_s.select(sel)
if limit == 0.0:
r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
rmerge_f = None
elif limit > flex.max(rmerge_s):
r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
rmerge_f = None
else:
rmerge_f = log_inv_fit(s_s, rmerge_s, 6)
for j, s in enumerate(s_s):
logger.debug(
"%f %f %f %f\n", s, 1.0 / math.sqrt(s), rmerge_s[j], rmerge_f[j]
)
try:
r_rmerge = 1.0 / math.sqrt(interpolate_value(s_s, rmerge_f, limit))
except Exception:
r_rmerge = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Rmerge")
if rmerge_f is not None:
plot.plot(s_s, rmerge_f, label="fit")
plot.plot(s_s, rmerge_s, label="Rmerge")
plot.plot_resolution_limit(r_rmerge)
plot.savefig("rmerge.png")
return r_rmerge
def resolution_i_mean_over_sigma_mean(self, limit=None):
"""Compute a resolution limit where either <I>/<sigma> = 1.0 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.i_mean_over_sigma_mean
isigma_s = flex.double(
[b.i_mean_over_sigi_mean for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
sel = isigma_s > 0
isigma_s = isigma_s.select(sel)
s_s = s_s.select(sel)
if flex.min(isigma_s) > limit:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
isigma_f = None
else:
isigma_f = log_fit(s_s, isigma_s, 6)
for j, s in enumerate(s_s):
logger.debug(
"%f %f %f %f\n", s, 1.0 / math.sqrt(s), isigma_s[j], isigma_f[j]
)
try:
r_isigma = 1.0 / math.sqrt(interpolate_value(s_s, isigma_f, limit))
except Exception:
if limit > max(isigma_f):
r_isigma = 1.0 / math.sqrt(flex.min(s_s))
else:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Unmerged <I>/<sigma>")
if isigma_f is not None:
plot.plot(s_s, isigma_f, label="fit")
plot.plot(s_s, isigma_s, label="Unmerged <I>/<sigma>")
plot.plot_resolution_limit(r_isigma)
plot.savefig("i_mean_over_sigma_mean.png")
return r_isigma
def resolution_unmerged_isigma(self, limit=None):
"""Compute a resolution limit where either I/sigma = 1.0 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.isigma
return self._resolution_sigma(
limit,
get_mean=lambda b: b.unmerged_i_over_sigma_mean,
label="Unmerged I/sigma",
fig_filename="isigma.png",
)
def resolution_merged_isigma(self, limit=None):
"""Compute a resolution limit where either Mn(I/sigma) = 1.0 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.misigma
return self._resolution_sigma(
limit,
get_mean=lambda b: b.i_over_sigma_mean,
label="Merged I/sigma",
fig_filename="misigma.png",
)
def _resolution_sigma(self, limit, get_mean, label, fig_filename):
isigma_s = flex.double(map(get_mean, self._merging_statistics.bins)).reversed()
s_s = flex.double(
1 / b.d_min ** 2 for b in self._merging_statistics.bins
).reversed()
sel = isigma_s > 0
isigma_s = isigma_s.select(sel)
s_s = s_s.select(sel)
if flex.min(isigma_s) > limit:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
isigma_f = None
else:
isigma_f = log_fit(s_s, isigma_s, 6)
for j, s in enumerate(s_s):
logger.debug(
"%f %f %f %f\n", s, 1.0 / math.sqrt(s), isigma_s[j], isigma_f[j]
)
try:
r_isigma = 1.0 / math.sqrt(interpolate_value(s_s, isigma_f, limit))
except Exception:
r_isigma = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel=label)
if isigma_f is not None:
plot.plot(s_s, isigma_f, label="fit")
plot.plot(s_s, isigma_s, label=label)
plot.plot_resolution_limit(r_isigma)
plot.savefig(fig_filename)
return r_isigma
def resolution_completeness(self, limit=None):
"""Compute a resolution limit where completeness < 0.5 (limit if
set) or the full extent of the data. N.B. this completeness is
with respect to the *maximum* completeness in a shell, to reflect
triclinic cases."""
if limit is None:
limit = self._params.completeness
comp_s = flex.double(
[b.completeness for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
if flex.min(comp_s) > limit:
r_comp = 1.0 / math.sqrt(flex.max(s_s))
comp_f = None
else:
comp_f = fit(s_s, comp_s, 6)
rlimit = limit * max(comp_s)
for j, s in enumerate(s_s):
logger.debug(
"%f %f %f %f\n", s, 1.0 / math.sqrt(s), comp_s[j], comp_f[j]
)
try:
r_comp = 1.0 / math.sqrt(interpolate_value(s_s, comp_f, rlimit))
except Exception:
r_comp = 1.0 / math.sqrt(flex.max(s_s))
if self._params.plot:
plot = resolution_plot(ylabel="Completeness")
if comp_f is not None:
plot.plot(s_s, comp_f, label="fit")
plot.plot(s_s, comp_s, label="Completeness")
plot.plot_resolution_limit(r_comp)
plot.savefig("completeness.png")
return r_comp
def resolution_cc_half(self, limit=None):
"""Compute a resolution limit where cc_half < 0.5 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.cc_half
if self._params.cc_half_method == "sigma_tau":
cc_s = flex.double(
[b.cc_one_half_sigma_tau for b in self._merging_statistics.bins]
).reversed()
else:
cc_s = flex.double(
[b.cc_one_half for b in self._merging_statistics.bins]
).reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
p = self._params.cc_half_significance_level
if p is not None:
if self._params.cc_half_method == "sigma_tau":
significance = flex.bool(
[
b.cc_one_half_sigma_tau_significance
for b in self._merging_statistics.bins
]
).reversed()
cc_half_critical_value = flex.double(
[
b.cc_one_half_sigma_tau_critical_value
for b in self._merging_statistics.bins
]
).reversed()
else:
significance = flex.bool(
[b.cc_one_half_significance for b in self._merging_statistics.bins]
).reversed()
cc_half_critical_value = flex.double(
[
b.cc_one_half_critical_value
for b in self._merging_statistics.bins
]
).reversed()
# index of last insignificant bin
i = flex.last_index(significance, False)
if i is None or i == len(significance) - 1:
i = 0
else:
i += 1
else:
i = 0
if self._params.cc_half_fit == "tanh":
cc_f = tanh_fit(s_s[i:], cc_s[i:], iqr_multiplier=4)
else:
cc_f = fit(s_s[i:], cc_s[i:], 6)
logger.debug("rch: fits")
rlimit = limit * max(cc_s)
for j, s in enumerate(s_s[i:]):
logger.debug("%f %f %f %f\n", s, 1.0 / math.sqrt(s), cc_s[i + j], cc_f[j])
try:
r_cc = 1.0 / math.sqrt(interpolate_value(s_s[i:], cc_f, rlimit))
except Exception:
r_cc = 1.0 / math.sqrt(max(s_s[i:]))
logger.debug("rch: done : %s", r_cc)
if self._params.plot:
plot = resolution_plot("CC1/2")
plot.plot(s_s[i:], cc_f, label="fit")
plot.plot(s_s, cc_s, label="CC1/2")
if p is not None:
plot.plot(
s_s, cc_half_critical_value, label="Confidence limit (p=%g)" % p
)
plot.plot_resolution_limit(r_cc)
plot.savefig("cc_half.png")
return r_cc
def resolution_cc_ref(self, limit=None):
"""Compute a resolution limit where cc_ref < 0.5 (limit if
set) or the full extent of the data."""
if limit is None:
limit = self._params.cc_ref
intensities = self._intensities.merge_equivalents(
use_internal_variance=False
).array()
cc_s = flex.double()
for b in self._merging_statistics.bins:
sel = intensities.resolution_filter_selection(d_min=b.d_min, d_max=b.d_max)
sel_ref = self._reference.resolution_filter_selection(
d_min=b.d_min, d_max=b.d_max
)
d = intensities.select(sel)
dref = self._reference.select(sel_ref)
cc = d.correlation(dref, assert_is_similar_symmetry=False)
cc_s.append(cc.coefficient())
cc_s = cc_s.reversed()
s_s = flex.double(
[1 / b.d_min ** 2 for b in self._merging_statistics.bins]
).reversed()
if self._params.cc_half_fit == "tanh":
cc_f = tanh_fit(s_s, cc_s, iqr_multiplier=4)
else:
cc_f = fit(s_s, cc_s, 6)
logger.debug("rch: fits")
rlimit = limit * max(cc_s)
for j, s in enumerate(s_s):
logger.debug("%f %f %f %f\n", s, 1.0 / math.sqrt(s), cc_s[j], cc_f[j])
try:
r_cc = 1.0 / math.sqrt(interpolate_value(s_s, cc_f, rlimit))
except Exception:
r_cc = 1.0 / math.sqrt(max(s_s))
logger.debug("rch: done : %s", r_cc)
if self._params.plot:
plot = resolution_plot("CCref")
plot.plot(s_s, cc_f, label="fit")
plot.plot(s_s, cc_s, label="CCref")
plot.plot_resolution_limit(r_cc)
plot.savefig("cc_ref.png")
return r_cc
|
#####################################################
# Content-Aware Detection of Timestamp Manipulation #
# IEEE Trans. on Information Forensics and Security #
# R. Padilha, T. Salem, S. Workman, #
# F. A. Andalo, A. Rocha, N. Jacobs #
#####################################################
##### DESCRIPTION
"""
Example of training script considering DenseNet as backbone,
location and satellite included as input modalities, and
multi-task optimization (including transient attribute estimation)
"""
#########################
# IMPORTS & DEFINITIONS #
#########################
import os, sys
## Dataloader
sys.path.append("../datasets")
from dataLoader import DataLoader
## Architectures
sys.path.append("../architectures")
from model_DenseNet import buildModel
## Training-related imports
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import mean_squared_error
## GPU selection
import tensorflow as tf
gpuNumber = 3
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus[gpuNumber], 'GPU')
tf.config.experimental.set_memory_growth(gpus[gpuNumber], False)
## Training definitions
batchSize = 32
nEpochs = 30
regTerm = 0.01
pathToSaveCheckpoints = "./models/"
#######################
## Custom MSE ##
#######################
# We will compute the MSE only for the consistent inputs
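# (batches are assumed to interleave consistent and inconsistent samples,
# so the stride-2 slice [0::2] selects only the consistent ones)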
def transient_mse(y_true, y_pred):
    return tf.reduce_sum(mean_squared_error(y_true[0::2, :], y_pred[0::2, :]), axis=-1)
#######################
## Build Architecture
#######################
model = buildModel(includeLocation=True, includeSatellite=True,
                   outputTransientAttributes=True, regTerm=regTerm)
print(model.summary())
#######################
## Training Setup
#######################
dl = DataLoader(setToLoad="train", includeLocation=True,
includeSatellite=True, outputTransientAttributes=True)
trainPairs = dl.setSize
trainBatchesPerEpoch = int(trainPairs/batchSize)
print("-----------> Training Setup")
print("Number of Epochs = ", nEpochs)
print("L2 Regularization = ", regTerm)
print("Batch Size = ", batchSize)
print("Train Batches per Epoch = ", trainBatchesPerEpoch)
# Create dirs to save checkpoints and logs
if not os.path.exists(pathToSaveCheckpoints):
os.makedirs(pathToSaveCheckpoints)
# Instantiate callbacks
checkpointer = ModelCheckpoint(filepath=os.path.join(pathToSaveCheckpoints,"weights.{epoch:02d}-{loss:.5f}.hdf5"), verbose=0)
# Compile model
opt = Adam(learning_rate=0.00001)
model.compile(optimizer=opt, loss={"consist_fc3":"categorical_crossentropy",
"gr_trans_fc3":transient_mse,
"ae_loc_time_trans_fc3":transient_mse},
metrics={"consist_fc3":"accuracy",
"gr_trans_fc3":"mae",
"ae_loc_time_trans_fc3":"mae"})
#######################
## Training
#######################
model.fit_generator(dl.loadImagesInBatches(batchSize),
steps_per_epoch=trainBatchesPerEpoch, epochs=nEpochs,
verbose=1, callbacks=[checkpointer])
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import unittest
from .context import engines
HANDLE = 'string.Template'
class TestStringTemplate(unittest.TestCase):
def test_valid_engine(self):
self.assertIn(HANDLE, engines.engines)
engine = engines.engines[HANDLE]
assert issubclass(engine, engines.Engine)
def test_escape(self):
engine = engines.engines[HANDLE]
template = engine(
'Several escaped dollar signs:\n'
'$$ $$ $$$$$$\n',
)
result = template.apply({
'random': 'value',
'$': 'provocation',
})
self.assertMultiLineEqual(result,
'Several escaped dollar signs:\n'
'$ $ $$$\n'
)
def test_plain_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'$beilage.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_curly_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'${essen} mit\n'
'${beilage}.\n',
)
result = template.apply({
'random': 'value',
'essen': 'Szegediner Gulasch',
'beilage': 'Kartoffeln',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'Szegediner Gulasch mit\n'
'Kartoffeln.\n'
)
def test_strict_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
)
self.assertRaises(Exception, template.apply, ({
'random': 'value',
}))
def test_tolerant_template_missing_identifier(self):
engine = engines.engines[HANDLE]
template = engine(
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n',
tolerant=True,
)
result = template.apply({
'random': 'value',
})
self.assertMultiLineEqual(result,
'Heute gibt es\n'
'$essen mit\n'
'${beilage}.\n'
)
if __name__ == '__main__':
unittest.main()
|
"""Pytorch interface for deodr."""
__all__ = [
"ColoredTriMeshPytorch",
"Scene3DPytorch",
"CameraPytorch",
"LaplacianRigidEnergyPytorch",
"TriMeshPytorch",
"MeshRGBFitterWithPose",
"MeshDepthFitter",
]
from .differentiable_renderer_pytorch import CameraPytorch, Scene3DPytorch
from .laplacian_rigid_energy_pytorch import LaplacianRigidEnergyPytorch
from .mesh_fitter_pytorch import MeshDepthFitter, MeshRGBFitterWithPose
from .triangulated_mesh_pytorch import ColoredTriMeshPytorch, TriMeshPytorch
|
#This script can be used to compare results from the C implementation with results from the TVM implementation
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
import os
def convertImage(img_path):
resized_image = Image.open(img_path).resize((96, 96))
#plt.imshow(resized_image)
#plt.show()
image_data = np.asarray(resized_image).astype("int8")
# Add a dimension to the image so that we have NHWC format layout
image_data = np.expand_dims(image_data, axis=0)
#1,96,96,1
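    # convert RGB to grayscale using (approximately) Rec. 709 luma weights
    # (0.21 R + 0.72 G + 0.07 B), keeping only a single channel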
image_data = (0.21 * image_data[:,:,:,:1]) + (0.72 * image_data[:,:,:,1:2]) + (0.07 * image_data[:,:,:,-1:])
image_data_int8 = image_data.astype("int8")
print(np.shape(image_data_int8))
return image_data_int8
folder_path = os.getcwd()
folder_path += '/val2017'
print(folder_path)
count = 0
for filename in sorted(os.listdir(folder_path)):
if count > 100: break
image_path = folder_path +'/'+ filename
image_int8 = convertImage(image_path)
image_int8 = np.resize(image_int8, (96,96))
#print(image_int8)
#image = Image.open(path)
print(filename + ' 0')
txt_file = 'test_arrays/'+str(filename) + '_array.h'
with open(txt_file, "w") as f:
f.write('#define IMAGE_DIM 96 \n')
f.write('extern const int8_t IMAGE[IMAGE_DIM*IMAGE_DIM]= {')
        np.savetxt(f, image_int8.astype("int8"), newline=",", delimiter=",", fmt='%d')
f.write('};')
count += 1
|
from abc import ABC
class Node(ABC):
def __init__(self):
self._has_consumers = False
# Sum of partial derivatives of each consumer-gate
# with respect to this node
self._cumulative_consumers_grad = 0.0
def __repr__(self):
return '{}({}, {})'.format(
self.__class__.__name__,
self.value,
self._cumulative_consumers_grad
)
def __float__(self):
return float(self.value)
class Placeholder(Node):
def __init__(self, value=None):
Node.__init__(self)
self.value = value
class Parameter(Node):
def __init__(self, value=None):
Node.__init__(self)
self.value = value
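
# A minimal usage sketch (hypothetical, not part of the original module):
if __name__ == '__main__':
    x = Placeholder(3.0)
    w = Parameter(2.0)
    print(float(x) * float(w))  # 6.0
    print(repr(w))              # Parameter(2.0, 0.0)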
|
x = int(input('Enter any number: '))
print(f'The double is {2 * x}, the triple is {3 * x} and the square root is {x ** (1/2)}.')
|
class VigenereCipher:
def __init__(self, message, key):
self.message = message
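        # repeat the key so it spans the whole message (e.g. a hypothetical
        # key 'abc' for a 7-character message becomes 'abcabca')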
self.key = key * int(len(self.message)/len(key)) + key[:len(self.message) % len(key)]
self.alphabet = {chr(i): i-97 for i in range(97, 123)}
def cipher_decipher(self, to_cipher, t):
if to_cipher:
ciphered_message = ""
for chr_i, chr_j in zip(self.message, self.key):
ciphered_message += chr((self.alphabet[chr_i] + self.alphabet[chr_j]) % 26 + 97)
return "".join(' ' * (n % int(t) == 0 and n != 0) + l for n, l in enumerate(list(ciphered_message)))
else:
deciphered_message = ""
for chr_i, chr_j in zip(self.message, self.key):
deciphered_message += chr((self.alphabet[chr_i] - self.alphabet[chr_j]) % 26 + 97)
return deciphered_message
def main():
message = input("Insert Message:\n")
# message = "TO BE OR NOT TO BE THAT IS THE QUESTION"
# message = "ksmeh zbblk smemp ogajx sejcs flzsy"
# key = "relations"
# t = 5
key = input("Insert Key:\n")
t = input("Insert t value:\n")
cipher = VigenereCipher(message.lower().replace(" ", ""), key.lower().replace(" ", ""))
ciphered_message = cipher.cipher_decipher(1, t)
print("El mensaje a enviar es:", ciphered_message, "\n")
decipher = VigenereCipher(ciphered_message.lower().replace(" ", ""), key.lower().replace(" ", ""))
deciphered_message = decipher.cipher_decipher(0, t)
print("El mensaje a enviar es:", deciphered_message, "\n")
if __name__ == '__main__':
main() |
# \MODULE\-------------------------------------------------------------------------
#
# CONTENTS : BumbleBee
#
# DESCRIPTION : Nanopore Basecalling
#
# RESTRICTIONS : none
#
# REQUIRES : none
#
# ---------------------------------------------------------------------------------
# Copyright 2021 Pay Giesselmann, Max Planck Institute for Molecular Genetics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Written by Pay Giesselmann
# ---------------------------------------------------------------------------------
import os
import yaml
import logging
import random
import tqdm
import torch
import itertools
import collections
import pkg_resources as pkg
import numpy as np
from torchinfo import summary
from torch.utils.tensorboard import SummaryWriter
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from bumblebee.db import ModDatabase
from bumblebee.ds import ModDataset
from bumblebee.optimizer import Lookahead
from bumblebee.util import running_average, WarmupScheduler
import bumblebee.modnn
log = logging.getLogger(__name__)
def main(args):
# init torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:{}".format(args.device) if use_cuda else "cpu")
torch.backends.cudnn.benchmark = True
log.info("Using device {}".format(device))
# output directory
output_dir = args.prefix
os.makedirs(output_dir, exist_ok=True)
weights_dir = os.path.join(args.prefix, 'weights')
os.makedirs(weights_dir, exist_ok=True)
# load model config
run_config = os.path.join(output_dir, 'config.yaml')
pkg_config = pkg.resource_filename('bumblebee', 'config/templates/{}.yaml'.format(args.config))
if os.path.isfile(run_config):
# resume training with existing config
with open(run_config, 'r') as fp:
config = yaml.safe_load(fp)
log.info("Loaded config file {}".format(run_config))
elif os.path.isfile(args.config):
# config is provided as file
with open(args.config, 'r') as fp:
config = yaml.safe_load(fp)
log.info("Loaded config file {}".format(args.config))
elif os.path.isfile(pkg_config):
# config is found in installation path
with open(pkg_config, 'r') as fp:
config = yaml.safe_load(fp)
log.info("Loaded config file {}".format(pkg_config))
else:
log.error("Could not find config file for {}".format(args.config))
exit(-1)
# init dataset and dataloader
log.info("Loading training dataset")
ds_train = ModDataset(args.db, args.mod_ids,
max_features=args.max_features,
min_score=args.min_score,
min_weight=args.min_weight,
max_weight=args.max_weight,
config=config.get('ds_train') or {})
if args.train_fraction < 1.0:
ds_train = torch.utils.data.Subset(ds_train,
np.arange(int(args.train_fraction * len(ds_train))))
dl_train = torch.utils.data.DataLoader(ds_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=4, worker_init_fn=ModDataset.worker_init_fn,
prefetch_factor=args.batch_size,
pin_memory=True,
drop_last=True)
log.info("Loading evaluation dataset")
ds_eval = ModDataset(args.db, args.mod_ids,
train=False,
max_features=args.max_features,
min_score=args.min_score)
if args.train_fraction < 1.0:
ds_eval = torch.utils.data.Subset(ds_eval,
np.arange(int(args.train_fraction * len(ds_eval))))
dl_eval = torch.utils.data.DataLoader(ds_eval,
batch_size=args.batch_size,
shuffle=False,
num_workers=1, worker_init_fn=ModDataset.worker_init_fn,
prefetch_factor=args.batch_size,
pin_memory=True,
drop_last=True)
eval_rate = np.ceil(len(dl_train) / len(dl_eval)).astype(int)
log.info("Loaded {} train and {} evaluation batches.".format(
len(dl_train), len(dl_eval)))
# copy config into output directory
with open(run_config, 'w') as fp:
yaml.dump(config, fp)
# init model
try:
model = getattr(bumblebee.modnn, config['model'])(args.max_features, config['params'])
except Exception as e:
log.error("Coud not find model definition for {}:\n{}".format(config['model'], e))
exit(-1)
# model summary
_, _, _batch = next(iter(dl_eval))
summary(model,
input_data=[_batch['lengths'],
_batch['kmers'],
_batch['offsets'],
_batch['features']],
device="cpu", depth=2)
model.to(device)
def avg_fn(avg_mdl, mdl, step):
scale = min(0.9999, step/1e5)
return scale * avg_mdl + (1-scale) * mdl
swa_model = torch.optim.swa_utils.AveragedModel(model, avg_fn=avg_fn, device=device)
swa_model.eval()
# loss and optimizer
criterion = torch.nn.CrossEntropyLoss(reduction='none')
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, amsgrad=False)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)
if args.lr_schedule == 'warmup':
optimizer = Lookahead(optimizer, k=5, alpha=0.5) # Initialize Lookahead
lr_scheduler = WarmupScheduler(optimizer, config['params']['d_model'],
warmup_steps=4000)
elif args.lr_schedule == 'cyclic':
lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer,
max_lr=args.lr,
base_lr=args.lr/100,
step_size_up=4000,
step_size_down=1000,
cycle_momentum=False)
elif args.lr_schedule == 'plateau':
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='max',
factor=0.5,
patience=len(dl_train),
cooldown=2*len(dl_train))
# load checkpoint
chkpt_file = os.path.join(args.prefix, 'latest.chkpt')
out_file = os.path.join(args.prefix, 'final.chkpt')
if os.path.isfile(chkpt_file):
checkpoint = torch.load(chkpt_file)
step_total = checkpoint['step_total']
last_epoch = checkpoint['last_epoch']
model.load_state_dict(checkpoint['model'])
swa_model.load_state_dict(checkpoint['swa_model'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.lr_schedule:
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
log.info("Loaded latest checkpoint: Epoch {} at step {}".format(
last_epoch+1, step_total))
else:
step_total = 0
last_epoch = 0
# running loss and accuracy
train_loss = running_average(max_len=500)
train_acc = running_average(max_len=500)
eval_loss = running_average(max_len=50)
eval_acc = running_average(max_len=50)
# summary writer
writer = SummaryWriter(output_dir, purge_step=step_total, max_queue=50)
# train step
def train_step(labels, weights, batch):
labels = labels.to(device)
weights = weights.to(device)
lengths = batch['lengths']
kmers = batch['kmers'].to(device)
offsets = batch['offsets'].to(device)
features = batch['features'].to(device)
# zero gradients
for _ in range(args.batch_echo + 1):
optimizer.zero_grad()
# forward pass
logits, model_loss, metrics = model(lengths, kmers, offsets, features)
prediction = torch.argmax(logits, dim=1)
accuracy = torch.sum(prediction == labels).item() / args.batch_size
loss = criterion(logits, labels)
if model_loss is not None:
loss += model_loss
if args.weighted:
scale = 1 / (torch.log(weights) + 1)
#loss *= scale / torch.sum(scale) * args.batch_size
loss *= scale
loss = torch.mean(loss)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
optimizer.step()
return loss.item(), accuracy, metrics
# eval step
def eval_step(labels, weights, batch, swa=False):
with torch.no_grad():
labels = labels.to(device)
#weights = weights.to(device)
lengths = batch['lengths']
kmers = batch['kmers'].to(device)
offsets = batch['offsets'].to(device)
features = batch['features'].to(device)
# forward pass
if swa:
logits, model_loss, metrics = swa_model(lengths, kmers, offsets, features)
else:
model.eval()
logits, model_loss, metrics = model(lengths, kmers, offsets, features)
model.train()
prediction = torch.argmax(logits, dim=1)
accuracy = torch.sum(prediction == labels).item() / args.batch_size
loss = criterion(logits, labels)
if model_loss is not None:
loss += model_loss
loss = torch.mean(loss)
return loss.item(), accuracy, metrics
def save(fout, swa=False):
if not swa:
torch.save(model.state_dict(),
os.path.join(weights_dir, 'weights_{}.pt'.format(step_total)))
else:
torch.save(swa_model.state_dict(),
os.path.join(weights_dir, 'weights_swa_{}.pt'.format(step_total)))
torch.save({
"step_total": step_total,
"last_epoch": epoch,
"model": model.state_dict(),
"swa_model": swa_model.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict() if args.lr_schedule else None
}, fout)
# compute epochs if target steps are given
if args.epochs:
max_steps = args.epochs * len(dl_train)
max_epochs = args.epochs
swa_start_step = args.swa_start * len(dl_train)
else:
max_steps = args.steps
max_epochs = np.ceil(args.steps / len(dl_train)).astype(int)
swa_start_step = args.swa_start
# training loop
for epoch in range(last_epoch + 1, max_epochs + 1):
dl_eval_iter = iter(dl_eval)
with tqdm.tqdm(desc='Epoch {}'.format(epoch), total=len(dl_train)) as pbar:
for step, (labels, weights, batch) in enumerate(dl_train):
# train step
_train_loss, _train_acc, metrics = train_step(labels,
weights, batch)
train_loss.append(_train_loss)
train_acc.append(_train_acc)
writer.add_scalar('training/loss', _train_loss, step_total)
writer.add_scalar('training/accuracy', _train_acc, step_total)
writer.add_scalar("learning rate", optimizer.param_groups[0]['lr'], step_total)
for key, tensor in metrics.items():
writer.add_scalar('training/{}'.format(key), torch.mean(tensor).item(), step_total)
# swa
if step_total > swa_start_step:
swa_model.update_parameters(model)
# learning rate
if args.lr_schedule and args.lr_schedule == 'plateau':
lr_scheduler.step(eval_acc.mean())
elif args.lr_schedule:
lr_scheduler.step()
# eval step
if step % eval_rate == 0:
labels, weights, batch = next(dl_eval_iter)
_eval_loss, _eval_acc, kwout = eval_step(labels,
weights, batch,
swa=step_total > swa_start_step)
eval_loss.append(_eval_loss)
eval_acc.append(_eval_acc)
writer.add_scalar('validation/loss', _eval_loss, step_total)
writer.add_scalar('validation/accuracy', _eval_acc, step_total)
# raw stats output
if args.stats:
with open(os.path.join(output_dir, 'stats.tsv'), 'a') as fp:
print('\t'.join([
str(step_total),
str(train_loss.mean()),
str(train_acc.mean()),
str(eval_loss.mean()),
str(eval_acc.mean())]), file=fp)
# progress
pbar.update(1)
pbar.set_postfix_str("Train: {:.3f} / {:.3f} Eval: {:.3f} / {:.3f}".format(
train_loss.mean(),
train_acc.mean(),
eval_loss.mean(),
eval_acc.mean()))
step_total += 1
# break if max steps reached
if step_total >= max_steps:
# stop epoch
break
if step_total >= max_steps:
# stop training
if step_total % len(dl_train) == 0:
# save checkpoint if end of epoch
save(chkpt_file, swa=step_total > swa_start_step)
break
# save epoch to resume training later
save(chkpt_file, swa=step_total > swa_start_step)
# final save
save(out_file, swa=step_total > swa_start_step)
# close & cleanup
writer.close()
def argparser():
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
add_help=False
)
parser.add_argument("db", type=str)
parser.add_argument("config", type=str)
parser.add_argument("--mod_ids", nargs='+', required=True, type=int)
parser.add_argument("--prefix", default='.', type=str)
parser.add_argument("--device", default=0, type=int)
parser.add_argument("--min_score", default=1.0, type=float)
parser.add_argument("--max_features", default=40, type=int)
parser.add_argument("--min_weight", default=1, type=int)
parser.add_argument("--max_weight", default=10000, type=int)
parser.add_argument("--weighted", action='store_true')
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--batch_echo", default=0, type=int)
parser.add_argument("--train_fraction", default=1.0, type=float)
parser.add_argument("--optimizer", default='Adam',
choices=['SGD', 'Adam'])
parser.add_argument("--lr", default=1.0, type=float)
parser.add_argument("--lr_schedule", default=None,
choices=['warmup', 'cyclic', 'plateau'])
parser.add_argument("--clip_grad_norm", default=1.5, type=float)
group = parser.add_mutually_exclusive_group()
group.add_argument("--epochs", type=int)
group.add_argument('--steps', type=int)
parser.add_argument("--swa_start", default=0, type=int)
parser.add_argument("--stats", action='store_true')
return parser
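# Minimal entry-point sketch (an assumption: given add_help=False, the original
# package most likely wires this parser into a parent CLI dispatcher):
if __name__ == '__main__':
    main(argparser().parse_args())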
|
# -*- coding:utf-8 -*-
from deeptables.preprocessing.transformer import MultiLabelEncoder, MultiKBinsDiscretizer, DataFrameWrapper, \
LgbmLeavesEncoder, CategorizeEncoder
|
from django.utils.translation import gettext as _
class Successfull(object):
pass
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
import os
import shutil
import package
def AddNoAsmArg(config_path):
"""Add --disable-assembler to config scripts."""
shutil.move(config_path, config_path + '.real')
with open(config_path, 'w') as f:
f.write(
'#!/bin/sh\n'
'%s.real --disable-assembler "$@"\n' % config_path)
    os.chmod(config_path, 0o755)  # 0o755 is valid on Python 2.6+ and 3; the bare 0755 literal is Python 2 only
class Package(package.Package):
"""nettle package."""
def __init__(self, apt_version):
super(Package, self).__init__('nettle', apt_version)
def PreBuild(self, source_directory, env, custom_bin_dir):
AddNoAsmArg(os.path.join(source_directory, 'configure'))
|
def metric(tp, tn, fp, fn):
    # confusion-matrix based metrics
    tot = tp + tn + fp + fn
    acc = (tp + tn) / tot
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)  # was `tp / tp + fn`, which always evaluated to 1 + fn
    f1 = 2*tp / (2*tp + fp + fn)
    FOR = fn / (fn + tn)  # false omission rate
    return acc, prec, rec, f1, FOR
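# Usage sketch (hypothetical confusion-matrix counts) to sanity-check the
# formulas above:
#   acc, prec, rec, f1, FOR = metric(tp=8, tn=5, fp=2, fn=1)
#   acc == 13/16, prec == 8/10, rec == 8/9, f1 == 16/19, FOR == 1/6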
|
import numpy as np
import cv2
import copy
from osgeo import gdal, osr, ogr
class GeoImage():
def __init__(self,
imgPath,
gap=100,
subsize=1024):
self.imgPath = imgPath
self.gap = gap
self.subsize = subsize
self.slide = self.subsize - self.gap
self.splits = []
self.BuildSplits(1)
ds = gdal.Open(imgPath)
        self.affineTransform = ds.GetGeoTransform()
self.tifProjection = osr.SpatialReference(wkt=ds.GetProjection())
del ds
def getProjection(self):
return self.tifProjection
def getCv2ImgFromSplit(self, xyOffset):
left, up = xyOffset
subimg = copy.deepcopy(self.resizeimg[up: (up + self.subsize), left: (left + self.subsize)])
return subimg
def BuildSplits(self, rate):
img = cv2.imread(self.imgPath)
# print("img.shape = {}".format(img.shape))
# convert image from bgr to rgb
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
assert np.shape(img) != ()
if (rate != 1):
self.resizeimg = cv2.resize(img, None, fx=rate, fy=rate, interpolation=cv2.INTER_CUBIC)
else:
self.resizeimg = img
        width = np.shape(self.resizeimg)[1]
        height = np.shape(self.resizeimg)[0]
        left, up = 0, 0
        while (left < width):
            if (left + self.subsize >= width):
                left = max(width - self.subsize, 0)
            up = 0
            while (up < height):
                if (up + self.subsize >= height):
                    up = max(height - self.subsize, 0)
                self.splits.append([left, up])
                if (up + self.subsize >= height):
                    break
                else:
                    up = up + self.slide
            if (left + self.subsize >= width):
                break
            else:
                left = left + self.slide
def getSplits(self): return self.splits
if __name__ == '__main__':
geoImg = GeoImage(r'/home/kirill/Downloads/GeoData/Davis_Monthan/Davis_Monthan_AFB_20180814.tif')
print(geoImg.getSplits())
|
#!/usr/bin/env python
"""
setup.py file for SWIG example
"""
import os
from distutils.core import setup, Extension
setup(name='kcore',
      version='0.0.1',
      author="Kuzzle",
      description="""Kuzzle sdk""",
      py_modules=["kcore"],
      )
|
# Worker: Google |
def FibonacciChecker(num):
    # walk the Fibonacci sequence (0, 1, 1, 2, 3, ...) until fn reaches num
    fn = fn_1 = 0
    fn_2 = 1
while fn <= num:
if fn == num:
return "yes"
fn = fn_1 + fn_2
fn_2 = fn_1
fn_1 = fn
return "no"
# keep this function call here
print(FibonacciChecker(int(input())))  # input() returns a str in Python 3; convert to int before comparing
|
import dataclasses
from typing import Optional
from esque.io.data_types import NoData, String
from esque.io.messages import Data
from esque.io.serializers.base import DataSerializer, SerializerConfig
@dataclasses.dataclass(frozen=True)
class StringSerializerConfig(SerializerConfig):
encoding: str = "UTF-8"
class StringSerializer(DataSerializer[StringSerializerConfig]):
data_type: String = String()
def serialize(self, data: Data) -> Optional[bytes]:
if isinstance(data.data_type, NoData):
return None
return data.payload.encode(encoding=self.config.encoding)
def deserialize(self, raw_data: Optional[bytes]) -> Data:
if raw_data is None:
return Data.NO_DATA
return Data(raw_data.decode(encoding=self.config.encoding), self.data_type)
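# Round-trip sketch (an assumption: that the DataSerializer base accepts the
# config via its constructor -- check esque.io.serializers.base for the actual
# factory before relying on this):
#   serializer = StringSerializer(StringSerializerConfig(encoding="UTF-8"))
#   raw = serializer.serialize(Data("hello", String()))   # b"hello"
#   assert serializer.deserialize(raw).payload == "hello"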
|
"""
Class to interface with a redash server
"""
import click
import requests
from ruamel import yaml
class Redash:
"""
Class to upload/download queries from redash
"""
def __init__(self, url, api_key):
self.url = url
self.api_key = api_key
    def getMaxOfList(self, values: list):
        return max(values)  # renamed parameter to avoid shadowing the builtin `list`
def Get_Queries(self, dontfilter=False):
"""
Get all queries from the given redash server
It does so in bulk queries. But it doesn't get the visualizations
In case that you don't want to get extra contents from the queries to be filtered
you can pass the dontfilter param.
"""
queries = []
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/queries".format(self.url)
has_more = True
page = 1
while has_more:
response = requests.get(path, headers=headers, params={"page": page, "order": "created_at"}).json()
queries.extend(response["results"])
has_more = page * response["page_size"] + 1 <= response["count"]
page += 1
if not dontfilter:
queries = self.filter_fields_query_list(queries)
return queries
def Get_Full_Query_By_ID(self, id: int):
"""
Get single query from the given redash server
"""
queries = []
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/queries".format(self.url)
response = requests.get(
path + "/" + str(id), headers=headers
).json()
full_query = self.filter_fields_query(response)
if full_query:
queries.append(full_query)
return queries
def Get_Full_Queries(self, queries):
"""
Download the query and its visualizations.
If you download the queries in bulk, they don't contain the visualizations
This call needs to first Get_Queries and then do one request per query to the
server to get the visualizations
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/queries".format(self.url)
full_queries = []
for query in queries:
response = requests.get(
path + "/" + str(query["id"]), headers=headers
).json()
full_query = self.filter_fields_query(response)
if full_query:
full_queries.append(full_query)
return full_queries
def Put_Queries(self, old_queries, new_queries):
"""
Upload the queries to the given redash server
If it has visualizations it will put them also
It uses the field (hack) `redpush_id` to find the query in redash server
and update it if there. If the query being uploaded doesn't have that property
it will not be uploaded.
The new queries list is modified on the process. So don't rely on it afterwards
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/queries".format(self.url)
# we get dashboards before, as an optimization. Before we were doing it on each widget, but it is
# very expensive, so we move it out, and pass it to the chain down. We save lots of extra calls to
# the server making it so much faster to run
dash_list = self.Get_Dashboards()
for query in new_queries:
if "redpush_id" not in query:
print("Query without tracking id, ignored")
continue
redpush_id = query["redpush_id"]
query.pop("redpush_id", None)
old_query = self.find_by_redpush_id(old_queries, redpush_id)
# print(old_query)
extra_path = ""
if old_query != None:
# we are updating the query
id = old_query["id"]
print("updating query " + str(id), flush=True)
extra_path = "/" + str(id)
else:
print("creating new query " + query["name"], flush=True)
if "options" not in query:
query["options"] = {}
query["options"]["redpush_id"] = redpush_id
query["is_draft"] = False
query["is_archived"] = False
# if a query has params, we accept subqueries with redpush_id, but we need to convert it
if "parameters" in query["options"]:
missingSubQuery = False
for param in query["options"]["parameters"]:
if "redpush_id" in param:
sub_redpush_id = param["redpush_id"]
param.pop("redpush_id", None)
sub_real_id = self.find_by_redpush_id(
old_queries, sub_redpush_id
)
if sub_real_id == None:
# The linked query it is not yet deployed, or wrong.
print("Subquery not found", sub_redpush_id)
missingSubQuery = True
break
else:
param["queryId"] = sub_real_id["id"]
if missingSubQuery:
# there was a missing subquery, so we don't push this query
print("Query with missing subqueries, ignored")
continue
visualizations = None
if "visualizations" in query:
visualizations = query["visualizations"]
# visualizations need to be uploaded in a diff call
query["visualizations"] = None
response = requests.post(
path + extra_path, headers=headers, json=query
).json()
id = response["id"]
# Now we handle the visualization
if visualizations != None:
for visualization in visualizations:
visualization["query_id"] = id
# we might have received a new copy of the dashboards from the server
dash_list = self.Put_Visualization(
visualization, old_query, dash_list
)
# print(response)
if old_query == None:
self.Execute_Query(response)
def Execute_Query(self, query):
"""
Once a query is created we need to try to query the results so redash executes it and also
the scheduled ones get noticed.
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/query_results".format(self.url)
requests.post(path, headers=headers, json=query)
def Archive_Missing_Queries(self, server_queries, new_queries):
"""
Make a diff between server_queries and the new_queries,
the ones appearing in server_queries but not in new_queries
are archived.
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/queries".format(self.url)
for query in server_queries:
deleteQuery = False
if "options" in query:
if "redpush_id" in query["options"]:
redpush_id = query["options"]["redpush_id"]
new_query = self.find_by_redpush_id(new_queries, redpush_id)
deleteQuery = new_query == None
else:
deleteQuery = True
print("Query without tracking id, deleting…", flush=True)
# if query doesn't have redpush_id we will get rid of it
else:
deleteQuery = True
print("Query without tracking id, deleting…", flush=True)
# if query doesn't have redpush_id we will get rid of it
if deleteQuery:
# the server query isn't in the file, so we archive it
id = query["id"]
print("deleting query " + str(id), flush=True)
extra_path = "/" + str(id)
response = requests.delete(
path + extra_path, headers=headers, json=query
).json()
# if response.status_code != 200:
# print('error deleting query', response)
def Put_Visualization(self, visualization, old_query, dash_list):
"""
Upload the visualizations to the given redash server
If it has visualizations it will put them also
It uses the field (hack) `redpush_id` to find the query in redash server
and update it if there. If the query being uploaded doesn't have that property
it will not be uploaded.
If a visualization has also the `redpush_dashboard` it will be added to that dashboard
(if not there already)
It needs also the old query if already there, so we update the visuals and
not create duplicates
It returns the dash_list, which might or might not be the same we received. If nothing was
added then it is the same, otherwise it was refreshed from the server and we have new contents
"""
if "redpush_id" not in visualization:
print("Visualization without tracking id, ignored")
            return dash_list  # a bare return would hand the caller None and break the next filter()
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/visualizations".format(self.url)
# redash doesn't allow our extra properties, so remove them
redpush_id = visualization["redpush_id"]
del visualization["redpush_id"]
redpush_dashboards = []
if "redpush_dashboards" in visualization:
redpush_dashboards = visualization["redpush_dashboards"]
del visualization["redpush_dashboards"]
extra_path = ""
if old_query != None:
# we are updating so we need to find the id first
filtered = list(
filter(
lambda x: "redpush_id" in x and x["redpush_id"] == redpush_id,
old_query["visualizations"],
)
)
if filtered:
if len(filtered) > 1:
print("There are repeated visuals. Using the first")
old_id = filtered[0]["id"]
extra_path = "/{}".format(old_id)
if "options" not in visualization:
visualization["options"] = {}
visualization["options"]["redpush_id"] = redpush_id
response = requests.post(
path + extra_path, headers=headers, json=visualization
).json()
visual_id = response["id"] # the id we got from the just added visual
# if there is redpush_dashboard then lets check if we need to add to dashboard
if redpush_dashboards:
for widget_properties in redpush_dashboards:
# check if that dashboard is already in server, and if not create it
# check against name, as if deleted it would get a new slug
filtered_dash_list = list(
filter(lambda x: x["name"] == widget_properties["name"], dash_list)
)
if filtered_dash_list:
if len(filtered_dash_list) > 1:
print("More than one dashboard with the same id, error!!!")
dash = filtered_dash_list[0]
else:
print("Creating dashboard: ", widget_properties["name"])
dash = self.Create_Dashboard(widget_properties["name"])
dash_list = self.Get_Dashboards()
# check if visual already in dashboard, and if not add it
need_to_add_widget = False
if "widgets" in dash and dash["widgets"]:
# find the widget if already there
filtered_widget_list = list(
filter(
lambda x: "visualization" in x
and x["visualization"]["id"] == visual_id,
dash["widgets"],
)
)
if not filtered_widget_list:
need_to_add_widget = True
else:
need_to_add_widget = True
# as we have the visualization from file, we need to put the id
visualization["id"] = visual_id
if need_to_add_widget:
self.Create_Widget(dash["id"], visualization, widget_properties)
else:
first = filtered_widget_list[0]
self.Update_Widget(dash["id"], first["id"], widget_properties)
return dash_list # In case we have refreshed it when adding a new one
def Create_Widget(self, dashboard_id, visual, widget_properties):
"""
Create a widget into a dashboard
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/widgets".format(self.url)
position = self.get_Widget_position(widget_properties)
widget = {
"visualization": visual,
"dashboard_id": dashboard_id,
"visualization_id": visual["id"],
"options": {"position": position},
"width": 1,
}
response = requests.post(path, headers=headers, json=widget).json()
def get_Widget_position(self, widget_properties):
"""
From the properties of a visualization in the yaml, we generate the position properties
that redash expects on the API
"""
size = "medium"
if "size" in widget_properties and len(widget_properties["size"]) > 0:
size = widget_properties["size"]
col = 0
if "col" in widget_properties and widget_properties["col"] > 0:
col = widget_properties["col"]
row = 0
if "row" in widget_properties and widget_properties["row"] > 0:
row = widget_properties["row"]
multiplierDef = {"small": 2, "medium": 3, "large": 1}
sizeXDef = { # defining how big in X the widgets are
"small": 2,
"medium": 3,
"large": 6, # max size in redash
}
        sizeYDef = {  # defining how big in Y the widgets are
"small": 5,
"medium": 9,
"large": 12,
}
multiplier = multiplierDef[size]
position = {
"autoHeight": False,
"row": row,
"col": col * multiplier,
"sizeX": sizeXDef[size],
"sizeY": sizeYDef[size],
}
return position
def Update_Widget(self, dashboard_id, widget_id, widget_properties):
"""
Update a widget already in a dashboard
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/widgets/{}".format(self.url, widget_id)
position = self.get_Widget_position(widget_properties)
widget = {
"dashboard_id": dashboard_id,
"options": {"position": position},
"text": "",
"width": 1,
}
response = requests.post(path, headers=headers, json=widget).json()
def Get_Dashboards(self):
"""
Get all dashboards from the given redash server
For that it needs to first get the list and then get the details of each one
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/dashboards?page=1&page_size=100".format(self.url)
dash_id_list = requests.get(path, headers=headers).json()
path_id_template = "{}/api/dashboards/{}"
dashboards = []
# now we get the details
        # in redash 5 the api changed: the dashboards come in the `results` object
        if "results" not in dash_id_list:
            return dashboards
for dash_id in dash_id_list["results"]:
slug = dash_id["slug"]
path_id = path_id_template.format(self.url, slug)
dashboard = requests.get(path_id, headers=headers).json()
# we need to filter some stuff, mostly inside the widgets
dashboard = self.filter_fields_blacklist(
dashboard,
[
"updated_at",
"created_at",
"is_archived",
"is_draft",
"version",
"layout",
"can_edit",
"user_id",
],
)
if "widgets" in dashboard:
filtered_widgets = []
for widget in dashboard["widgets"]:
filt_widget = self.filter_fields_blacklist(
widget,
[
"updated_at",
"created_at",
"is_archived",
"is_draft",
"version",
],
)
filtered_widgets.append(filt_widget)
dashboard["widgets"] = filtered_widgets
dashboards.append(dashboard)
return dashboards
def Create_Dashboard(self, name):
"""
Create a dashboard using the name both for the name and slug (the part of the path at the end of the dashboard url)
Warning, this function doesn't check if the dashboard with that name is already created, and if it is
it will create a duplicate
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/dashboards".format(self.url)
dash = {"name": name}
response = requests.post(path, headers=headers, json=dash).json()
# as we want it published, we need a second request to update it
response["is_draft"] = False
update = self.filter_fields_blacklist(
response, ["updated_at", "created_at", "version"]
)
response = requests.post(
path + "/" + str(response["id"]), headers=headers, json=update
).json()
# This call returns an error but still makes the change :)
return update
def filter_fields_query(self, query):
"""
Remove all unneeded fields of the query from redash.
That means mostly the ones that cannot be sent when creating a new query
it also does the hack of moving the redpush_id from the options to the top level of the query
"""
new_query = {}
for valid_key in [
"name",
"description",
"query",
"id",
"data_source_id",
"options",
"visualizations",
]:
if valid_key in query:
if valid_key == "visualizations":
# if there is a visualizations key, we need to do some cleanup also
new_query[valid_key] = list(
map(
lambda i: self.filter_fields_blacklist(
i, ["created_at", "updated_at"]
),
query[valid_key],
)
)
for visualization in new_query["visualizations"]:
if (
"options" in visualization
and "redpush_id" in visualization["options"]
):
redpush_id = visualization["options"]["redpush_id"]
del visualization["options"]["redpush_id"]
visualization["redpush_id"] = redpush_id
elif valid_key == "options":
# check if we have the redpush_id and if we do put it in the query
if "redpush_id" in query["options"]:
new_query["redpush_id"] = query["options"]["redpush_id"]
new_query[valid_key] = self.filter_fields_blacklist(
query[valid_key], ["redpush_id"]
) # we don't want our internal id there
else:
new_query[valid_key] = query[valid_key]
return new_query
def filter_fields_query_list(self, queries):
"""
Remove all unneeded fields of the query from redash.
That means mostly the ones that cannot be sent when creating a new query
"""
new_queries = []
for query in queries:
new_queries.append(self.filter_fields_query(query))
return new_queries
def filter_fields_blacklist(self, item, blacklist):
"""
Remove all the fields not in the whitelist of the item
"""
new_item = {}
for key in item:
if key not in blacklist:
new_item[key] = item[key]
return new_item
def find_by_redpush_id(self, queries, redpush_id):
"""
find a query in a list of queries that has the given redpush_id
"""
for query in queries:
if "redpush_id" in query:
if query["redpush_id"] == redpush_id:
return query
def Create_Users(self, users):
"""
Tries to create in Redash a list of users. users is a list of dicts with `name` and `email`
If the user is already created then it will silently fail
"""
headers = {"Authorization": "Key {}".format(self.api_key)}
path = "{}/api/users".format(self.url)
for user in users:
response = requests.post(path, headers=headers, json=user).json()
print(response)
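# Usage sketch (hypothetical server URL and API key; loading the new queries
# from yaml is elided):
#   redash = Redash("https://redash.example.com", "MY_API_KEY")
#   server_queries = redash.Get_Full_Queries(redash.Get_Queries())
#   redash.Put_Queries(server_queries, new_queries)
#   redash.Archive_Missing_Queries(server_queries, new_queries)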
|
# Inside of __init__.py
from pandas_ui.funct1 import pandas_ui, get_df, get_pivotdf, get_meltdf
|
#!/usr/bin/python3
import glob
import os
from PIL import Image
# make sure the output folder exists
os.makedirs("thumbs", exist_ok=True)
# get all the jpg files from the current folder
for infile in glob.glob("*.jpg"):
    im = Image.open(infile)
    # convert to thumbnail image (LANCZOS replaces the deprecated ANTIALIAS)
    im.thumbnail((500, 500), Image.LANCZOS)
    # skip files that are already thumbnails (prefixed with T_)
    if infile[0:2] != "T_":
        # prefix thumbnail file with T_
        im.save("thumbs/T_" + infile, "JPEG")
|
# -*- coding: utf-8 -*-
"""
@file
@brief Creates custom classes to interpret Python expression as column operations.
"""
from .column_operator import ColumnOperator
from .others_types import NA
class ColumnGroupOperator(ColumnOperator):
    """
    Defines an operation over a group of columns.
    """
    def __init__(self):
        """
        Initiates the operator.
        """
        pass
def __str__(self):
"""
usual
"""
raise NotImplementedError()
def __call__(self, columns):
"""
returns the results of this operation between a list of columns
"""
raise NotImplementedError()
class OperatorGroupLen(ColumnGroupOperator):
"""
defines the group function ``len``
"""
def __str__(self):
"""
usual
"""
return "len"
def __call__(self, columns):
"""
returns the results of this operation between a list of columns
"""
if not hasattr(columns, '__iter__'):
raise TypeError(
"we expect an iterator here not " + str(type(columns)))
return len(columns)
class OperatorGroupAvg(ColumnGroupOperator):
"""
defines the group function ``avg``, the default value when the set is empty is None
"""
def __str__(self):
"""
usual
"""
return "avg"
def __call__(self, columns):
"""
returns the results of this operation between a list of columns,
it returns @see cl NA for a null set
"""
if not hasattr(columns, '__iter__'):
raise TypeError(
"we expect an iterator here not " + str(type(columns)))
# we walk through the set only once
nb = 0
for val in columns:
if nb == 0:
s = val
else:
s += val
nb += 1
if nb == 0:
return NA
else:
return s / nb
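# Usage sketch of the group operators defined above:
#   OperatorGroupLen()([3, 1, 2])   # -> 3
#   OperatorGroupAvg()([3, 1, 2])   # -> 2.0
#   OperatorGroupAvg()([])          # -> NA (empty set)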
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dalton(CMakePackage):
"""Molecular electronic-structure program with extensive
functionality for calculations of molecular properties
at the HF, DFT, MCSCF, MC-srDFT, and CC levels of theory.
"""
homepage = "https://daltonprogram.org"
git = 'https://gitlab.com/dalton/dalton.git'
maintainers = ['foeroyingur']
version('master', branch='master', submodules=True)
version('2020.0', tag='2020.0', submodules=True)
version('2018.2', tag='2018.2', submodules=True)
variant('build_type', default='Release', values=('Debug', 'Release'),
description='CMake build type')
variant('ilp64', default=False, description='Use 64-bit integers')
variant('mpi', default=True, description='Use MPI')
variant('gen1int', default=True, description='Build Gen1Int library')
variant('pelib', default=True, when='~ilp64',
description='Build PE library to enable polarizable embedding calculations')
variant('pde', default=True, when='@2020.0: +pelib',
description='Enable polarizable density embedding through the PE library')
variant('qfitlib', default=True, description='Build QFIT library')
depends_on('cmake@3.1:', type='build')
depends_on('blas', type='link')
depends_on('lapack', type='link')
with when('+pde'):
depends_on('hdf5+fortran', when='+mpi', type='link')
depends_on('hdf5+fortran~mpi', when='~mpi', type='link')
depends_on('mpi', when='+mpi', type=('build', 'link', 'run'))
patch('pelib-master.patch', when='@master+mpi+pelib%gcc@10:',
working_dir='external/pelib')
patch('pelib-2020.0.patch', when='@2020.0+mpi+pelib%gcc@10:',
working_dir='external/pelib')
patch('soppa-2018.2.patch', when='@2018.2%intel')
patch('cbiexc-2018.2.patch', when='@2018.2%intel')
conflicts('%gcc@10:', when='@2018.2',
msg='Dalton 2018.2 cannot be built with GCC >= 10, please use an older'
' version or a different compiler suite.')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.spec.prefix.join('dalton'))
def cmake_args(self):
math_libs = self.spec['lapack'].libs + self.spec['blas'].libs
if '+mpi' in self.spec:
env['CC'] = self.spec['mpi'].mpicc
env['CXX'] = self.spec['mpi'].mpicxx
env['F77'] = self.spec['mpi'].mpif77
env['FC'] = self.spec['mpi'].mpifc
args = ['-DEXPLICIT_LIBS:STRING={0}'.format(math_libs.ld_flags),
self.define('ENABLE_AUTO_BLAS', False),
self.define('ENABLE_AUTO_LAPACK', False),
self.define_from_variant('ENABLE_MPI', variant='mpi'),
self.define_from_variant('ENABLE_64BIT_INTEGERS', variant='ilp64'),
self.define_from_variant('ENABLE_GEN1INT', variant='gen1int'),
self.define_from_variant('ENABLE_PELIB', variant='pelib'),
self.define_from_variant('ENABLE_PDE', variant='pde'),
self.define_from_variant('ENABLE_QFITLIB', variant='qfitlib')]
return args
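# Usage sketch (standard Spack CLI; variant names as declared above):
#   spack install dalton@2020.0 +mpi +pelib +pde
#   spack load dalton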
|
import json
import requests
from requests.exceptions import ConnectionError
from django.core.exceptions import ObjectDoesNotExist
import logging
from django.conf import settings
from requests.auth import HTTPBasicAuth
from trust_monitor_driver.informationDigest import InformationDigest, MapDigest
from trust_monitor.verifier.ra_verifier import RaVerifier
from driverCITSettings import *
import xmltodict
import untangle
from trust_monitor.models import Host
from trust_monitor.verifier.structs import IMARecord
from trust_monitor.attestation_data import (
HostAttestation, HostAttestationExtraInfo)
from trust_monitor_driver.driverConstants import *
from trust_monitor.verifier.instantiateDB import InstantiateDigest
requests.packages.urllib3.disable_warnings()
logger = logging.getLogger('driver')
class DriverCIT():
headers_json = {'content-type': 'application/json'}
headers_xml = {'accept': 'application/xml'}
# Register OpenCIT node
def registerNode(self, host):
logger.info('In registerNode method of driverCIT')
# TODO: implement method
pass
# Attest OpenCIT node
def pollHost(self, node):
logger.info('In pollHost method in driverOpenCIT')
host = Host.objects.get(hostName=node['node'])
url = (
'https://' + CIT_LOCATION + ':8443/mtwilson/v2/host-attestations')
logger.info('Analyse node: ' + host.hostName)
try:
# First, query the AS to attest the host
jsonAttest = {'host_uuid': host.hostName}
logger.debug('Define json object to be sent to OpenCIT '
'to perform attestation')
respo = requests.post(
url,
auth=HTTPBasicAuth(
CIT_API_LOGIN,
CIT_API_PASSWORD),
data=json.dumps(jsonAttest),
headers=self.headers_json, verify=False)
logger.info('Get report from %s' % host.hostName)
# Then, retrieve the AS report
url = (
"https://" + CIT_LOCATION +
":8443/mtwilson/v2/host-attestations?host_id=" +
host.hostName)
report = requests.get(
url,
headers=self.headers_xml,
auth=HTTPBasicAuth(
CIT_API_LOGIN,
CIT_API_PASSWORD),
verify=False)
# Then, parse the report and extract data (time, trust)
data = xmltodict.parse(report.content)
saml = []
try:
saml = (data['host_attestation_collection']
['host_attestations']['host_attestation'][0]['saml'])
except Exception as ex:
saml = (data['host_attestation_collection']
['host_attestations']['host_attestation']['saml'])
samlobj = untangle.parse(saml)
# Extract trust information
trust = extractTrustLevelFromResult(samlobj)
# Create IMARecord for IMA verification
rep_parser = XML_CIT_ReportParser(report.content)
rep_parser.createReport()
InformationDigest.host = host.hostName
# Call the verify method from the ra_verifier.py script
ra_verifier = RaVerifier()
info_digest = InformationDigest()
known_digests = " ".join(
item for item in InstantiateDigest.known_digests)
logger.debug("Running IMA verification")
result = ra_verifier.verifier(
host.distribution,
host.analysisType,
info_digest,
checked_containers=False,
report_id=0,
known_digests=known_digests,
port=settings.CASSANDRA_PORT,
ip=settings.CASSANDRA_LOCATION)
# If IMA verification fails, attestation is false
logger.debug("IMA verification result: " + str(result))
if result and trust:
if isinstance(result, list):
logger.debug("IMA verification result is of correct type")
# IMA verification is a 2 elements list with a first bool
# and extra info as second item
trust = result[0]
else:
logger.debug("IMA verification is of incorrect type")
trust = False
logger.debug("IMA verification has result: " + str(trust))
else:
logger.debug("IMA verification failed")
trust = False
logger.debug("Creating CIT host attestation extra info")
# Parse the IMA verification output (in info_digest)
MapDigest.mapDigest[host.hostName] = info_digest
listNotFound = (
[] if len(info_digest.list_not_found) == 0
else info_digest.list_not_found)
listFakeLib = (
[] if len(info_digest.list_fake_lib) == 0
else info_digest.list_fake_lib)
extra_info = HostAttestationExtraInfo(
info_digest.n_digests_ok,
info_digest.n_digests_not_found,
info_digest.n_digests_fake_lib,
listNotFound,
listFakeLib,
info_digest.n_packages_ok,
info_digest.n_packages_security,
info_digest.n_packages_unknown,
info_digest.n_packages_not_security
)
logger.debug("CIT host attestation extra info created")
del info_digest
# Create (and return) the final HostAttestation object
host_attestation = HostAttestation(
host.hostName,
trust,
0,
extra_info,
"Not supported",
CIT_DRIVER
)
try:
del MapDigest.mapDigest[host.hostName]
except KeyError as ke:
                logger.warning('Node %s not in map' % host.hostName)
InformationDigest.host = ''
return host_attestation
except Exception as e:
logger.error(
'Exception occurred while attesting CIT host: ' +
str(e))
return None
# See if Attestation Server (OpenCIT) is alive
def getStatus(self):
logger.info('Get Status of Driver OpenCIT')
configured = False
active = False
if not CIT_LOCATION:
logger.info('The CIT driver is not configured')
configured = False
else:
configured = True
try:
url = 'https://'+CIT_LOCATION+':8443/mtwilson-portal'
logger.debug('Try to contact OpenCIT on %s' % url)
resp = requests.get(url, verify=False, timeout=5)
logger.debug('Status = ' + str(resp.status_code))
active = True
except Exception as e:
logger.error('Error impossible to contact OpenCIT %s' % str(e))
active = False
return {CIT_DRIVER: {'configuration': configured, 'active': active}}
class XML_CIT_ReportParser(object):
def __init__(self, report_xml):
self.report = report_xml
def createReport(self):
try:
ima_xml = []
data = xmltodict.parse(self.report)
try:
ima_xml = (data['host_attestation_collection']
['host_attestations']['host_attestation'][0]
['trustReport']['hostReport']['pcrManifest']
['imaMeasurementXml'])
except Exception:
ima_xml = (data['host_attestation_collection']
['host_attestations']
['host_attestation']['trustReport']['hostReport']
['pcrManifest']['imaMeasurementXml'])
ima_obj = untangle.parse(ima_xml)
        except Exception as ex:
            raise Exception(str(ex))  # `ex.message` only exists in Python 2
for measure in ima_obj.IMA_Measurements.File:
pcr = "10"
template_digest = "null"
template_name = "ima-ng"
template_desc = "ima-ng"
event_digest = measure.cdata
event_name = measure['Path']
id_docker = "host"
template_data = ("sha1:" + event_digest + " " + event_name +
" " + id_docker)
# sha1:event_digest event_name id_docker
file_line = (pcr + " " + template_digest + " " +
template_name + " " + template_data)
logger.debug("Creating report: adding measure as IMARecord")
IMARecord(file_line)
# for child in root:
# print child.tag, child.attrib
def extractTrustLevelFromResult(samlobj):
trust_lvl = (
samlobj
.saml2_Assertion
.saml2_AttributeStatement
.saml2_Attribute[2]
.saml2_AttributeValue
.cdata)
logger.debug("CIT trust level for host is " + trust_lvl)
if (trust_lvl) == "true":
return True
else:
return False
|
# -*- coding: utf-8 -*-
'''
Honda (sling) class
Creates a sling used to throw the food to the bears
'''
from OpenGL.GL import *
from OpenGL.GLU import *
import math
# Honda (sling) class
# Fields:
# xpos (x position): float
# ypos (y position): float
# zpos (z position): float
# sz (scale factor): floatv
# rgb (color): floatv
# base (display list with the base polygons): glList
# lado1 (display list with that side's polygons): glList
# lado2 (display list with that side's polygons): glList
# lista (display list used to draw in OpenGL): glList
class Honda:
# Constructor:
def __init__(self, pos= [0.0, 0.0, 0.0], sz= None, rgb= [0.5843, 0.3725, 0.1255, 1.0]):
        # brown color
self.xpos= pos[0]
self.ypos= pos[1] - 200
self.zpos= pos[2]
self.sz= sz
self.rgb= rgb
self.base= self.generarCilindro(sz= [20, 40, 20])
self.lado1= self.generarCilindro(sz= [20, 40, 20])
self.lado2= self.generarCilindro(sz= [20, 40, 20])
self.lista= self.generarLista()
    # Methods
    # getPos: None -> floatv
    # Returns the coordinates where to place the food before launching it
def getPos(self):
if self.sz != None:
return [self.xpos, self.ypos*self.sz[1], self.zpos]
return [self.xpos, self.ypos, self.zpos]
    # generarLista: None -> glList
    # Generates the sling display list for drawing
def generarLista(self):
lista= glGenLists(1)
glNewList(lista, GL_COMPILE)
glShadeModel(GL_SMOOTH)
glMaterialfv(GL_FRONT, GL_AMBIENT, [0.0,0.0,0.0,1.0])
glMaterialfv(GL_FRONT, GL_DIFFUSE, self.rgb)
glMaterialfv(GL_FRONT, GL_SPECULAR,[0.0,0.0,0.0,1.0])
glMaterialfv(GL_FRONT, GL_SHININESS, [6.0])
glMaterialfv(GL_FRONT, GL_EMISSION, [0.0,0.0,0.0,1.0])
# Base
glPushMatrix()
glTranslate(0, 20, 0)
glCallList(self.base)
glPopMatrix()
        # Sides
glPushMatrix()
glTranslate(0, 40, 0)
glRotatef(45, 1, 0, 0)
glTranslatef(0, 20, 0)
glCallList(self.lado1)
glPopMatrix()
glPushMatrix()
glTranslate(0, 40, 0)
glRotatef(45, -1, 0, 0)
glTranslatef(0, 20, 0)
glCallList(self.lado2)
glPopMatrix()
glEndList()
return lista
    # generarCilindro: v(float, float, float) -> glList
    # Generates a display list for a cylinder of size 1 centered at the origin, oriented along Y
def generarCilindro(self, sz=[1, 1, 1]):
lista= glGenLists(1)
glNewList(lista, GL_COMPILE)
glPushMatrix()
glScalef(sz[0], sz[1], sz[2])
        puntosCirculoAbajo = []
        normales = []  # normals for the side faces
        glBegin(GL_TRIANGLE_FAN)
        # the originals were assignments (`glNormal3f= (...)`), which silently
        # replaced the OpenGL functions instead of calling them
        glNormal3f(0, -1, 0)
        glVertex3f(0, -0.5, 0)
radio= 0.5
k= 60
angulo= 2*math.pi/k
for i in range(k+1):
ang_i= angulo*i
p= [radio*math.cos(ang_i), -0.5, -radio*math.sin(ang_i)]
puntosCirculoAbajo.append(p)
n= [radio*math.cos(ang_i-angulo/2), 0, -radio*math.sin(ang_i-angulo/2)]
normales.append(n)
glVertex3fv(p)
glEnd()
puntosCirculoArriba= []
glBegin(GL_TRIANGLE_FAN)
        glNormal3f(0, 1, 0)  # was an assignment; must be a call
        glVertex3f(0, 0.5, 0)
radio= 0.5
angulo= 2*math.pi/k
for i in range(k+1):
ang_i= angulo*i
p= [radio*math.cos(ang_i), 0.5, -radio*math.sin(ang_i)]
puntosCirculoArriba.append(p)
glVertex3fv(p)
glEnd()
glBegin(GL_QUADS)
for i in range(len(puntosCirculoAbajo) - 1):
glNormal3fv(normales[i])
glVertex3fv(puntosCirculoAbajo[i])
glVertex3fv(puntosCirculoAbajo[i+1])
glVertex3fv(puntosCirculoArriba[i+1])
glVertex3fv(puntosCirculoArriba[i])
glEnd()
glPopMatrix()
glEndList()
return lista
    # dibujar: None -> None
    # Draws the sling at the corresponding position
def dibujar(self):
glShadeModel(GL_SMOOTH)
glPushMatrix()
glTranslatef(self.xpos, self.ypos, self.zpos)
if self.sz != None:
glScalef(self.sz[0], self.sz[1], self.sz[2])
glColor4fv(self.rgb)
glCallList(self.lista)
glPopMatrix()
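# Usage sketch (requires an active OpenGL context, e.g. inside a GLUT or
# pygame render loop; values are illustrative):
#   honda = Honda(pos=[0.0, 200.0, 0.0])
#   honda.dibujar()            # draw the sling
#   food_pos = honda.getPos()  # where to place the food before launching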
|
import sys
import textwrap
from okonomiyaki.errors import OkonomiyakiError
from ..misc import parse_assignments, substitute_variables, substitute_variable
from ..py3compat import StringIO
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
class TestParseAssignments(unittest.TestCase):
def test_parse_simple(self):
r_data = {"name": "dummy", "OsDist": None}
s = textwrap.dedent("""\
name = "dummy"
OsDist = None
""")
data = parse_assignments(StringIO(s))
self.assertEqual(data, r_data)
def test_parse_simple_invalid_file(self):
with self.assertRaises(OkonomiyakiError):
parse_assignments(StringIO("1 + 2"))
class TestSubstitute(unittest.TestCase):
def test_simple(self):
# Given
data = {
"foo": "${yolo}",
"bar": "${yolo}/bin",
}
variables = {
"yolo": "/foo/bar",
}
r_data = {
"foo": "/foo/bar",
"bar": "/foo/bar/bin",
}
# When
rendered_standard = substitute_variables(data, variables)
rendered_curly_only = substitute_variables(
data, variables, template='curly_braces_only'
)
# Then
self.assertEqual(rendered_standard, r_data)
self.assertEqual(rendered_curly_only, r_data)
def test_recursive(self):
# Given
data = {
"foo": "${yolo}",
"bar": "${foo}/bin",
}
variables = {
"yolo": "/foo/bar",
}
variables.update(data)
r_data = {
"foo": "/foo/bar",
"bar": "/foo/bar/bin",
}
# When
variables_standard = substitute_variables(variables, variables)
        variables_curly_only = substitute_variables(
            variables, variables, template='curly_braces_only'
        )
rendered_standard = substitute_variables(data, variables_standard)
rendered_curly_only = substitute_variables(
data, variables_curly_only, template='curly_braces_only'
)
# Then
self.assertEqual(rendered_standard, r_data)
self.assertEqual(rendered_curly_only, r_data)
def test_escape(self):
# Given
data = {
"foo": "$${yolo}",
"bar": "$${foo}/bin",
}
variables = {
"yolo": "/foo/bar",
}
variables.update(data)
r_data = {
"foo": "$${yolo}",
"bar": "$${foo}/bin",
}
r_foo_ignore_escape = "$${yolo}"
r_foo_escape = "${yolo}"
# When
variables = substitute_variables(
variables, variables, template="curly_braces_only"
)
rendered = substitute_variables(
data, variables, template="curly_braces_only"
)
render_foo_ignore_escape = substitute_variable(
data["foo"], variables, template="curly_braces_only",
ignore_escape=True
)
render_foo_escape = substitute_variable(
data["foo"], variables, template="curly_braces_only"
)
# Then
self.assertEqual(rendered, r_data)
self.assertEqual(render_foo_ignore_escape, r_foo_ignore_escape)
self.assertEqual(render_foo_escape, r_foo_escape)
def test_without_curly_braces(self):
# Given
data = {
"foo": "$yolo",
"bar": "$foo/bin",
}
variables = {
"yolo": "/foo/bar",
}
variables.update(data)
r_data = {
"foo": "$yolo",
"bar": "$foo/bin",
}
# When
variables = substitute_variables(
variables, variables, template="curly_braces_only"
)
rendered = substitute_variables(
data, variables, template="curly_braces_only"
)
# Then
self.assertEqual(rendered, r_data)
def test_empty_substitution(self):
# Given
# Empty variable name is invalid
data = {
"foo": "${}yolo",
"bar": "/bin",
}
variables = {
"yolo": "/foo/bar",
}
variables.update(data)
# When/Then
with self.assertRaises(ValueError):
variables = substitute_variables(
variables, variables, template="curly_braces_only"
)
substitute_variables(
data, variables, template="curly_braces_only"
)
def test_invalid_substitution(self):
# Given
# idpattern = r'[_a-z][_a-z0-9]*'
# Characters not matching idpattern are invalid
data = {
"foo": "${yo-lo}",
"bar": "/bin",
}
variables = {
"yo-lo": "/foo/bar",
}
variables.update(data)
# When/Then
with self.assertRaises(ValueError):
variables = substitute_variables(
variables, variables, template="curly_braces_only"
)
substitute_variables(
data, variables, template="curly_braces_only"
)
def test_key_error_substitution(self):
# Given
# Nonexistent variable name gives key error
data = {
"foo": "${nonexistent}yolo",
"bar": "/bin",
}
variables = {
"yolo": "/foo/bar",
}
variables.update(data)
# When/Then
with self.assertRaises(KeyError):
variables = substitute_variables(
variables, variables, template="curly_braces_only"
)
substitute_variables(
data, variables, template="curly_braces_only"
)
|
from flask import Flask, request
from flask_celery import make_celery
from pymongo import MongoClient, DESCENDING
import json
from flask_cors import CORS
from send import SendUrl, mysqlread, mysqlinsert
from bson.objectid import ObjectId
with open('config.json') as f:
config = json.load(f)
app = Flask(__name__)
app.config['CELERY_BROKER_URL'] = 'amqp://user:bitnami@0.0.0.0:5672'
app.config['CELERY_RESULT_BACKEND'] = 'rpc://'
CORS(app)
celery = make_celery(app)
@app.route('/send', methods=['POST'])
def urlsend():
try:
data = request.json
send.delay(data)
except Exception as e:
print("------rabbitmq api---------" +str(e))
return "successfully send"
@celery.task(name="pgm.send")
def send(data):
try:
url = set(mysqlread())
if data['url'] in url:
pass
else:
SendUrl(data)
mysqlinsert(data['url'])
except Exception as e:
print("------send task---------" +str(e))
return "completed"
@app.route('/', methods=['POST'])
def process():
try:
data = request.json
function.delay(data)
except Exception as e:
print("------flask api---------" +str(e))
return "ok"
@app.route('/get', methods=['POST'])
def replace():
try:
data = request.json
data = getData(data)
for i in data:
i['_id'] = str(i['_id'])
except Exception as e:
print("------flask api---------" +str(e))
data = [{}]
return str(json.dumps(data))
def getData(d):
    data = []  # default so the except path doesn't raise NameError on return
    try:
        client = MongoClient(config['mongodb']['host'],
                             username=config['mongodb']['username'],
                             password=config['mongodb']['password'],
                             authSource=config['mongodb']['authSource'])
        db = client.DomainMonitor
        collection = db.api
        data = list(collection.find(d, {'_class': 0}).sort('_id', DESCENDING))
        client.close()
    except Exception as e:
        print("------getData---------" + str(e))
    return data
@app.route('/update', methods=['POST'])
def mongoupdation():
try:
data = request.json
updated.delay(data)
except Exception as e:
print("------flask api---------" +str(e))
return "ok"
@celery.task(name="pgm.updated")
def updated(data):
try:
        client = MongoClient(config['mongodb']['host'],
                             username=config['mongodb']['username'],
                             password=config['mongodb']['password'],
                             authSource=config['mongodb']['authSource'])
db = client.DomainMonitor
collection = db.api
ID = data.pop('_id')
collection.delete_one({'_id': ObjectId(ID)})
#collection.update({'_id': ObjectId(ID)},{'$set':data}, upsert=True, multi=False)
collection.insert_one(data)
client.close()
except Exception as e:
print("------DB---------" +str(e))
return "completed"
@celery.task(name="pgm.function")
def function(data):
try:
        client = MongoClient(config['mongodb']['host'],
                             username=config['mongodb']['username'],
                             password=config['mongodb']['password'],
                             authSource=config['mongodb']['authSource'])
db = client.DomainMonitor
collection = db.api
collection.insert_one(data)
client.close()
except Exception as e:
print("------DB---------" +str(e))
return "completed"
if __name__ == "__main__":
app.run(debug=False, host='0.0.0.0')
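# Example requests against the endpoints above (assuming the default Flask
# port 5000 and running RabbitMQ/MongoDB backends):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"url": "http://example.com"}' http://0.0.0.0:5000/send
#   curl -X POST -H 'Content-Type: application/json' -d '{}' http://0.0.0.0:5000/get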
|
from uuid import uuid4
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.dispatch import receiver
from django.utils.crypto import get_random_string
from django.utils.translation import ugettext_lazy as _
from rest_framework.authtoken.models import Token
from online.models import Evaluator
from .managers import UserManager
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email'), unique=True)
first_name = models.CharField(_('first_name'), max_length=30, blank=True)
last_name = models.CharField(_('last_name'), max_length=30, blank=True)
full_name = models.CharField(max_length=60, blank=True, null=True)
date_joined = models.DateTimeField(_('date_joined'), auto_now_add=True)
is_staff = models.BooleanField(_('is_staff'), default=False)
is_moderator = models.BooleanField(default=False)
is_team_leader = models.BooleanField(default=False)
is_active = models.BooleanField(_('is_active'), default=True)
is_blocked = models.BooleanField(default=False)
is_validated = models.BooleanField(default=False)
is_jury = models.BooleanField(default=False)
is_from_HR = models.BooleanField(default=False)
is_from_evaluation_committee = models.BooleanField(default=False)
is_password_reset_required = models.BooleanField(default=False)
reset_password_code = models.UUIDField(default=None, blank=True, null=True)
temporary_password = models.CharField(max_length=4, blank=True, null=True)
validation_code = models.UUIDField(default=None, blank=True, null=True)
one_time_use_code = models.CharField(max_length=20, blank=True, null=True)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_full_name(self):
'''
Returns the first_name plus the last_name, with a space in between
'''
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def generate_reset_password_code(self):
'''
Returns UUID string to be sent by email when user require a password recovery action
'''
uuid_code = uuid4()
self.reset_password_code = str(uuid_code)
self.temporary_password = get_random_string(4, "hacktrx23456789")
self.save()
return self.reset_password_code
def generate_validation_code(self):
"""
Returns UUID string to be sent by email when user account is created
"""
uuid_code = uuid4()
self.validation_code = str(uuid_code)
self.save()
return self.validation_code
def normalize_user_evaluator_role(self):
"""
Avoids to have multiple roles at the same time: is_jury, is_from_HR, is_from_evaluation_committee
"""
evaluator_role_counter = 0
if self.is_jury:
evaluator_role_counter += 1
if self.is_from_HR:
evaluator_role_counter += 1
if self.is_from_evaluation_committee:
evaluator_role_counter += 1
if evaluator_role_counter > 1 or self.is_moderator:
self.is_jury = False
self.is_from_HR = False
self.is_from_evaluation_committee = False
        if not self.is_jury and not self.is_from_evaluation_committee:
            # drop the Evaluator only when the user holds neither role
            # (the original `or` deleted it even when one role was still set)
            evaluators = Evaluator.objects.filter(user=self)
            evaluators.delete()
def save(self, *args, **kwargs):
self.normalize_user_evaluator_role()
super().save(*args, **kwargs)
@receiver(models.signals.post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs): # pylint: disable=unused-argument
if created:
Token.objects.create(user=instance)
@receiver(models.signals.post_save, sender=settings.AUTH_USER_MODEL)
def create_evaluator(sender, instance, created=False, **kwargs): # pylint: disable=unused-argument
if instance.is_jury or instance.is_from_evaluation_committee:
Evaluator.objects.get_or_create(user=instance)
class UserDevice(models.Model):
user = models.ForeignKey(User, related_name='user_device', on_delete=models.CASCADE)
operating_system = models.CharField(max_length=10)
code = models.CharField(max_length=200)
    class Meta:
ordering = ['user']
|
import json
'''
This deals with reading and validating params.
'''
class InputParametersProcess:
    def __init__(self, input_param_file):
        with open(input_param_file) as fp:  # ensure the file handle is closed
            data = json.load(fp)
self.framework = data['framework']
self.model = data['model']
self.source_image_folder = data['source_image']['source_image_folder']
self.source_image_search_string = data['source_image']['source_image_search_string']
self.source_image_augment = data['source_image']['source_image_augment']
self.output_upload_location = data['output_upload']['upload_location']
self.output_cred_user = data['output_upload']['cred_user']
self.output_cred_password = data['output_upload']['cred_password']
self.training_hostname = data['training_infra']['hostname']
self.training_ssh_path = data['training_infra']['ssh_path']
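# Shape of the expected input file (illustrative values), matching the keys
# read above:
#   {
#     "framework": "tensorflow",
#     "model": "resnet50",
#     "source_image": {"source_image_folder": "/data/images",
#                      "source_image_search_string": "*.jpg",
#                      "source_image_augment": true},
#     "output_upload": {"upload_location": "s3://bucket/path",
#                       "cred_user": "user", "cred_password": "secret"},
#     "training_infra": {"hostname": "train-host", "ssh_path": "~/.ssh/id_rsa"}
#   }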
|
# coding: utf-8
"""
Example of an "echo" websocket server using `tornado_websocket.WebSocket`.
"""
|
from binance.client import Client
import time
import matplotlib
from matplotlib import cm
import matplotlib.pyplot as plt
from binance.enums import *
import save_historical_data_Roibal
from BinanceKeys import BinanceKey1
api_key = BinanceKey1['api_key']
api_secret = BinanceKey1['api_secret']
client = Client(api_key, api_secret)
# get a deposit address for BTC
address = client.get_deposit_address(asset='BTC')
def etf_portfolio_1():
    # Create a list of ICO coins to form the ETF
    list_of_symbols_ETF = ['BTCUSDT', 'ETHUSDT', 'BNBUSDT', 'BNBBTC', 'ETHBTC', 'LTCBTC']
#time_horizon = "Short"
#Risk = "High"
time.sleep(1)
print(list_of_symbols_ETF)
#Get Info about Coins in Watch List
coin_prices(list_of_symbols_ETF)
coin_tickers(list_of_symbols_ETF)
#for symbol in list_of_symbols:
# market_depth(symbol)
def convert_time_binance(gt):
    # Converts the Binance server time response (milliseconds since epoch)
    # to a local time-struct.
    # From Binance-Trader comment section code
    # gt = client.get_server_time(), e.g. {'serverTime': 1500000000000}
    print("Binance Time: ", gt)
    print(time.localtime())
    server_time_ms = int(gt['serverTime'])  # read the value directly instead of parsing str(dict)
    # keep the original hard-coded offset (roughly 3 hours, in milliseconds)
    local_seconds = (server_time_ms - 10799260) / 1000
    tt = time.localtime(int(local_seconds))
    # print(tt)
    return tt
def coin_prices(CTF):
#Will print to screen, prices of coins on 'watch list'
#returns all prices
prices = client.get_all_tickers()
print("\nSelected (CTF) Ticker Prices: ")
for price in prices:
if price['symbol'] in CTF:
print(price)
return prices
def coin_tickers(CTF):
# Prints to screen tickers for 'CTF' coins
# Returns list of all price tickers
tickers = client.get_orderbook_tickers()
print("\nCTF Order Tickers: \n")
for tick in tickers:
if tick['symbol'] in CTF:
print(tick)
return tickers
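# A hedged sketch (not in the original script) of the market_depth() helper the
# commented-out loop above refers to; python-binance's get_order_book() is a
# real endpoint, but the printout format here is an assumption.
def market_depth(symbol, limit=10):
    # Fetch the order book for one symbol and print the top bid/ask levels.
    depth = client.get_order_book(symbol=symbol, limit=limit)
    print(f"\n{symbol} order book (top {limit} levels):")
    for bid, ask in zip(depth['bids'], depth['asks']):
        print(f"bid {bid[0]} x {bid[1]} | ask {ask[0]} x {ask[1]}")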
if __name__ == "__main__":
etf_portfolio_1()
|
import numpy as np
import os
import gym
import torch
import torch.nn as nn
import collections
import copy
import random
# hyper-parameters
learn_freq = 5  # train once every N steps so the buffer accumulates experience
buffer_size = 20000  # replay buffer capacity
buffer_init_size = 200  # minimum number of experiences before training starts
batch_size = 32  # number of samples drawn per training step
learning_rate = 0.001  # learning rate
GAMMA = 0.99  # reward discount factor
class Model(nn.Module):
def __init__(self, act_dim, state_dim):
super(Model, self).__init__()
hidden1_size = 128
hidden2_size = 128
self.input_layer = nn.Linear(state_dim, hidden1_size)
self.input_layer.weight.data.normal_(0, 0.1)
self.hidden_layer = nn.Linear(hidden1_size, hidden2_size)
self.hidden_layer.weight.data.normal_(0, 0.1)
self.output_layer = nn.Linear(hidden2_size, act_dim)
self.output_layer.weight.data.normal_(0, 0.1)
def forward(self, state):
h1 = nn.functional.relu(self.input_layer(state))
h2 = nn.functional.relu(self.hidden_layer(h1))
Q = self.output_layer(h2)
return Q
class DQN:
def __init__(self, model, act_dim=None, gamma=None, lr=None):
self.model = model
self.target_model = copy.deepcopy(model)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
self.loss = nn.MSELoss()
self.act_dim = act_dim
self.lr = lr
self.gamma = gamma
def predict(self, state):
return self.model.forward(state) # shape: batch_size x act_dim
    def learn(self, state, action, reward, state_next, done):  # batch tensors
        # Target Q from the target network (gradients blocked by detach()).
        next_values = self.target_model.forward(state_next).detach()  # shape: batch_size x act_dim
        target_value = reward + (1.0 - done) * self.gamma * next_values.max(1)[0]  # shape: batch_size
        target_value = target_value.unsqueeze(1)  # match pred_value (batch_size x 1) so MSELoss does not broadcast
        # Q(s, a) from the online network.
        curr_value = self.model.forward(state)
        action = action.unsqueeze(1)
        pred_value = torch.gather(curr_value, 1, action.long())  # Q of the taken action, shape: batch_size x 1
        cost = self.loss(pred_value, target_value)
        self.optimizer.zero_grad()
        cost.backward()
        self.optimizer.step()
return cost
def update_target(self):
        self.target_model.load_state_dict(self.model.state_dict())  # sync target network parameters
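    def update_target_soft(self, tau=0.01):
        # A hedged alternative (not in the original script): Polyak averaging,
        # which blends the online weights into the target every call instead of
        # a periodic hard copy; tau is an assumed mixing coefficient.
        online = self.model.state_dict()
        target = self.target_model.state_dict()
        for key in target:
            target[key] = tau * online[key] + (1.0 - tau) * target[key]
        self.target_model.load_state_dict(target)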
class ReplayMemory(object):
def __init__(self, max_size):
self.buffer = collections.deque(maxlen=max_size)
def append(self, exp):
self.buffer.append(exp)
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size)
        state_batch, action_batch, reward_batch, state_next_batch, done_batch = [], [], [], [], []
        for exp in batch:
            s, a, r, s_next, done = exp
            state_batch.append(s)
            action_batch.append(a)
            reward_batch.append(r)
            state_next_batch.append(s_next)
            done_batch.append(done)
        return torch.from_numpy(np.array(state_batch).astype('float32')), \
               torch.from_numpy(np.array(action_batch).astype('int32')), \
               torch.from_numpy(np.array(reward_batch).astype('float32')), \
               torch.from_numpy(np.array(state_next_batch).astype('float32')), \
               torch.from_numpy(np.array(done_batch).astype('float32'))
def __len__(self):
return len(self.buffer)
class Agent:
def __init__(self, algorithm, state_dim, act_dim, epsilon=0.1, epsilon_fade=0.0):
self.dqn = algorithm
self.state_dim = state_dim
self.act_dim = act_dim
self.steps = 0
self.update_target_steps = 200
self.epsilon = epsilon
self.epsilon_fade = epsilon_fade
def explore(self, state):
sample = np.random.rand()
if sample < self.epsilon:
action = np.random.randint(self.act_dim)
else:
action = self.greedy(state)
self.epsilon = max(0.01, self.epsilon - self.epsilon_fade)
return action
def greedy(self, state):
        state = torch.as_tensor(state, dtype=torch.float32)
        pred_value = self.dqn.target_model.forward(state)
        values = pred_value.detach().numpy()
        values = np.squeeze(values, axis=None)
        action = np.argmax(values)  # pick the action with the largest Q value
        return action
def learn(self, state, action, reward, state_next, done):
if self.steps % self.update_target_steps == 0:
self.dqn.update_target()
self.steps += 1
cost = self.dqn.learn(state, action, reward, state_next, done)
return cost
def evaluate(env, agent, render=True):
eval_reward = []
for i in range(10):
state = env.reset()
episode_reward = 0
while True:
action = agent.greedy(state)
state, reward, done, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
if __name__ == '__main__':
env = gym.make('CartPole-v0')
action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
exp_buffer = ReplayMemory(buffer_size)
model = Model(act_dim=action_dim, state_dim=state_dim)
algorithm = DQN(model, act_dim=action_dim, gamma=GAMMA, lr=learning_rate)
agent = Agent(algorithm, state_dim=state_dim, act_dim=action_dim, epsilon=0.1, epsilon_fade=1e-6)
state = env.reset()
    while len(exp_buffer) < buffer_init_size:
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
exp_buffer.append((state, action, reward, state_next, done))
state = state_next
if done:
state = env.reset()
episode = 0
while episode < 20000:
for i in range(0, 100):
episode += 1
total_reward = 0
state = env.reset()
step = 0
while True:
step += 1
action = agent.explore(state)
state_next, reward, done, _ = env.step(action)
# env.render()
exp_buffer.append((state, action, reward, state_next, done))
# train
if len(exp_buffer) > buffer_init_size and step%learn_freq == 0:
(state_batch, action_batch, reward_batch, state_next_batch, done_batch) = exp_buffer.sample(batch_size)
loss = agent.learn(state_batch, action_batch, reward_batch, state_next_batch, done_batch)
total_reward += reward
state = state_next
if done:
break
eval_reward = evaluate(env, agent, render=True)
print('episode: %d e_greed: %.5f test_reward: %.1f' %(episode, agent.epsilon, eval_reward))
torch.save(agent.dqn.target_model, './dqn.pkl') |
def string_reverse(word):
    if len(word) <= 1:  # also handles the empty string, avoiding infinite recursion
return word
else:
return string_reverse(word[1:]) + word[0]
print(string_reverse("hello"))
""" --- """
def palindromCheck(word2):
if len(word2) == 1 or len(word2) == 0:
return True
else:
return word2[0] == word2[-1] and palindromCheck(word2[1:-1])
print(palindromCheck("mayam"))
""" --- """
def sumOfNum(num):
if num == 1:
return 1
else:
return num + sumOfNum(num-1)
print(sumOfNum(10))
""" --- """
def fib(n):
if n == 1 or n == 0:
return n
else:
        return fib(n-1) + fib(n-2)
print(fib(7)) |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import threading
from uuid import uuid4
from ..utils.func import next_chunk, Static
from .base import WeChatTestCase
class UtilFunctoolTestCase(WeChatTestCase):
def test_next_chunk(self):
"""测试next chunk"""
data = list(next_chunk(range(50)))
self.assertEqual(len(data), 1)
self.assertEqual(set(range(50)), set(data[0]))
data = list(next_chunk(range(100)))
self.assertEqual(len(data), 1)
self.assertEqual(set(range(100)), set(data[0]))
data = list(next_chunk(range(101), 100))
self.assertEqual(len(data), 2)
self.assertEqual(set(range(100)), set(data[0]))
self.assertEqual(set((100, )), set(data[1]))
def test_static(self):
"""测试static"""
total = 10
strings = [str(uuid4()) for i in range(total)]
another_strings = [s.encode().decode() for s in strings]
for i in range(total):
            # equal values, but distinct object identities
self.assertEqual(strings[i], another_strings[i])
self.assertNotEqual(id(strings[i]), id(another_strings[i]))
self.assertEqual(
id(Static(strings[i])), id(Static(another_strings[i])))
        # the same behaviour holds in another thread
def another_thread(strings):
another_strings = [s.encode().decode() for s in strings]
for i in range(total):
                # equal values, but distinct object identities
self.assertEqual(strings[i], another_strings[i])
self.assertNotEqual(id(strings[i]), id(another_strings[i]))
self.assertEqual(
id(Static(strings[i])), id(Static(another_strings[i])))
        thread = threading.Thread(target=another_thread, args=(strings,))
        thread.start()
        thread.join()  # the thread was previously created but never started
|
from nose.tools import *
from shared import assert_equals_json
import json
import terrascript
import terrascript.aws.r
class Test_Output(object):
def test_output_classes(self):
output = terrascript.Output("name")
assert isinstance(output, terrascript.Block)
def test_output_example1(self):
# https://www.terraform.io/docs/configuration/outputs.html
resource = terrascript.Resource('name')
output = terrascript.Output("ipaddress", value=resource.ipaddress)
assert_equals_json(output, 'Output_output_example1.json') |
import logging
import os
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings
log = logging.getLogger("uvicorn")
class Settings(BaseSettings):
environment: Optional[str] = os.environ.get("ENVIRONMENT")
testing: Optional[str] = os.environ.get("TESTING")
db_name: Optional[str] = os.environ.get("MONGO_DATABASE_NAME")
@lru_cache
def get_settings() -> BaseSettings:
log.info("Loading config settings from the environment...")
return Settings()
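# A minimal sketch (not part of this module) of the usual FastAPI wiring for
# get_settings(); the app and route names here are illustrative only:
#
#     from fastapi import Depends, FastAPI
#     app = FastAPI()
#
#     @app.get("/ping")
#     async def pong(settings: Settings = Depends(get_settings)):
#         return {"environment": settings.environment, "testing": settings.testing}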
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('login/',
auth_views.LoginView.as_view(template_name='registration/login.html'),
name='login'),
    path('logout/',
         auth_views.LogoutView.as_view(template_name='accounts/registration/logout.html'),
         name='logout'),
path('', include('django.contrib.auth.urls')),
path('register/', views.register, name='register'),
path(
'register/staff',
views.TeacherSignUpView.as_view(), name='teacher-registration'
),
path(
'staff_verify/<uuid:pk>',
views.StaffVerificationView.as_view(),
name='staff-verification'
),
path('dashboard/', views.profile, name='user-profile'),
path('upload-signature', views.upload_staff_signature, name="signature-upload")
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
# coding:utf-8
import sys
import os
import inspect
import ast
import torch.nn as nn
import DLtorch
from DLtorch.utils import *
class Plugins(object):
def __init__(self, plugin_root: str):
self.plugin_root = plugin_root
for root, dirs, files in os.walk(self.plugin_root, True):
module_list = self._load_modules(root)
for module in module_list:
if issubclass(module, DLtorch.objective.BaseObjective):
setattr(DLtorch.objective, module.__name__, module)
elif issubclass(module, DLtorch.objective.adversary.BaseAdvGenerator):
setattr(DLtorch.objective.adversary, module.__name__, module)
elif issubclass(module, DLtorch.dataset.BaseDataset):
setattr(DLtorch.dataset, module.__name__, module)
elif issubclass(module, DLtorch.trainer.BaseTrainer):
setattr(DLtorch.trainer, module.__name__, module)
elif issubclass(module, DLtorch.optimizer.BaseOptimizer):
setattr(DLtorch.optimizer, module.__name__, module)
elif issubclass(module, DLtorch.criterion.BaseCriterion):
setattr(DLtorch.criterion, module.__name__, module)
elif issubclass(module, DLtorch.lr_scheduler.BaseLrScheduler):
setattr(DLtorch.lr_scheduler, module.__name__, module)
elif issubclass(module, DLtorch.model.BaseModel):
setattr(DLtorch.model, module.__name__, module)
def _load_modules(self, root: str):
"""
Dynamically load modules from all the files ending with '.py' under current root and return a list.
"""
modules = []
for filename in os.listdir(root):
if filename.endswith(".py"):
name = os.path.splitext(filename)[0]
filename = os.path.join(root, filename)
if name.isidentifier():
                try:
                    with open(filename, "r", encoding="utf8") as fh:
                        code = fh.read()
                    module = type(sys)(name)
                    sys.modules[name] = module
                    exec(code, module.__dict__)
                    for _class in module.__dict__.values():
                        try:
                            if issubclass(_class, DLtorch.base.BaseComponent):
                                modules.append(_class)
                        except TypeError:
                            # issubclass() raises TypeError for non-class values.
                            continue
                except (EnvironmentError, SyntaxError) as err:
                    sys.modules.pop(name, None)
                    print(err)
return modules
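# A hedged sketch (not part of the original package) of a plugin file that
# _load_modules() above would pick up: any top-level class deriving from a
# DLtorch base class gets registered. The names here are illustrative.
#
#     # my_plugins/my_trainer.py
#     import DLtorch
#
#     class MyTrainer(DLtorch.trainer.BaseTrainer):
#         ...
#
#     # Plugins("my_plugins") would then expose DLtorch.trainer.MyTrainer.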
class BaseComponent(object):
def __init__(self, **kwargs):
self._logger = None
@property
def logger(self):
if self._logger is None:
self._logger = getLogger(self.__class__.__name__)
return self._logger
def __getstate__(self):
state = self.__dict__.copy()
if "_logger" in state:
del state["_logger"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
# set self._logger to None
self._logger = None
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import logging
from typing import Dict, Tuple
import reagent.core.types as rlt
import torch
logger = logging.getLogger(__name__)
class MapIDList(torch.nn.Module):
@abc.abstractmethod
def forward(self, raw_ids: torch.Tensor) -> torch.Tensor:
pass
class MapIDScoreList(torch.nn.Module):
@abc.abstractmethod
def forward(
self, raw_ids: torch.Tensor, raw_values: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
pass
class ExactMapIDList(MapIDList):
def __init__(self):
super().__init__()
def forward(self, raw_ids: torch.Tensor) -> torch.Tensor:
return raw_ids
class ExactMapIDScoreList(MapIDScoreList):
def __init__(self):
super().__init__()
def forward(
self, raw_ids: torch.Tensor, raw_values: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return (
raw_ids,
raw_values,
)
class HashingMapIDList(MapIDList):
def __init__(self, embedding_table_size):
super().__init__()
self.embedding_table_size = embedding_table_size
def forward(self, raw_ids: torch.Tensor) -> torch.Tensor:
hashed_ids = torch.ops.fb.sigrid_hash(
raw_ids,
salt=0,
maxValue=self.embedding_table_size,
hashIntoInt32=False,
)
return hashed_ids
class HashingMapIDScoreList(MapIDScoreList):
def __init__(self, embedding_table_size):
super().__init__()
self.embedding_table_size = embedding_table_size
def forward(
self, raw_ids: torch.Tensor, raw_values: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
hashed_ids = torch.ops.fb.sigrid_hash(
raw_ids,
salt=0,
maxValue=self.embedding_table_size,
hashIntoInt32=False,
)
return (
hashed_ids,
raw_values,
)
def make_sparse_preprocessor(
feature_config: rlt.ModelFeatureConfig, device: torch.device
):
"""Helper to initialize, for scripting SparsePreprocessor"""
# TODO: Add option for simple modulo and other hash functions
id2name: Dict[int, str] = feature_config.id2name
name2id: Dict[str, int] = feature_config.name2id
def _make_id_list_mapper(config: rlt.IdListFeatureConfig) -> MapIDList:
mapping_config = feature_config.id_mapping_config[config.id_mapping_name]
if mapping_config.hashing:
return HashingMapIDList(mapping_config.embedding_table_size)
else:
return ExactMapIDList()
id_list_mappers = {
config.feature_id: _make_id_list_mapper(config)
for config in feature_config.id_list_feature_configs
}
def _make_id_score_list_mapper(
config: rlt.IdScoreListFeatureConfig,
) -> MapIDScoreList:
mapping_config = feature_config.id_mapping_config[config.id_mapping_name]
if mapping_config.hashing:
return HashingMapIDScoreList(mapping_config.embedding_table_size)
else:
return ExactMapIDScoreList()
id_score_list_mappers = {
config.feature_id: _make_id_score_list_mapper(config)
for config in feature_config.id_score_list_feature_configs
}
sparse_preprocessor = SparsePreprocessor(
id2name, name2id, id_list_mappers, id_score_list_mappers, device
)
return torch.jit.script(sparse_preprocessor)
class SparsePreprocessor(torch.nn.Module):
"""Performs preprocessing for sparse features (i.e. id_list, id_score_list)
Functionality includes:
(1) changes keys from feature_id to feature_name, for better debuggability
(2) maps sparse ids to embedding table indices based on id_mapping
(3) filters out ids which aren't in the id2name
"""
def __init__(
self,
id2name: Dict[int, str],
name2id: Dict[str, int],
id_list_mappers: Dict[int, MapIDList],
id_score_list_mappers: Dict[int, MapIDScoreList],
device: torch.device,
) -> None:
super().__init__()
assert set(id2name.keys()) == set(id_list_mappers.keys()) | set(
id_score_list_mappers.keys()
)
self.id2name: Dict[int, str] = torch.jit.Attribute(id2name, Dict[int, str])
self.name2id: Dict[str, int] = torch.jit.Attribute(name2id, Dict[str, int])
self.id_list_mappers = torch.nn.ModuleDict(
{id2name[k]: v for k, v in id_list_mappers.items()}
)
self.id_score_list_mappers = torch.nn.ModuleDict(
{id2name[k]: v for k, v in id_score_list_mappers.items()}
)
self.device = device
@torch.jit.export
def preprocess_id_list(
self, id_list: Dict[int, Tuple[torch.Tensor, torch.Tensor]]
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
"""
Input: rlt.ServingIdListFeature
Output: rlt.IdListFeature
"""
ret: Dict[str, Tuple[torch.Tensor, torch.Tensor]] = {}
for name, mapper in self.id_list_mappers.items():
fid = self.name2id[name]
if fid in id_list:
offsets, values = id_list[fid]
idx_values = mapper(values)
ret[name] = (
offsets.to(self.device),
idx_values.to(self.device),
)
return ret
@torch.jit.export
def preprocess_id_score_list(
self, id_score_list: Dict[int, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Input: rlt.ServingIdScoreListFeature
Output: rlt.IdScoreListFeature
"""
ret: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = {}
for name, mapper in self.id_score_list_mappers.items():
fid = self.name2id[name]
if fid in id_score_list:
offsets, keys, values = id_score_list[fid]
idx_keys, weights = mapper(keys, values)
ret[name] = (
offsets.to(self.device),
idx_keys.to(self.device),
weights.to(self.device).float(),
)
return ret
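# A minimal sketch (not part of the original file) of the expected call
# pattern; the feature id (42) and tensors are made up:
#
#     sp = make_sparse_preprocessor(feature_config, torch.device("cpu"))
#     id_list = {42: (torch.tensor([0, 2]), torch.tensor([1001, 1002, 1003]))}
#     named = sp.preprocess_id_list(id_list)
#     # -> {"feature_name": (offsets_on_device, mapped_ids_on_device)}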
|
import utils
from tf_imports import tf, K, TensorBoard
class TensorBoard2(TensorBoard):
def __init__(self, writer, histogram_freq=0, batch_size=None):
super().__init__(
log_dir=None,
histogram_freq=histogram_freq,
batch_size=batch_size,
write_graph=False,
write_grads=False,
write_images=False)
self.writer = writer
self.log_dir = ""
def _init_writer(self):
pass
def set_model(self, model):
writer = self.writer
super().set_model(model)
self.writer = writer
def on_train_begin(self, logs=None):
self._log_optimizer_description(logs)
super().on_train_begin(logs)
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if logs.get("val_loss"):  # guard against a missing or zero validation loss
            logs["train_val_loss_report"] = logs["loss"] / logs["val_loss"]
logs = self._rename_logs(logs)
super().on_epoch_end(epoch, logs)
def _log_optimizer_description(self, logs):
optimizer_config = {"name": self.model.optimizer.__class__.__name__}
optimizer_config.update(self.model.optimizer.get_config())
optimizer_config_json = utils.json_utils.dumps(optimizer_config, sort_keys=True)
optimizer_config_tensor = tf.convert_to_tensor(optimizer_config_json)
summary = K.eval(tf.summary.text(name="optimizer vars", tensor=optimizer_config_tensor))
self.writer.add_summary(summary, global_step=0)
self.writer.flush()
@staticmethod
def _rename_logs(logs):
for key, value in list(logs.items()):
logs["performance/" + key] = value
del logs[key]
return logs
|
from agents import sac_networks as networks
import tensorflow as tf
import tensorflow_probability as tfp
from agents.bijectors import ConditionalScale, ConditionalShift
import tree
import utils  # assumed project-local module providing clip_to_spec() used below
class SACAgent(object):
ACTOR_NET_SCOPE = 'actor_net'
CRITIC_NET_SCOPE = 'critic_net'
VALUE_NET_SCOPE = 'value_net'
TARGET_CRITIC_NET_SCOPE = 'target_critic_net'
TARGET_VALUE_NET_SCOPE = 'target_value_net'
def __init__(self,
observation_spec,
action_spec,
actor_net=networks.actor_net,
critic_net=networks.critic_net,
value_net=networks.value_net,
td_errors_loss=tf.losses.huber_loss,
dqda_clipping=0.,
actions_regularizer=0.,
target_q_clipping=None,
residual_phi=0.0,
debug_summaries=False):
self._observation_spec = observation_spec[0]
self._action_spec = action_spec[0]
self._state_shape = tf.TensorShape([None]).concatenate(
self._observation_spec.shape)
self._action_shape = tf.TensorShape([None]).concatenate(
self._action_spec.shape)
self._num_action_dims = self._action_spec.shape.num_elements()
self._scope = tf.get_variable_scope().name
self._actor_net = tf.make_template(
self.ACTOR_NET_SCOPE, actor_net, create_scope_now_=True)
self._critic_net = tf.make_template(
self.CRITIC_NET_SCOPE, critic_net, create_scope_now_=True)
self._value_net = tf.make_template(
self.VALUE_NET_SCOPE, value_net, create_scope_now_=True)
self._target_critic_net = tf.make_template(
self.TARGET_CRITIC_NET_SCOPE, critic_net, create_scope_now_=True)
self._target_value_net = tf.make_template(
self.TARGET_VALUE_NET_SCOPE, value_net, create_scope_now_=True)
base_distribution = tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros(self._action_spec.shape),
scale_diag=tf.ones(self._action_spec.shape)
)
raw_action_distribution = tfp.bijectors.Chain((
ConditionalShift(name='shift'),
ConditionalScale(name='scale')
))(base_distribution)
self.action_distribution = tfp.bijectors.Tanh()(raw_action_distribution)
self._td_errors_loss = td_errors_loss
if dqda_clipping < 0:
raise ValueError('dqda_clipping must be >= 0.')
self._dqda_clipping = dqda_clipping
self._actions_regularizer = actions_regularizer
self._target_q_clipping = target_q_clipping
self._residual_phi = residual_phi
self._debug_summaries = debug_summaries
def actor_net(self, states, stop_gradients=False):
"""Returns the output of the actor network.
Args:
states: A [batch_size, num_state_dims] tensor representing a batch
of states.
      stop_gradients: (boolean) if true, gradients cannot be propagated through
this operation.
Returns:
A [batch_size, num_action_dims] tensor of actions.
Raises:
ValueError: If `states` does not have the expected dimensions.
"""
self._validate_states(states)
shift, scale = self._actor_net(states, self._action_spec)
actions = self.action_distribution.sample(self._action_spec.shape, scale=scale, shift=shift)
    action_means = (self._action_spec.maximum + self._action_spec.minimum) / 2.0
    action_magnitudes = (self._action_spec.maximum - self._action_spec.minimum) / 2.0
    actions = action_means + action_magnitudes * actions
if stop_gradients:
actions = tf.stop_gradient(actions)
return actions
def critic_net(self, states, actions, for_critic_loss=False):
"""Returns the output of the critic network.
Args:
states: A [batch_size, num_state_dims] tensor representing a batch
of states.
actions: A [batch_size, num_action_dims] tensor representing a batch
of actions.
Returns:
q values: A [batch_size] tensor of q values.
Raises:
      ValueError: If `states` or `actions` do not have the expected dimensions.
"""
self._validate_states(states)
self._validate_actions(actions)
return self._critic_net(states, actions,
for_critic_loss=for_critic_loss)
def critic_loss(self, states, actions, rewards, discounts,
next_states):
self._validate_states(states)
self._validate_actions(actions)
self._validate_states(next_states)
self.actions_and_log_probs(states)
    raise NotImplementedError('critic_loss is not implemented yet.')
def _batch_state(self, state):
"""Convert state to a batched state.
Args:
state: Either a list/tuple with an state tensor [num_state_dims].
Returns:
A tensor [1, num_state_dims]
"""
if isinstance(state, (tuple, list)):
state = state[0]
if state.get_shape().ndims == 1:
state = tf.expand_dims(state, 0)
return state
def action(self, state):
"""Returns the next action for the state.
Args:
state: A [num_state_dims] tensor representing a state.
Returns:
A [num_action_dims] tensor representing the action.
"""
return self.actor_net(self._batch_state(state), stop_gradients=True)[0, :]
def sample_action(self, state, stddev=1.0):
"""Returns the action for the state with additive noise.
Args:
state: A [num_state_dims] tensor representing a state.
stddev: stddev for the Ornstein-Uhlenbeck noise.
Returns:
A [num_action_dims] action tensor.
"""
agent_action = self.action(state)
agent_action += tf.random_normal(tf.shape(agent_action)) * stddev
return utils.clip_to_spec(agent_action, self._action_spec)
def _validate_states(self, states):
"""Raises a value error if `states` does not have the expected shape.
Args:
states: A tensor.
Raises:
ValueError: If states.shape or states.dtype are not compatible with
observation_spec.
"""
states.shape.assert_is_compatible_with(self._state_shape)
if not states.dtype.is_compatible_with(self._observation_spec.dtype):
raise ValueError('states.dtype={} is not compatible with'
' observation_spec.dtype={}'.format(
states.dtype, self._observation_spec.dtype))
def _validate_actions(self, actions):
"""Raises a value error if `actions` does not have the expected shape.
Args:
actions: A tensor.
Raises:
ValueError: If actions.shape or actions.dtype are not compatible with
action_spec.
"""
actions.shape.assert_is_compatible_with(self._action_shape)
if not actions.dtype.is_compatible_with(self._action_spec.dtype):
raise ValueError('actions.dtype={} is not compatible with'
' action_spec.dtype={}'.format(
actions.dtype, self._action_spec.dtype)) |
from typing import Dict, Union
from math import sqrt
from spacy.tokens import Doc
from .basic_stats import BasicStats
from .constants import READABILITY_STATS_DESC
from .extractors import SentsExtractor, WordsExtractor
class ReadabilityStats(object):
"""
Класс для вычисления основных метрик удобочитаемости текста
Пример использования:
>>> from ruts import ReadabilityStats
>>> text = "Ног нет, а хожу, рта нет, а скажу: когда спать, когда вставать, когда работу начинать"
>>> rs = ReadabilityStats(text)
>>> rs.get_stats()
{'automated_readability_index': 0.2941666666666656,
'coleman_liau_index': 0.2941666666666656,
'flesch_kincaid_grade': 3.4133333333333304,
'flesch_reading_easy': 83.16166666666666,
'lix': 48.333333333333336,
'smog_index': 0.05}
Аргументы:
source (str|Doc): Источник данных (строка или объект Doc)
sents_extractor (SentsExtractor): Инструмент для извлечения предложений
words_extractor (WordsExtractor): Инструмент для извлечения слов
Атрибуты:
flesch_kincaid_grade (float): Тест Флеша-Кинкайда
flesch_reading_easy (float): Индекс удобочитаемости Флеша
coleman_liau_index (float): Индекс Колман-Лиау
smog_index (float): Индекс SMOG
automated_readability_index (float): Автоматический индекс удобочитаемости
lix (float): Индекс удобочитаемости LIX
Методы:
get_stats: Получение вычисленных метрик удобочитаемости текста
print_stats: Отображение вычисленных метрик удобочитаемости текста с описанием на экран
Исключения:
ValueError: Если в источнике данных отсутствуют слова
"""
def __init__(
self,
source: Union[str, Doc],
sents_extractor: SentsExtractor = None,
words_extractor: WordsExtractor = None,
):
self.bs = BasicStats(source, sents_extractor, words_extractor)
if not self.bs.n_words:
raise ValueError("В источнике данных отсутствуют слова")
@property
def flesch_kincaid_grade(self):
return calc_flesch_kincaid_grade(self.bs.n_syllables, self.bs.n_words, self.bs.n_sents)
@property
def flesch_reading_easy(self):
return calc_flesch_reading_easy(self.bs.n_syllables, self.bs.n_words, self.bs.n_sents)
@property
def coleman_liau_index(self):
return calc_coleman_liau_index(self.bs.n_letters, self.bs.n_words, self.bs.n_sents)
@property
def smog_index(self):
return calc_smog_index(self.bs.n_complex_words, self.bs.n_sents)
@property
def automated_readability_index(self):
return calc_automated_readability_index(
self.bs.n_letters, self.bs.n_words, self.bs.n_sents
)
@property
def lix(self):
return calc_lix(self.bs.n_long_words, self.bs.n_words, self.bs.n_sents)
def get_stats(self) -> Dict[str, float]:
"""
Получение вычисленных метрик удобочитаемости текста
Вывод:
dict[str, float]: Справочник вычисленных метрик удобочитаемости текста
"""
return {
"flesch_kincaid_grade": self.flesch_kincaid_grade,
"flesch_reading_easy": self.flesch_reading_easy,
"coleman_liau_index": self.coleman_liau_index,
"smog_index": self.smog_index,
"automated_readability_index": self.automated_readability_index,
"lix": self.lix,
}
def print_stats(self):
"""Отображение вычисленных метрик удобочитаемости текста с описанием на экран"""
print(f"{'Метрика':^40}|{'Значение':^10}")
print("-" * 50)
for stat, value in READABILITY_STATS_DESC.items():
print(f"{value:40}|{self.get_stats().get(stat):^10.2f}")
def calc_flesch_kincaid_grade(
n_syllables: int,
n_words: int,
n_sents: int,
a: float = 0.49,
b: float = 7.3,
c: float = 16.59,
) -> float:
"""
Вычисление теста Флеша-Кинкайда
Описание:
Чем выше показатель, тем сложнее текст для чтения
Результатом является число лет обучения в американской системе образования, необходимых для понимания текста
Ссылки:
https://en.wikipedia.org/wiki/Flesch–Kincaid_readability_tests#Flesch–Kincaid_grade_level
Аргументы:
n_syllables (int): Количество слогов
n_words (int): Количество слов
n_sents (int): Количество предложений
a (float): Коэффициент a
b (float): Коэффициент b
c (float): Коэффициент c
Вывод:
float: Значение теста
"""
return (a * n_words / n_sents) + (b * n_syllables / n_words) - c
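# Worked example (not in the original module), using made-up counts:
# n_syllables=180, n_words=100, n_sents=10 gives
# 0.49*(100/10) + 7.3*(180/100) - 16.59 = 4.9 + 13.14 - 16.59 = 1.45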
def calc_flesch_reading_easy(
n_syllables: int,
n_words: int,
n_sents: int,
a: float = 1.3,
b: float = 60.1,
c: float = 206.835,
) -> float:
"""
Вычисление индекса удобочитаемости Флеша
Описание:
Чем выше показатель, тем легче текст для чтения
Значения индекса лежат в пределах от 0 до 100 и могут интерпретироваться следующим образом:
100-90 - 5-й класс
90-80 - 6-й класс
80-70 - 7-й класс
70-60 - 8-й и 9-й класс
60-50 - 10-й и 11-й класс
50-30 - Студент университета
30-0 - Выпускник университета
Ссылки:
https://ru.wikipedia.org/wiki/Индекс_удобочитаемости
https://en.wikipedia.org/wiki/Flesch–Kincaid_readability_tests#Flesch_reading_ease
Аргументы:
n_syllables (int): Количество слогов
n_words (int): Количество слов
n_sents (int): Количество предложений
a (float): Коэффициент a
b (float): Коэффициент b
c (float): Коэффициент c
Вывод:
float: Значение индекса
"""
return c - (a * n_words / n_sents) - (b * n_syllables / n_words)
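# Worked example (not in the original module), using made-up counts:
# n_syllables=180, n_words=100, n_sents=10 gives
# 206.835 - 1.3*(100/10) - 60.1*(180/100) = 206.835 - 13 - 108.18 = 85.655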
def calc_coleman_liau_index(
n_letters: int,
n_words: int,
n_sents: int,
a: float = 6.26,
b: float = 0.2805,
c: float = 31.04,
) -> float:
"""
Вычисление индекса Колман-Лиау
Описание:
Чем выше показатель, тем сложнее текст для чтения
Результатом является число лет обучения в американской системе образования, необходимых для понимания текста
Ссылки:
https://ru.wikipedia.org/wiki/Индекс_Колман_—_Лиау
https://en.wikipedia.org/wiki/Coleman–Liau_index
Аргументы:
n_letters (int): Количество букв
n_words (int): Количество слов
n_sents (int): Количество предложений
a (float): Коэффициент a
b (float): Коэффициент b
c (float): Коэффициент c
Вывод:
float: Значение индекса
"""
return (a * n_letters / n_words) + (b * n_words / n_sents) - c
def calc_smog_index(
n_complex: int, n_sents: int, a: float = 1.1, b: float = 64.6, c: float = 0.05
) -> float:
"""
Вычисление индекса SMOG
Описание:
Simple Measure of Gobbledygook («Простое измерение разглагольствований»)
Наиболее авторитетная метрика читабельности
Чем выше показатель, тем сложнее текст для чтения
Результатом является число лет обучения в американской системе образования, необходимых для понимания текста
Ссылки:
https://en.wikipedia.org/wiki/SMOG
Аргументы:
n_complex (int): Количество сложных слов
n_sents (int): Количество предложений
a (float): Коэффициент a
b (float): Коэффициент b
c (float): Коэффициент c
Вывод:
float: Значение индекса
"""
return (a * sqrt(b * n_complex / n_sents)) + c
def calc_automated_readability_index(
n_letters: int,
n_words: int,
n_sents: int,
a: float = 6.26,
b: float = 0.2805,
c: float = 31.04,
) -> float:
"""
Вычисление автоматического индекса удобочитаемости
Описание:
Чем выше показатель, тем сложнее текст для чтения
Результатом является число лет обучения в американской системе образования, необходимых для понимания текста
Значения индекса могут интерпретироваться следующим образом:
1 - 6-7 лет
2 - 7-8 лет
3 - 8-9 лет
4 - 9-10 лет
5 - 10-11 лет
6 - 11-12 лет
7 - 12-13 лет
8 - 13-14 лет
9 - 14-15 лет
10 - 15-16 лет
11 - 16-17 лет
12 - 17-18 лет
Ссылки:
https://en.wikipedia.org/wiki/Automated_readability_index
https://ru.wikipedia.org/wiki/Автоматический_индекс_удобочитаемости
Аргументы:
n_letters (int): Количество букв
n_words (int): Количество слов
n_sents (int): Количество предложений
a (float): Коэффициент a
b (float): Коэффициент b
c (float): Коэффициент c
Вывод:
float: Значение индекса
"""
return (a * n_letters / n_words) + (b * n_words / n_sents) - c
def calc_lix(n_long_words: int, n_words: int, n_sents: int) -> float:
"""
Вычисление индекса удобочитаемости LIX
Описание:
Чем выше показатель, тем сложнее текст для чтения
Значения индекса лежат в пределах от 0 до 100 и могут интерпретироваться следующим образом:
0-30 - Очень простые тексты, детская литература
30-40 - Простые тексты, художественная литература, газетные статьи
40-50 - Тексты средней сложности, журнальные статьи
50-60 - Сложные тексты, научно-популярные статьи, профессиональная литература, официальные тексты
60-100 - Очень сложные тексты, написанные канцелярским языком, законы
Ссылки:
https://en.wikipedia.org/wiki/Lix_(readability_test)
https://ru.wikipedia.org/wiki/LIX
Аргументы:
n_long_words (int): Количество длинных слов
n_words (int): Количество слов
n_sents (int): Количество предложений
Вывод:
float: Значение индекса
"""
return (n_words / n_sents) + (100 * n_long_words / n_words)
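# Worked example (not in the original module), using made-up counts:
# n_long_words=20, n_words=100, n_sents=10 gives
# (100/10) + 100*(20/100) = 10 + 20 = 30, the upper boundary of "very simple" texts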
|
import os
import pytest
from motifscan.cli.genome import run
from motifscan.cli.main import configure_parser_main
from motifscan.config import Config
from motifscan.genome import Genome
parser = configure_parser_main()
def test_cli_genome_list(tmp_dir, capsys):
config_file = os.path.join(tmp_dir, "test_cli_genome.motifscanrc")
config = Config(config_file)
config.set_genome_path("hg19", "/path/to/genome1")
config.set_genome_path("hg38", "/path/to/genome2")
config.write()
args = parser.parse_args(["genome", "--list"])
run(args=args, config_file=config_file)
captured = capsys.readouterr()
assert captured.out == "hg19\nhg38\n"
def test_cli_genome_list_remote(capsys):
args = parser.parse_args(["genome", "--list-remote"])
run(args=args)
captured = capsys.readouterr()
assert captured.out
def test_cli_genome_search(capsys):
args = parser.parse_args(["genome", "--search", "human"])
run(args=args)
captured = capsys.readouterr()
assert captured.out
def test_cli_genome_install(genome_root, tmp_dir):
config_file = os.path.join(tmp_dir, "test_cli_genome.motifscanrc")
config = Config(config_file)
config.set_genome_dir(tmp_dir)
config.write()
fasta_path = os.path.join(genome_root, "test", "test.fa")
gene_path = os.path.join(genome_root, "test", "test_gene_annotation.txt")
args = parser.parse_args(
["genome", "--install", "-n", "test_genome", "-i", fasta_path, "-a",
gene_path])
run(args=args, config_file=config_file)
genome_path = os.path.join(tmp_dir, "test_genome")
genome = Genome(name="test_genome", path=genome_path)
assert genome.fetch_sequence("chr1", 0, 10) == "AaTtCcGgNn"
assert genome.genes
config = Config(config_file)
assert config.has_genome_assembly("test_genome")
assert config.get_genome_path("test_genome") == genome_path
def test_cli_genome_uninstall(genome_root, tmp_dir):
config_file = os.path.join(tmp_dir, "test_cli_genome.motifscanrc")
config = Config(config_file)
config.set_genome_dir(tmp_dir)
config.write()
args = parser.parse_args(["genome", "--uninstall", "test_genome"])
run(args=args, config_file=config_file)
config = Config(config_file)
assert not config.has_genome_assembly("test_genome")
genome_path = os.path.join(tmp_dir, "test_genome")
assert not os.path.isdir(genome_path)
args = parser.parse_args(["genome", "--uninstall", "test_genome1"])
with pytest.raises(SystemExit):
run(args=args, config_file=config_file)
|
import os
import sys
import click
from zipfile import ZipFile, ZIP_DEFLATED
import pathlib
import hashlib
import re
from loguetools import og, xd, common
from loguetools import version
XD_PATCH_LENGTH = 1024
def explode(filename, match_name, match_ident, prepend_id, append_md5_4, append_version, unskip_init):
"""Explode a minilogue og or xd or prologue program bank or extract a program.
\b
Examples
--------
explode xd_program_bank.mnlgxdlib
explode -n XDProgName xd_program_bank.mnlgxdlib
"""
zipobj = ZipFile(filename, "r", compression=ZIP_DEFLATED, compresslevel=9)
proglist = common.zipread_progbins(zipobj)
proginfo_dict = common.zipread_all_prog_info(zipobj)
if match_name is not None:
match_ident = common.id_from_name(zipobj, match_name)
if match_ident is not None:
proglist = [proglist[match_ident - 1]]
# Create directory based on the filename stem
input_file = pathlib.Path(filename)
dir_path = input_file.with_suffix("")
dir_path.mkdir(exist_ok=True)
if input_file.suffix in {".mnlgxdpreset", ".mnlgxdlib"}:
suffix = ".mnlgxdprog"
flavour = "xd"
elif input_file.suffix in {".mnlgpreset", ".mnlglib"}:
suffix = ".mnlgprog"
flavour = "og"
elif input_file.suffix in {".prlgpreset", ".prlglib"}:
suffix = ".prlgprog"
flavour = "prologue"
elif input_file.suffix in {".molgpreset", ".molglib"}:
suffix = ".molgprog"
flavour = "monologue"
elif input_file.suffix in {".kklib"}:
suffix = ".kkprog"
flavour = "kk"
fileinfo_xml = common.fileinfo_xml(flavour, [0])
# Read any copyright and author information if available
copyright = None
author = None
comment = None
if input_file.suffix in {".mnlgxdpreset", ".mnlgpreset", ".prlgpreset", ".molgpreset"}:
author, copyright = common.author_copyright_from_presetinformation_xml(zipobj)
sanitise = common.sanitise_patchname()
for i, p in enumerate(proglist):
patchdata = zipobj.read(p)
hash = hashlib.md5(patchdata).hexdigest()
flavour = common.patch_type(patchdata)
if common.is_init_patch(flavour, hash):
# Init Program identified based on hash; i.e. a "True" Init Program
continue
prgname = common.program_name(patchdata, flavour)
if common.is_init_program_name(prgname) and not unskip_init:
# Init Program found and option not to skip is unchecked
continue
if prepend_id:
prgname = f"{i+1:03d}_{prgname}"
if append_md5_4:
hash = hashlib.md5(patchdata).hexdigest()
prgname = f"{prgname}-{hash[:4]}"
if append_version:
ver = version.__version__.replace(".", "")
prgname = f"{prgname}-v{ver}"
output_path = (dir_path / (sanitise(prgname) + suffix))
with ZipFile(output_path, "w") as zip:
binary = zipobj.read(p)
# .prog_bin record/file
zip.writestr(f"Prog_000.prog_bin", binary)
# .prog_info record/file
# Use any available presetinformation_xml author and copyright fields
if author is not None:
comment = f"Author: {author}"
proginfo_comment = (proginfo_dict[p])['Comment']
if proginfo_comment is not None:
comment = f"{comment}, " + proginfo_comment
prog_info_template = common.prog_info_template_xml(flavour, comment=comment, copyright=copyright)
zip.writestr(f"Prog_000.prog_info", prog_info_template)
# FileInformation.xml record/file
zip.writestr(f"FileInformation.xml", fileinfo_xml)
print(f"{int(p[5:8])+1:03d}: {prgname:<12s} -> {output_path}")
@click.command()
@click.argument("filename", type=click.Path(exists=True))
@click.option("--match_name", "-n", help="Dump the patch with name NAME")
@click.option("--match_ident", "-i", type=int, help="Dump the patch with ident ID")
@click.option("--prepend_id", "-p", is_flag=True, help="Prepend patch ID to the filename")
@click.option("--append_md5_4", "-m", is_flag=True, help="Append 4 digits of an md5 checksum to the filename")
@click.option("--append_version", "-v", is_flag=True, help="Append loguetools version to the filename")
@click.option("--unskip_init", "-u", is_flag=True, help="Don't skip patches named Init Program")
def click_explode(filename, match_name, match_ident, prepend_id, append_md5_4, append_version, unskip_init):
explode(filename, match_name, match_ident, prepend_id, append_md5_4, append_version, unskip_init)
if __name__ == "__main__":
click_explode()
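# Example invocation (illustrative; the script name is assumed, and the flags
# map to the click options declared above):
#   python explode.py --prepend_id --append_md5_4 xd_program_bank.mnlgxdlib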
|
"""
The following code is not meant to be run because
there's no input. Instead, analyze its running time
in terms of Big-O. The first two lines are already
analyzed for you. Do the same for all the other lines.
The input of the problem is ex_list; assume it has
n elements. At the end, state the total running time of
the code.
ex_list = [?,?,?,...]#Input,O(1)
for i in range(2):#O(1)
ex_list.insert(0,1)#O(n)
ex_list.append(1)#O(1)
for number in ex_list:#O(1)
for number in ex_list:#O(1)
break#O(1)
break#O(1)
#Total running time = O(n)
"""
|
from flask import Flask
from flask_restful import Api
app = Flask(__name__)
api = Api(app)
from views import Orders, SingleOrder
api.add_resource(Orders, '/prod')
api.add_resource(SingleOrder, '/prod/<int:id>')
if __name__ == '__main__':
    app.run(debug=True)
|
import os
import sys
import itertools
sys.path.append('../common')
import spacy
import numpy as np
from spacy.en import English
import re
nlp = English()
def replace_tokenizer(nlp):
old_tokenizer = nlp.tokenizer
nlp.tokenizer = lambda string: old_tokenizer.tokens_from_list(string.split())
replace_tokenizer(nlp)
NOUNS=[u'NNP',u'NN', u'NNS', u'NNPS',u'CD',u'PRP',u'PRP$']
ADJECTIVES=[u'JJ',u'JJR',u'JJS']
VERBS=[u'VB', u'VBN', u'VBD', u'VBG', u'VBP']
S_WORDS=[u'IN',u'WDT',u'TO',u'DT']
from okr import *
class V2:
def __init__(self, name,nodes,edges, argument_alignment,sentences):
        self.name=name #object_name (was okr.name, which read a global instead of the constructor argument)
self.edges=edges #a dictionary of edge_id to edge
self.nodes=nodes #a dictionary of node_id to node
self.argument_alignment=argument_alignment # a list of groups of edges aligned to same argument
self.sentences=sentences #original sentences (same as in okr)
class Node:
def __init__(self, id, name, mentions, label,entailment):
self.mentions=mentions #dictionary of mention_id to mention of node
        self.id=id #id of node - currently "E+original_entity_id" or "P+original_proposition_id"
self.name=name #name of node, the same as the name of original entity/proposition
self.label=label #a set of all the mention terms of this node
self.entailment=entailment #entailment graph for node (see entailment_graph object in okr)
class NodeMention:
def __init__(self, id, sentence_id, indices, term, parent):
self.id = id #id of mention - same as in okr (an int)
self.sentence_id = sentence_id#sentence in which the mention appear
self.indices = indices #indices of the mention
self.term = term #words in the mention
self.parent = parent #id of the node to which the node mention belong
class Edge:
def __init__(self, id, start_node,end_node):
self.id=id #edge id - currently in the format of "Node_id_start_node+_+Node_id_end_node"
self.start_node=start_node#start node
self.end_node=end_node#end node
self.mentions={} #dictionary of mention id to mention
self.label=[]# set of all terms of all mentions
class EdgeMention:
def __init__(self, id, sentence_id, indices, terms,template, all_nodes,main_pred, embedded, is_explicit):
self.id = id #edge mention id - currently in the form of "original_proposition_id+_+original_mention_id"
self.sentence_id = sentence_id #sentence of mention
self.indices = indices #indices of mention
self.terms = terms #terms of edge mention
self.template = template #template of edge mention
        self.all_nodes=all_nodes #all nodes which appear in the template
self.embedded=embedded #edge mentions of embedded predicates in template. dictionary of embedded predicate to edge mentions.
self.embedded_edges={} #edges belonging to edge mentions of embedded predicates in template.
#dictionary of embedded predicate to edges.
self.is_explicit=is_explicit #is edge created from explicit proposition (like in okr)
self.main_pred=main_pred #the main predicate of the edge
self.parents=[] #all edges in which the edge mention appear
def change_template_predicate(template, terms, p_id):
if terms==None or terms=="":
#print ("as is: "+template+" "+p_id)
return template
template=" "+template+" "
terms=" "+terms.lower()+" "
#print(template+"\n")
#print(terms+"\n")
if template.find(terms)==-1:
print(p_id+": non-consecutive predicate:|"+terms+"| in the template: |"+template+"|")
return template
#TODO: handle
if len([n for n in re.finditer(terms, template)])>1:
print("terms found more than once"+terms+template)
new_template=template.replace(terms," ["+str(p_id)+"] ")
#print (new_template+"\n")
return new_template
def change_template_arguments(template, arguments):
#print(template)
#print({a:b.parent_id for a,b in arguments.iteritems()})
new_template=template
for arg_id, arg in arguments.iteritems():
arg_str="[a"+str(int(arg_id)+1)+"]"
element = "E" if arg.mention_type==0 else "P"
element_id="["+element+str(arg.parent_id)+"]"
new_template=new_template.replace(arg_str,element_id)
#print (new_template)
return new_template
def get_embedded_mentions(arguments):
embedded={}
for arg_id, arg in arguments.iteritems():
if arg.mention_type==1: #embedded predicate
element_id="P"+str(arg.parent_id)
#print element_id
embedded[element_id]=element_id+"_"+str(arg.parent_mention_id)
#print embedded
return embedded
def extract_nodes_from_templates(template):
nodes=[]
while template.find("[")>-1:
start=template.find("[")
end=template.find("]")
node=template[start+1:end]
nodes.append(node)
template=template[end+1:]
return nodes
input_file=sys.argv[1]
okr = load_graph_from_file(input_file)
#entities to nodes:
Entity_Nodes={}
for e_id,e in okr.entities.iteritems():
new_entity_id="E"+str(e_id)
mentions={m_id:NodeMention(m_id,m.sentence_id,m.indices,m.terms, new_entity_id) for m_id,m in e.mentions.iteritems()}
Entity_Nodes[new_entity_id]=Node(new_entity_id,e.name, mentions, e.terms,e.entailment_graph)
#predicates to nodes:
Proposition_Nodes={}
Edge_Mentions=[]
for p_id,p in okr.propositions.iteritems():
new_p_id="P"+str(p_id)
prop_mentions={m_num:[[num,pos.orth_,pos.tag_] for num,pos in enumerate(nlp(unicode(" ".join(okr.sentences[m.sentence_id])))) if num in m.indices] for m_num,m in p.mentions.iteritems() if m.is_explicit}
new_terms={m_num:" ".join([str(word[1]) for word in m if word[2] not in S_WORDS])for m_num,m in prop_mentions.iteritems()}
new_indices={m_num:[word[0] for word in m if word[2] not in S_WORDS ]for m_num,m in prop_mentions.iteritems()}
new_terms_all=set([m for m in new_terms.values()])
new_mentions={m_num: NodeMention(m_num,m.sentence_id,new_indices[m_num],new_terms[m_num], new_p_id) for m_num,m in p.mentions.iteritems() if m.is_explicit}
if (not (len(new_terms_all)==1 and [n for n in new_terms_all][0]=="")) and len(new_terms_all)>0:#not empty predicate
Proposition_Nodes[new_p_id]=Node(new_p_id,p.name, new_mentions, new_terms_all,p.entailment_graph)
#create new templates:
new_indices_edge={m_num:[word[0] for word in m if word[2] in S_WORDS ]for m_num,m in prop_mentions.iteritems()}
new_terms_edge={m_num:" ".join([str(word[1]) for word in m if word[2] in S_WORDS])for m_num,m in prop_mentions.iteritems()}
#replace predicates with nodes:
new_templates_w_args= {m_num: change_template_predicate(m.template, new_terms.get(m_num,None), new_p_id) for m_num, m in p.mentions.iteritems()}
#replace arguments:
new_templates= {m_num:change_template_arguments(template, p.mentions[m_num].argument_mentions) for m_num, template in new_templates_w_args.iteritems()}
#get mentions of embedded predicates:
embedded= {m_num:get_embedded_mentions(m.argument_mentions) for m_num,m in p.mentions.iteritems()}
#assign main predicates only to propositions with a non-stop word predicate:
main_predicates={m_num:new_p_id if not new_terms.get(m_num,"")=="" else None for m_num,m in p.mentions.iteritems()}
#extract edge mentions:
Edge_Mentions=Edge_Mentions+[EdgeMention(new_p_id+"_"+str(m_num) , m.sentence_id, new_indices_edge.get(m_num,-1), new_terms_edge.get(m_num,""),new_templates[m_num], extract_nodes_from_templates(new_templates[m_num]), main_predicates[m_num],embedded[m_num], m.is_explicit) for m_num,m in p.mentions.iteritems()]
Nodes={}
Nodes.update(Entity_Nodes)
Nodes.update(Proposition_Nodes)
#create edges for all pairs of nodes that are connected by edge:
Edges={}
for edge_mention in Edge_Mentions:
if edge_mention.main_pred==None:
pairs=list(itertools.combinations(edge_mention.all_nodes, 2))
else:
pairs=[(edge_mention.main_pred,node) for node in edge_mention.all_nodes if not edge_mention.main_pred==node]
for pair in pairs:
edge_id="_".join(pair)
if edge_id not in Edges:
Edges[edge_id]=Edge(edge_id,pair[0],pair[1])
Edges[edge_id].mentions[edge_mention.id]=edge_mention
edge_mention.parents.append(edge_id)
#set edge labels:
for edge in Edges.values():
edge.label=set([mention.template for mention in edge.mentions.values()])
#turn embedded predicate mentions to edges:
Edge_Mentions_dict={m.id:m for m in Edge_Mentions}
for edge in Edges.values():
for mention in edge.mentions.values():
new_embedded={}
for e in mention.embedded:
new_embedded[e]=Edge_Mentions_dict[mention.embedded[e]].parents
mention.embedded_edges=new_embedded
#Add argument alinment links:
Args={}
Argument_Alignment={}
for p_id,p in okr.propositions.iteritems():
new_p_id="P"+str(p_id)
Args[new_p_id]={}
for m in p.mentions.values():
for a_id,a in m.argument_mentions.iteritems():
element = "E" if a.mention_type==0 else "P"
element_id=element+str(a.parent_id)
if a_id not in Args[new_p_id]:
Args[new_p_id][a_id]=set()
Args[new_p_id][a_id].add(element_id)
alignment=[[new_p_id+"_"+element for element in v] for k,v in Args[new_p_id].iteritems() if len(v)>1 ]
if (len(alignment)>0):
Argument_Alignment[new_p_id]=alignment
#create final V2 object:
v2=V2(okr.name,Nodes,Edges, Argument_Alignment, okr.sentences)
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: GPL-3.0-or-later
from json import loads
from bases.FrameworkServices.UrlService import UrlService
ORDER = [
'queries',
'queries_dropped',
'packets_dropped',
'answers',
'backend_responses',
'backend_commerrors',
'backend_errors',
'cache',
'servercpu',
'servermem',
'query_latency',
'query_latency_avg'
]
CHARTS = {
'queries': {
'options': [None, 'Client queries received', 'queries/s', 'queries', 'dnsdist.queries', 'line'],
'lines': [
['queries', 'all', 'incremental'],
['rdqueries', 'recursive', 'incremental'],
['empty-queries', 'empty', 'incremental']
]
},
'queries_dropped': {
'options': [None, 'Client queries dropped', 'queries/s', 'queries', 'dnsdist.queries_dropped', 'line'],
'lines': [
['rule-drop', 'rule drop', 'incremental'],
['dyn-blocked', 'dynamic block', 'incremental'],
['no-policy', 'no policy', 'incremental'],
['noncompliant-queries', 'non compliant', 'incremental']
]
},
'packets_dropped': {
'options': [None, 'Packets dropped', 'packets/s', 'packets', 'dnsdist.packets_dropped', 'line'],
'lines': [
['acl-drops', 'acl', 'incremental']
]
},
'answers': {
'options': [None, 'Answers statistics', 'answers/s', 'answers', 'dnsdist.answers', 'line'],
'lines': [
['self-answered', 'self answered', 'incremental'],
['rule-nxdomain', 'nxdomain', 'incremental', -1],
['rule-refused', 'refused', 'incremental', -1],
['trunc-failures', 'trunc failures', 'incremental', -1]
]
},
'backend_responses': {
'options': [None, 'Backend responses', 'responses/s', 'backends', 'dnsdist.backend_responses', 'line'],
'lines': [
['responses', 'responses', 'incremental']
]
},
'backend_commerrors': {
        'options': [None, 'Backend communication errors', 'errors/s', 'backends', 'dnsdist.backend_commerrors', 'line'],
'lines': [
['downstream-send-errors', 'send errors', 'incremental']
]
},
'backend_errors': {
'options': [None, 'Backend error responses', 'responses/s', 'backends', 'dnsdist.backend_errors', 'line'],
'lines': [
['downstream-timeouts', 'timeout', 'incremental'],
['servfail-responses', 'servfail', 'incremental'],
['noncompliant-responses', 'non compliant', 'incremental']
]
},
'cache': {
'options': [None, 'Cache performance', 'answers/s', 'cache', 'dnsdist.cache', 'area'],
'lines': [
['cache-hits', 'hits', 'incremental'],
['cache-misses', 'misses', 'incremental', -1]
]
},
'servercpu': {
'options': [None, 'DNSDIST server CPU utilization', 'ms/s', 'server', 'dnsdist.servercpu', 'stacked'],
'lines': [
['cpu-sys-msec', 'system state', 'incremental'],
['cpu-user-msec', 'user state', 'incremental']
]
},
'servermem': {
'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
'lines': [
['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
]
},
'query_latency': {
'options': [None, 'Query latency', 'queries/s', 'latency', 'dnsdist.query_latency', 'stacked'],
'lines': [
['latency0-1', '1ms', 'incremental'],
['latency1-10', '10ms', 'incremental'],
['latency10-50', '50ms', 'incremental'],
['latency50-100', '100ms', 'incremental'],
['latency100-1000', '1sec', 'incremental'],
['latency-slow', 'slow', 'incremental']
]
},
'query_latency_avg': {
'options': [None, 'Average latency for the last N queries', 'microseconds', 'latency',
'dnsdist.query_latency_avg', 'line'],
'lines': [
['latency-avg100', '100', 'absolute'],
['latency-avg1000', '1k', 'absolute'],
['latency-avg10000', '10k', 'absolute'],
['latency-avg1000000', '1000k', 'absolute']
]
}
}
class Service(UrlService):
def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
def _get_data(self):
data = self._get_raw_data()
if not data:
return None
return loads(data)
|
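# Greedy solution to "remove k digits so the remaining number is largest":
# scan left to right keeping a stack; pop while the incoming digit beats the
# stack top and removals remain, then trim any leftover removals off the end.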
def solution(number, k):
answer = [number[0]]
for num in number[1: ]:
while answer and answer[-1] < num and k > 0 :
answer.pop()
k -= 1
answer.append(num)
    if k > 0:
        answer = answer[:len(answer) - k]  # drop all remaining removals from the end, not just one
return ''.join(answer)
answer = solution("77777", 1)
print(answer)
# def solution(number, k) :
# answer = ''
# for ind in range(len(number)) :
# if k == 0 :
# answer += number[ind:]
# return answer
# if len(answer) == len(number) - k :
# return answer
# comp = number[ind : ind + k +1]
# if int(number[ind]) == int(max(comp)) :
# answer += number[ind]
# else :
# k -= 1
# if len(answer) == len(number) :
# return answer[:len(number) - k]
|
"""This module holds all custom exception class definitions."""
from __future__ import division, absolute_import, print_function
from builtins import bytes, dict, int, range, str, super # noqa
class FunkyError(Exception):
"""Base custom exception class."""
def __init__(self, *args, **kwargs):
returncode = kwargs.pop('returncode', 1)
super().__init__(*args, **kwargs)
self.returncode = returncode
class FunkNotDefinedError(FunkyError):
"""Raised when an undefined funk is referenced in a mannor that is not allowed."""
def __init__(self, *args, **kwargs):
funk = kwargs.pop('funk', None)
global_ = kwargs.pop('global_', False)
if funk is None:
if global_:
msg = 'No global funks are defined.'
else:
msg = 'No local funks are defined in the current directory.'
else:
msg_fmt = '"{}" does not match any local funks defined in the current '\
'directory.'.format(funk)
msg = msg_fmt.format(funk)
super().__init__(msg, *args, **kwargs)
class ArgumentError(FunkyError):
"""Raised when the given command-line arguments fail validation check."""
class BlankDefinition(FunkyError):
"""Raised when the user attempts to define a funk using a blank definition."""
|
class NameTooShortError(Exception):
'''Name must be more than 4 characters'''
class MustContainAtSymbolError(Exception):
'''Email must contain @'''
pass
class InvalidDomainError(Exception):
'''Domain must be one of the following: .com, .bg, .org, .net'''
pass
def check_name(email):
name_len = 0
#construct name
for char in email:
if char == "@":
break
else:
name_len += 1
if name_len <= 4:
raise NameTooShortError("Name must be more than 4 characters")
else:
return True
def check_symbol_present(email, symbol):
if symbol in email:
return True
else:
raise MustContainAtSymbolError("Email must contain @")
def check_domain(email):
valid_domains = {".com", ".bg", ".org", ".net"}
#check after the "."
index = email.index(".")
if email[index::] in valid_domains:
return True
else:
raise InvalidDomainError("Domain must be one of the folowing: .com, .bg, .org, .net")
while True:
email = input()
if (check_symbol_present(email, "@") and
check_name(email) and
check_domain(email)):
print("Email is calid") |
from .BTS import BTS
from .CA import CA
from .MIG import MIG
from .DMI import DMI
__all__ = ['BTS', 'CA', 'MIG', 'DMI']
__version__ = '0.0.5' |
import numpy as np
import pandas as pd
from bs4.element import NavigableString, Comment, Doctype
from report_parser.src.text_class import Text
def print_tag(tag):
print('printing tag:', type(tag), tag.name)
if type(tag) not in [NavigableString, Doctype, Comment]:
for child in tag.children:
print('child:', type(child), child.name)
def get_texts_and_tables(html_elems, new_method):
contents = []
contents_num = len(html_elems)
cur_elem_num = 0
while cur_elem_num < contents_num:
elem_type, elem = html_elems[cur_elem_num]
accumulated_texts = []
table = None
while elem_type == 'text' and cur_elem_num < contents_num:
accumulated_texts.append(elem)
cur_elem_num += 1
if cur_elem_num < contents_num:
elem_type, elem = html_elems[cur_elem_num]
if len(accumulated_texts):
contents.append(Text(accumulated_texts))
accumulated_texts = []
if elem_type == 'table':
            # TODO: temporary switch for testing the new parsing method
if new_method:
table = parse_table_new(elem)
else:
table = parse_table(elem)
if table.shape[0]:
contents.append(table)
cur_elem_num += 1
return contents
def parse_table_new(table_rows):
"""
Парсинг таблиц, полученныхых в результате работы ParsingTree
"""
df = pd.DataFrame()
for i in range(len(table_rows)):
html_row = table_rows[i]
row = [x for x in html_row]
        # Skip empty rows:
if not any([x[2] for x in row]):
continue
flatten_row = []
        # Look at each cell's value in the row
for col_index in range(len(row)):
row_span = row[col_index][0]
col_span = row[col_index][1]
value = row[col_index][2]
            # Propagate the current cell's value into the rows it spans
            if row_span > 1:
                # Take the required number of rows below the current one and
                # insert the value (with row_span collapsed to 1) at the matching index
                for offset in range(1, row_span):
                    real_index = sum(x[1] for x in row[:col_index])
                    cell_value = (1, col_span, value)
                    table_rows[i + offset].insert(real_index, cell_value)
            # Copy the cell's value into the next col_span columns (or just append it once)
if col_span == 1:
flatten_row.append(value)
else:
flatten_row.extend([value] * col_span)
        # Append the flattened row to the dataframe
        df = pd.concat([df, pd.DataFrame([flatten_row])])
df.reset_index(inplace=True, drop=True)
return df
def parse_table(table_rows):
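    """Flatten an HTML table into a DataFrame.

    Each row is assumed to be a list of ``(row_span, col_span, value)`` cells;
    spans are expanded by writing ``value`` into every position they cover.
    """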
max_col_num = get_max_colspan(table_rows)
df = pd.DataFrame(columns=range(max_col_num), dtype=str)
col_shifts = [0]
row_shift = 0
for i in range(len(table_rows)):
html_row = table_rows[i]
df_len = len(df)
cur_shift = col_shifts.pop() if col_shifts else 0
        if row_shift == 0:
            df.loc[len(df)] = np.nan  # start a new empty row
next_row_shift = 0
for j in range(len(html_row)):
cell = html_row[j]
shape = (cell[0], cell[1])
need_rows = shape[0] - (len(df) - df_len)
next_row_shift = max(need_rows - 1, next_row_shift)
            for _ in range(need_rows - 1):
                df.loc[len(df)] = np.nan  # pre-allocate the rows this cell spans
col_shifts.append(cur_shift + shape[1])
for cell_row_n, cell_col_n in np.ndindex((shape[0], shape[1])):
row = df_len - row_shift + cell_row_n
col = cur_shift + cell_col_n
df.loc[row, col] = cell[2]
cur_shift += shape[1]
if row_shift:
row_shift -= 1
row_shift = row_shift + next_row_shift
return df
def get_max_colspan(table_rows):
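    """Return the table's width in effective columns (the max sum of col_spans per row)."""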
max_col_num = 0
for row in table_rows:
col_num = 0
for cell in row:
col_num += cell[1]
max_col_num = max(max_col_num, col_num)
    return max_col_num
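# Read n and then n integers, and print how many times each of the values
# 1..23 occurs (assumed: inputs lie in [0, 23], e.g. hours of the day).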
n = int(input())
a = input().split()
d = []  # empty list of counters
for i in range(24):
    d.append(0)
# convert the list entries to integers
for i in range(n):
    a[i] = int(a[i])
# tally the values from a into d
for i in range(n):
    d[a[i]] += 1
for i in range(1, 24):
    print(d[i], end=" ")
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datetime import datetime
import model
from dataset import TextDataset
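# Command-line driver for a text-conditioned GAN: trains netG/netD on
# image-caption embedding pairs, or (with --eval) renders images from captions.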
parser = argparse.ArgumentParser()
# parser.add_argument(
# '--dataset',
# required=True,
# default='folder',
# help='cifar10 | lsun | imagenet | folder | lfw | fake')
parser.add_argument(
'--dataroot', required=True, default='./data/coco', help='path to dataset')
parser.add_argument(
'--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument(
'--batchSize', type=int, default=64, help='input batch size')
parser.add_argument(
'--imageSize',
type=int,
default=64,
help='the height / width of the input image to network')
parser.add_argument(
'--nte',
type=int,
default=1024,
help='the size of the text embedding vector')
parser.add_argument(
'--nt',
type=int,
default=256,
help='the reduced size of the text embedding vector')
parser.add_argument(
'--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument(
'--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument(
'--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument(
'--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument(
'--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument(
'--netG', default='', help="path to netG (to continue training)")
parser.add_argument(
'--netD', default='', help="path to netD (to continue training)")
parser.add_argument(
'--outf',
default='./output/',
help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument(
'--eval',
action='store_true',
help="choose whether to train the model or show demo")
opt = parser.parse_args()
print(opt)
try:
output_dir = os.path.join(opt.outf,
datetime.strftime(datetime.now(), "%Y%m%d_%H%M"))
os.makedirs(output_dir)
except OSError:
pass
if opt.manualSeed is None:
    # Pick a random seed for this run; when resuming training from a
    # checkpoint, pass --manualSeed to reproduce the original run instead.
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print(
"WARNING: You have a CUDA device, so you should probably run with --cuda"
)
image_transform = transforms.Compose([
transforms.RandomCrop(opt.imageSize),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0, 0, 0), (1, 1, 1))
])
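# Note: Normalize((0, 0, 0), (1, 1, 1)) is an identity transform here, so
# images reach the networks in ToTensor()'s [0, 1] range.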
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
nt = int(opt.nt)
nte = int(opt.nte)
# Custom weights initialization applied to netG and netD (DCGAN-style:
# conv weights ~ N(0, 0.02); batchnorm weights ~ N(1, 0.02) with zero bias).
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
# m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
netG = model._netG(ngpu, nz, ngf, nc, nte, nt)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = model._netD(ngpu, nc, ndf, nte, nt)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
fixed_noise = Variable(fixed_noise)
if not opt.eval:
train_dataset = TextDataset(opt.dataroot, transform=image_transform)
## Completed - TODO: Make a new DataLoader and Dataset to include embeddings
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
# setup optimizer
optimizerD = optim.Adam(
netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(
netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
## Completed TODO: Change the error loss function to include embeddings [refer main_cls.lua on the original paper repo]
for epoch in range(1, opt.niter + 1):
if epoch % 75 == 0:
optimizerG.param_groups[0]['lr'] /= 2
optimizerD.param_groups[0]['lr'] /= 2
for i, data in enumerate(train_dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_cpu, text_embedding, _ = data
batch_size = real_cpu.size(0)
text_embedding = Variable(text_embedding)
if opt.cuda:
real_cpu = real_cpu.cuda()
text_embedding = text_embedding.cuda()
input.resize_as_(real_cpu).copy_(real_cpu)
label.resize_(batch_size).fill_(real_label)
inputv = Variable(input)
labelv = Variable(label)
output = netD(inputv, text_embedding)
errD_real = criterion(output, labelv) ##
errD_real.backward()
D_x = output.data.mean()
### calculate errD_wrong
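        # Roll the batch by one so each real image is paired with a mismatched
        # caption embedding; scoring these pairs as fake is the "wrong pair"
        # term (weighted 0.5) in the style of GAN-CLS (Reed et al., 2016).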
inputv = torch.cat((inputv[1:], inputv[:1]), 0)
output = netD(inputv, text_embedding)
errD_wrong = criterion(output, labelv) * 0.5
errD_wrong.backward()
# train with fake
noise.resize_(batch_size, nz, 1, 1).normal_(0, 1)
noisev = Variable(noise)
fake = netG(noisev, text_embedding)
labelv = Variable(label.fill_(fake_label))
output = netD(fake.detach(), text_embedding)
errD_fake = criterion(output, labelv) * 0.5
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = errD_real + errD_fake + errD_wrong
# errD.backward()
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
labelv = Variable(label.fill_(
real_label)) # fake labels are real for generator cost
output = netD(fake, text_embedding)
errG = criterion(output, labelv) ##
errG.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
print(
'[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(train_dataloader), errD.data[0],
errG.data[0], D_x, D_G_z1, D_G_z2))
if i % 100 == 0:
vutils.save_image(
real_cpu, '%s/real_samples.png' % output_dir, normalize=True)
fake = netG(fixed_noise, text_embedding)
vutils.save_image(
fake.data,
'%s/fake_samples_epoch_%03d.png' % (output_dir, epoch),
normalize=True)
# do checkpointing
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (output_dir,
epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (output_dir,
epoch))
else:
    test_dataset = TextDataset(opt.dataroot, transform=image_transform, split='test')
## Completed - TODO: Make a new DataLoader and Dataset to include embeddings
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
for i, data in enumerate(test_dataloader, 0):
        real_image, text_embedding, caption = data
batch_size = real_image.size(0)
text_embedding = Variable(text_embedding)
if opt.cuda:
real_image = real_image.cuda()
text_embedding = text_embedding.cuda()
input.resize_as_(real_image).copy_(real_image)
inputv = Variable(input)
noise.resize_(batch_size, nz, 1, 1).normal_(0, 1)
noisev = Variable(noise)
        num_test_outputs = 10
        # for count in range(num_test_outputs):
        #     print(count)
        count = 0
        print(i)
synthetic_image = netG(noisev, text_embedding)
synthetic_image = synthetic_image.detach()
        for idx in range(synthetic_image.size(0)):
            cap = caption[idx].strip(".")
            cap = cap.replace("/", " or ")
            cap = cap.replace(" ", "_")
            if len(cap) > 95:
                cap = cap[:95]
            file_path = './eval_results/' + cap
            # if not os.path.exists(file_path):
            #     os.makedirs(file_path)
            try:
                vutils.save_image(synthetic_image[idx].data, file_path + '_' + str(count) + '.jpg')
                # vutils.save_image(synthetic_image[idx].data, os.path.join(file_path, str(count) + '.jpg'))
            except Exception as e:
                print(e)
from typing import List
from uuid import UUID
from fastapi import APIRouter
from fastapi.param_functions import Depends
from sqlalchemy.orm.session import Session
from starlette.status import HTTP_201_CREATED
from src.core.controller import item
from src.core.helpers.database import make_session
from src.core.models import Context, CreateItem, Item, QueryItem, UpdateItem
from src.utils.dependencies import context_manager
router = APIRouter()
@router.get("/", response_model=List[Item])
async def get_all(
query: QueryItem = Depends(),
session: Session = Depends(make_session),
context: Context = Depends(context_manager),
):
return item.get_all(session, query, context=context)
@router.get("/{item_id}", response_model=Item)
async def get(item_id: UUID, session: Session = Depends(make_session), context: Context = Depends(context_manager)):
return item.get_by_id(session, item_id=item_id, context=context)
@router.post("/", response_model=Item, status_code=HTTP_201_CREATED)
async def create(
schema: CreateItem,
session: Session = Depends(make_session),
context: Context = Depends(context_manager),
):
return item.create(session, schema, context=context)
@router.delete("/{item_id}", response_model=Item)
async def delete(item_id: UUID, session: Session = Depends(make_session), context: Context = Depends(context_manager)):
return item.delete(session, item_id, context=context)
@router.patch("/", response_model=Item)
async def update(
data: UpdateItem, session: Session = Depends(make_session), context: Context = Depends(context_manager)
):
return item.update(session, data, context=context)
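# A minimal sketch of how this router might be mounted (the prefix and app
# below are illustrative, not part of this module):
#
#   from fastapi import FastAPI
#
#   app = FastAPI()
#   app.include_router(router, prefix="/items", tags=["items"])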
import abc
# Component
class ArchivoComponent(metaclass=abc.ABCMeta):
@abc.abstractmethod
def imprimeEstructura(self):
pass
class ArchivoComposite(ArchivoComponent):
def __init__(self):
self.child_directory = []
    def add(self, component):
        self.child_directory.append(component)
    def remove(self, component):
        self.child_directory.remove(component)
    def imprimeEstructura(self):
        # Recurse into every child so nested structures print fully
        for child in self.child_directory:
            child.imprimeEstructura()
class ArchivoLeaf(ArchivoComposite):
def __init__(self):
self.nombre = None
self.tipo = None
def set_nombre(self, nombre):
self.nombre = nombre
def set_tipo(self, tipo):
self.tipo = tipo
def union(self):
print(self.nombre+self.tipo)
    def imprimeEstructura(self):
        self.union()
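# Design note: in a textbook Composite the leaf would derive from
# ArchivoComponent rather than from ArchivoComposite; deriving from the
# composite works here only because the leaf never touches child_directory.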
def main():
leaf = ArchivoLeaf()
directorio = ArchivoComposite()
leaf.set_nombre("documento")
leaf.set_tipo("doc")
directorio.add(leaf)
    directorio.imprimeEstructura()  # prints "documentodoc"
if __name__ == "__main__":
    main()