repo_name
| path
| text
|
|---|---|---|
daegutted/Carteirinha-Vitual
|
backend/modelo.py
|
from config import * # config is expected to provide db (the SQLAlchemy instance), os and arquivobd
# Parent class for Funcionario (employee) and Cidadao (citizen)
class Pessoa(db.Model):
Id = db.Column(db.Integer, primary_key=True)
NomeCompleto = db.Column(db.String(200))
DtNascimento = db.Column(db.String(100))
Genero = db.Column(db.String(1))
Cpf = db.Column(db.String(100))
Email = db.Column(db.String(100))
Senha = db.Column(db.String(100))
Type = db.Column(db.String(50)) # Discriminator column
__mapper_args__ = {
'polymorphic_identity':'pessoa',
'polymorphic_on':Type # column that ties the child classes to the parent
}
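# Note (assuming Flask-SQLAlchemy defaults): Funcionario and Cidadao below declare no
# __tablename__ or primary key of their own, so they are mapped with single-table inheritance:
# all three classes share the pessoa table, and the Type column records which subclass each
# row belongs to. That is what lets the script at the bottom call db.session.query(Pessoa).all()
# and get Funcionario and Cidadao instances back together.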
def __str__(self):
return f'{self.Id}, {self.NomeCompleto}, {self.DtNascimento}, {self.Genero}, {self.Cpf}, {self.Email}, {self.Senha}'
# Class representing a health unit
class Unidade_Saude(db.Model):
Id = db.Column(db.Integer, primary_key=True)
Nome = db.Column(db.String(200))
Email = db.Column(db.String(100))
Senha = db.Column(db.String(100))
Cep = db.Column(db.String(50))
Complemento = db.Column(db.String(100))
Type = 'Unidade_saude'
# Formatting for printing in the terminal
def __str__(self):
return f'{str(self.Id)}, {self.Nome}, {self.Cep}, {self.Complemento}'
# Builds the JSON payload to send
def json(self):
return {
"Id": self.Id,
"Nome": self.Nome,
"Cep": self.Cep,
"Complemento": self.Complemento,
"Email": self.Email,
"Senha": self.Senha,
"Type": self.Type
}
# Child class representing a health unit employee
class Funcionario(Pessoa):
UnidadeSaudeId = db.Column(db.Integer, db.ForeignKey(Unidade_Saude.Id), nullable = True)
UnidadeSaude = db.relationship('Unidade_Saude') # Association with the health unit
__mapper_args__ = {
'polymorphic_identity':'funcionario',
}
# Formatting for printing in the terminal
def __str__(self):
return f'{super().__str__()}, {str(self.UnidadeSaudeId)}, {str(self.UnidadeSaude)}, {self.Type}'
# Builds the JSON payload to send
def json(self):
return {
"Id": self.Id,
"NomeCompleto": self.NomeCompleto,
"DtNascimento": self.DtNascimento,
"Genero": self.Genero,
"Cpf": self.Cpf,
"Email": self.Email,
"Senha": self.Senha,
"UnidadeSaudeId": self.UnidadeSaudeId,
"UnidadeSaude": self.UnidadeSaude.json(), # Reciclando a função da classe Unidade_Saude
"Type": self.Type
}
# Child class representing a health unit client (citizen)
class Cidadao(Pessoa):
Cep = db.Column(db.String(50))
Complemento = db.Column(db.String(50))
temComorbidades = db.Column(db.Boolean)
TipoComorbidades = db.Column(db.String(200))
__mapper_args__ = {
'polymorphic_identity':'cidadao',
}
# Formatting for printing in the terminal
def __str__(self):
return f'{super().__str__()}, {self.Cep}, {self.Complemento}, {str(self.temComorbidades)}, {self.TipoComorbidades}, {self.Type}'
# Builds the JSON payload to send
def json(self):
return {
"Id": self.Id,
"NomeCompleto": self.NomeCompleto,
"DtNascimento": self.DtNascimento,
"Genero": self.Genero,
"Cpf": self.Cpf,
"Email": self.Email,
"Senha": self.Senha,
"Cep": self.Cep,
"Complemento": self.Complemento,
"temComorbidades": self.temComorbidades,
"TipoComorbidade": self.TipoComorbidades,
"Type": self.Type
}
# Class representing vaccine appointments
class Agendamento(db.Model):
Id = db.Column(db.Integer, primary_key=True)
NomeVacina = db.Column(db.String(200))
Data = db.Column(db.String(200))
Status = db.Column(db.String(1)) # Convention: R - received, A - scheduled
# Link to the citizen
IdCidadao = db.Column(db.Integer, db.ForeignKey(Pessoa.Id), nullable = True)
Cidadao = db.relationship('Pessoa')
# Link to the health unit
UnidadeSaudeId = db.Column(db.Integer, db.ForeignKey(Unidade_Saude.Id), nullable = True)
UnidadeSaude = db.relationship('Unidade_Saude')
# Formatting for printing in the terminal
def __str__(self):
return f'{str(self.Id)}, {self.NomeVacina}, {self.Data}, {self.Status}, {str(self.IdCidadao)}, {str(self.UnidadeSaudeId)}, {self.Cidadao}, {self.UnidadeSaude}'
# Builds the JSON payload to send
def json(self):
return {
"Id": self.Id,
"NomeVacina": self.NomeVacina,
"Data": self.Data,
"Status": self.Status,
"IdCidadao": self.IdCidadao,
"UnidadeSaudeId": self.UnidadeSaudeId,
"Cidadao": self.Cidadao.json(),
"UnidadeSaude": self.UnidadeSaude.json()
}
# Class representing the health unit's stock
class Estoque(db.Model):
Id = db.Column(db.Integer, primary_key=True)
QtdVacina = db.Column(db.Integer)
Descricao = db.Column(db.String(200))
# Link to the health unit
UnidadeSaudeId = db.Column(db.Integer, db.ForeignKey(Unidade_Saude.Id), nullable = True)
UnidadeSaude = db.relationship('Unidade_Saude')
# Formatting for printing in the terminal
def __str__(self):
return f'{str(self.Id)}, {self.QtdVacina}, {self.Descricao}, {str(self.UnidadeSaudeId)}, {self.UnidadeSaude}'
# Builds the JSON payload to send
def json(self):
return {
"Id": self.Id,
"QtdVacina": self.QtdVacina,
"Descricao": self.Descricao,
"UnidadeSaudeId": self.UnidadeSaudeId,
"UnidadeSaude": self.UnidadeSaude.json()
}
class Vacina(db.Model):
Id = db.Column(db.Integer, primary_key=True)
NomeVacina = db.Column(db.String(50))
Data = db.Column(db.String(10))
Status = db.Column(db.String(1)) # Convention: R - received, A - scheduled
# Link to the citizen
CidadaoId = db.Column(db.Integer, db.ForeignKey(Cidadao.Id), nullable = True)
Cidadao = db.relationship('Cidadao')
# Formatting for printing in the terminal
def __str__(self):
return f'{str(self.Id)}, {self.NomeVacina}, {self.Data}, {str(self.Status)}, {self.CidadaoId} {self.Cidadao}'
# Builds the JSON payload to send
def json(self):
return {
"Id": self.Id,
"NomeVacina": self.NomeVacina,
"Data": self.Data,
"Status": self.Status,
"Cidadao": self.CidadaoId,
"CidadaoId": self.Cidadao.json()
}
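# Illustrative sketch (not part of the original file): the json() methods above return plain
# dicts, so a Flask view elsewhere in the backend could serve them roughly like this, assuming
# an `app` Flask instance is importable from config (route, URL and names below are hypothetical):
#
#     from flask import jsonify
#
#     @app.route("/vacinas/<int:cidadao_id>")
#     def listar_vacinas(cidadao_id):
#         vacinas = db.session.query(Vacina).filter_by(CidadaoId=cidadao_id).all()
#         return jsonify([v.json() for v in vacinas])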
# Keeps the following code from running when this module is imported
if __name__ == "__main__":
# Deletes any existing database file so data is not duplicated
if os.path.exists(arquivobd):
os.remove(arquivobd)
db.create_all() # Creates the database tables
# Seed data
us1 = Unidade_Saude(Nome = "Post<NAME> esquina", Cep = "10475221", Complemento = "Numero 14542", Email = "<EMAIL>", Senha = "<PASSWORD>")
us2 = Unidade_Saude(Nome = "<NAME>", Cep = "12345-678", Complemento = "Numero 1549", Email = "<EMAIL>", Senha = "<PASSWORD>")
us3 = Unidade_Saude(Nome = "Post<NAME>", Cep = "87654-321", Complemento = "Numero 753", Email = "<EMAIL>", Senha = "<PASSWORD>")
us4 = Unidade_Saude(Nome = "<NAME>", Cep = "87456-321", Complemento = "Numero 159", Email = "<EMAIL>", Senha = "<PASSWORD>")
f1 = Funcionario(NomeCompleto = "<NAME>", DtNascimento = "2003-07-11", Genero = "M", Cpf = "052.827.732-44", Email = "<EMAIL>", \
Senha = "<PASSWORD>", UnidadeSaude = us1)
f2 = Funcionario(NomeCompleto = "Funcionario A", DtNascimento = "1999-11-11", Genero = "F", Cpf = "123.456.789-55", Email = "<EMAIL>", \
Senha = "<PASSWORD>", UnidadeSaude = us2)
f3 = Funcionario(NomeCompleto = "Funcionario B", DtNascimento = "2000-10-10", Genero = "M", Cpf = "234.567.891-33", Email = "<EMAIL>", \
Senha = "<PASSWORD>2", UnidadeSaude = us3)
f4 = Funcionario(NomeCompleto = "Funcion<NAME>", DtNascimento = "1975-08-25", Genero = "F", Cpf = "147.258.369-24", Email = "<EMAIL>", \
Senha = "<PASSWORD>3", UnidadeSaude = us4)
c1 = Cidadao(NomeCompleto = "<NAME>", DtNascimento = "2003-05-20", Genero = "F", Cpf = "180.728.569-58", Email = "<EMAIL>", \
Senha = "joaolindoS2", Cep = "16476261", Complemento = "ap 666", temComorbidades = True, TipoComorbidades = "Cardiopatia|miope|Feia")
c2 = Cidadao(NomeCompleto = "<NAME>", DtNascimento = "1980-08-08", Genero = "M", Cpf = "943.167.248-51", Email = "<EMAIL>", \
Senha = "<PASSWORD>1", Cep = "16385563", Complemento = "numero 154", temComorbidades = False)
c3 = Cidadao(NomeCompleto = "<NAME>", DtNascimento = "1845-07-07", Genero = "F", Cpf = "485.326.256-12", Email = "<EMAIL>", \
Senha = "cidadao2", Cep = "34587711", Complemento = "lote 12", temComorbidades = False)
c4 = Cidadao(NomeCompleto = "<NAME>", DtNascimento = "2000-11-06", Genero = "F", Cpf = "001-002-006-45", Email = "<EMAIL>", \
Senha = "cidadao3", Cep = "23698714", Complemento = "numero 45821", temComorbidades = True, TipoComorbidades = "Insuficiencia renal")
a1 = Agendamento(NomeVacina = "Covid-19", Data = "2021-09-27", Status = "A", Cidadao = c1, UnidadeSaude = us1)
e1 = Estoque(QtdVacina = 300, Descricao = "Covid-19 pfizer", UnidadeSaude = us1)
v1 = Vacina(NomeVacina = "<NAME>", Data = "10/10/2010", Status = "A", Cidadao = c1)
v2 = Vacina(NomeVacina = "Influenza", Data = "11/10/2010", Status = "R", Cidadao = c1)
v3 = Vacina(NomeVacina = "Tetravalente", Data = "12/10/2010", Status = "R", Cidadao = c1)
v4 = Vacina(NomeVacina = "DT", Data = "13/10/2010", Status = "R", Cidadao = c1)
v5 = Vacina(NomeVacina = "Pneumococo", Data = "14/10/2010", Status = "R", Cidadao = c1)
v6 = Vacina(NomeVacina = "VOP", Data = "15/10/2010", Status = "A", Cidadao = c1)
v7 = Vacina(NomeVacina = "BCG", Data = "10/10/1980", Status = "R", Cidadao = c2)
v8 = Vacina(NomeVacina = "Hepatite B", Data = "12/10/1980", Status = "R", Cidadao = c2)
v9 = Vacina(NomeVacina = "Tetravalente", Data = "20/10/2000", Status = "R", Cidadao = c2)
v10 = Vacina(NomeVacina = "<NAME>", Data = "13/10/2000", Status = "R", Cidadao = c2)
v11 = Vacina(NomeVacina = "Pneumococo", Data = "14/10/2020", Status = "R", Cidadao = c3)
v12 = Vacina(NomeVacina = "Influenza", Data = "20/11/2021", Status = "A", Cidadao = c3)
v13 = Vacina(NomeVacina = "BCG", Data = "06/11/2000", Status = "R", Cidadao = c4)
v14 = Vacina(NomeVacina = "Hepatite B", Data = "06/11/2000", Status = "R", Cidadao = c4)
v15 = Vacina(NomeVacina = "VORH", Data = "16/12/2000", Status = "R", Cidadao = c4)
v16 = Vacina(NomeVacina = "Tetravalente", Data = "13/10/2002", Status = "R", Cidadao = c4)
v17 = Vacina(NomeVacina = "HPV", Data = "08/10/2015", Status = "R", Cidadao = c4)
v18 = Vacina(NomeVacina = "HPV", Data = "08/11/2015", Status = "R", Cidadao = c4)
v19 = Vacina(NomeVacina = "HPV", Data = "10/12/2015", Status = "R", Cidadao = c4)
# Add everything to the session to be committed
db.session.add_all([us1, us2, us3, us4,
f1, f2, f3, f4,
c1, c2, c3, c4,
a1, e1,
v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
v11, v12, v13, v14, v15, v16, v17, v18, v19])
db.session.commit() # Writes the data to the database
TodosPessoa = db.session.query(Pessoa).all() # Loads the rows from the database into a list
# Print the information
print("")
for i in TodosPessoa:
print(i)
print(i.json())
print("")
TodosAgendamento = db.session.query(Agendamento).all()
for i in TodosAgendamento:
print(i)
print(i.json())
print("")
TodosEstoque = db.session.query(Estoque).all()
for i in TodosEstoque:
print(i)
print(i.json())
print("")
TodosVacina = db.session.query(Vacina).all()
for i in TodosVacina:
print(i)
print(i.json())
print("")
|
Aetf/superpaper
|
superpaper/spanmode.py
|
"""Control host OS desktop background spanning mode when needed."""
import os
import platform
import subprocess
if platform.system() == "Windows":
import winreg
def set_spanmode():
"""Sets host OS desktop background to span all displays."""
pltf = platform.system()
if pltf == "Windows":
# Windows wallpaper fitting style codes:
# Fill = 10
# Fit = 6
# Stretch = 2
# Tile = 0 and there is another key called "TileWallpaper" which needs value 1
# Center = 0 (with key "TileWallpaper" = 0)
# Span = 22
# Both WallpaperStyle and TileWallpaper keys need to be set under HKEY_CURRENT_USER\Control Panel\Desktop
reg_key_desktop = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r'Control Panel\Desktop',
0, winreg.KEY_SET_VALUE)
winreg.SetValueEx(reg_key_desktop, "WallpaperStyle", 0, winreg.REG_SZ, "22")
winreg.SetValueEx(reg_key_desktop, "TileWallpaper", 0, winreg.REG_SZ, "0")
elif pltf == "Linux":
desk_env = os.environ.get("DESKTOP_SESSION")
if desk_env:
if desk_env in ["gnome", "gnome-wayland", "gnome-xorg",
"unity", "ubuntu",
"pantheon", "budgie-desktop",
"pop"]:
subprocess.run(["gsettings", "set",
"org.gnome.desktop.background", "picture-options",
"spanned"])
elif desk_env in ["cinnamon"]:
subprocess.run(["gsettings", "set",
"org.cinnamon.desktop.background", "picture-options",
"spanned"])
elif desk_env in ["mate"]:
subprocess.run(["gsettings", "set",
"org.mate.background", "picture-options",
"spanned"])
elif desk_env.lower() == "lubuntu" or "lxqt" in desk_env.lower():
try:
subprocess.run(["pcmanfm", "--wallpaper-mode=stretch"])
except OSError:
try:
subprocess.run(["pcmanfm-qt", "--wallpaper-mode=stretch"])
except OSError:
pass
elif pltf == "Darwin":
# Mac support TODO
pass
else:
pass
|
Aetf/superpaper
|
superpaper/configuration_dialogs.py
|
"""
GUI dialogs for Superpaper.
"""
import os
import time
import superpaper.perspective as persp
import superpaper.sp_logging as sp_logging
import superpaper.wallpaper_processing as wpproc
from superpaper.data import GeneralSettingsData, ProfileData, TempProfileData, CLIProfileData, list_profiles
from superpaper.message_dialog import show_message_dialog
from superpaper.wallpaper_processing import NUM_DISPLAYS, get_display_data, change_wallpaper_job
from superpaper.sp_paths import PATH, CONFIG_PATH, PROFILES_PATH
try:
import wx
import wx.adv
except ImportError:
exit()
RESOURCES_PATH = os.path.join(PATH, "superpaper/resources")
TRAY_ICON = os.path.join(RESOURCES_PATH, "superpaper.png")
class BrowsePaths(wx.Dialog):
"""Path picker dialog class."""
def __init__(self, parent, use_multi_image, defdir, num_span_groups=None):
wx.Dialog.__init__(self, parent, -1,
'Choose image source directories or image files',
size=(250, 250),
style=wx.RESIZE_BORDER|wx.DEFAULT_DIALOG_STYLE)
self.SetMinSize((250, 250))
BMP_SIZE = 32
self.tsize = (BMP_SIZE, BMP_SIZE)
self.il = wx.ImageList(BMP_SIZE, BMP_SIZE)
if num_span_groups:
self.num_wallpaper_area = num_span_groups
self.wp_area_name = "Group"
else:
self.num_wallpaper_area = wpproc.NUM_DISPLAYS
self.wp_area_name = "Display"
self.use_multi_image = use_multi_image
self.path_list_data = []
self.paths = []
sizer_main = wx.BoxSizer(wx.VERTICAL)
sizer_browse = wx.BoxSizer(wx.VERTICAL)
self.sizer_paths_list = wx.BoxSizer(wx.VERTICAL)
sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
self.defdir = defdir
self.dir3 = wx.GenericDirCtrl(
self, -1,
# size=(450, 550),
# style=wx.DIRCTRL_SHOW_FILTERS|wx.DIRCTRL_MULTIPLE,
# style=wx.DIRCTRL_MULTIPLE,
dir=self.defdir,
filter="Image files (*.jpg, *.png)|*.jpg;*.jpeg;*.png;*.bmp;*.gif;*.tiff;*.webp"
)
sizer_browse.Add(self.dir3, 1, wx.CENTER|wx.ALL|wx.EXPAND, 5)
st_paths_list = wx.StaticText(
self, -1,
"Selected wallpaper source directories and files:"
)
self.sizer_paths_list.Add(st_paths_list, 0, wx.ALIGN_LEFT|wx.ALL, 5)
self.create_paths_listctrl(self.use_multi_image)
if self.use_multi_image:
sizer_radio = wx.BoxSizer(wx.VERTICAL)
radio_choices_displays = [
self.wp_area_name + " {}".format(i) for i in range(self.num_wallpaper_area)
]
self.radiobox_displays = wx.RadioBox(self, wx.ID_ANY,
label="Select display to add sources to",
choices=radio_choices_displays,
style=wx.RA_HORIZONTAL
)
sizer_radio.Add(self.radiobox_displays, 0, wx.CENTER|wx.ALL|wx.EXPAND, 5)
# Buttons
self.button_add = wx.Button(self, label="Add source")
self.button_remove = wx.Button(self, label="Remove source")
self.button_defdir = wx.Button(self, label="Save as browse start")
self.button_clrdefdir = wx.Button(self, label="Clear browse start")
self.button_ok = wx.Button(self, label="Ok")
self.button_cancel = wx.Button(self, label="Cancel")
self.button_add.Bind(wx.EVT_BUTTON, self.onAdd)
self.button_remove.Bind(wx.EVT_BUTTON, self.onRemove)
self.button_defdir.Bind(wx.EVT_BUTTON, self.onDefDir)
self.button_clrdefdir.Bind(wx.EVT_BUTTON, self.onClrDefDir)
self.button_ok.Bind(wx.EVT_BUTTON, self.onOk)
self.button_cancel.Bind(wx.EVT_BUTTON, self.onCancel)
sizer_buttons.Add(self.button_add, 0, wx.CENTER|wx.ALL, 5)
sizer_buttons.Add(self.button_remove, 0, wx.CENTER|wx.ALL, 5)
sizer_buttons.AddStretchSpacer()
sizer_buttons.Add(self.button_defdir, 0, wx.CENTER|wx.ALL, 5)
sizer_buttons.Add(self.button_clrdefdir, 0, wx.CENTER|wx.ALL, 5)
sizer_buttons.AddStretchSpacer()
sizer_buttons.Add(self.button_ok, 0, wx.CENTER|wx.ALL, 5)
sizer_buttons.Add(self.button_cancel, 0, wx.CENTER|wx.ALL, 5)
sizer_main.Add(sizer_browse, 1, wx.ALL|wx.EXPAND)
sizer_main.Add(self.sizer_paths_list, 0, wx.ALL|wx.EXPAND)
if self.use_multi_image:
sizer_main.Add(sizer_radio, 0, wx.ALL|wx.EXPAND, 5)
sizer_main.Add(sizer_buttons, 0, wx.ALL|wx.EXPAND, 5)
# self.SetSizer(sizer_main)
self.SetSizerAndFit(sizer_main)
self.SetSize((450, 650))
# self.SetAutoLayout(True)
def create_paths_listctrl(self, use_multi_image):
if use_multi_image:
self.paths_listctrl = wx.ListCtrl(self, -1,
size=(-1, -1),
style=wx.LC_REPORT
# | wx.BORDER_SUNKEN
| wx.BORDER_SIMPLE
# | wx.BORDER_STATIC
# | wx.BORDER_THEME
# | wx.BORDER_NONE
# | wx.LC_EDIT_LABELS
| wx.LC_SORT_ASCENDING
# | wx.LC_NO_HEADER
# | wx.LC_VRULES
# | wx.LC_HRULES
# | wx.LC_SINGLE_SEL
)
self.paths_listctrl.InsertColumn(0, self.wp_area_name, wx.LIST_FORMAT_RIGHT, width=100)
self.paths_listctrl.InsertColumn(1, 'Source', width=620)
else:
# show simpler listing without header if only one wallpaper target
self.paths_listctrl = wx.ListCtrl(self, -1,
size=(-1, -1),
style=wx.LC_REPORT
# | wx.BORDER_SUNKEN
| wx.BORDER_SIMPLE
# | wx.BORDER_STATIC
# | wx.BORDER_THEME
# | wx.BORDER_NONE
# | wx.LC_EDIT_LABELS
# | wx.LC_SORT_ASCENDING
| wx.LC_NO_HEADER
# | wx.LC_VRULES
# | wx.LC_HRULES
# | wx.LC_SINGLE_SEL
)
self.paths_listctrl.InsertColumn(0, 'Source', width=720)
# Add the item list to the control
self.paths_listctrl.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
self.sizer_paths_list.Add(self.paths_listctrl, 1, wx.CENTER|wx.ALL|wx.EXPAND, 5)
def append_to_listctrl(self, data_row):
if self.use_multi_image:
img_id = self.add_to_imagelist(data_row[1])
index = self.paths_listctrl.InsertItem(self.paths_listctrl.GetItemCount(), data_row[0], img_id)
self.paths_listctrl.SetItem(index, 1, data_row[1])
else:
img_id = self.add_to_imagelist(data_row[0])
index = self.paths_listctrl.InsertItem(self.paths_listctrl.GetItemCount(), data_row[0], img_id)
# self.paths_listctrl.SetItem(index, 1, data[1])
def add_to_imagelist(self, path):
folder_bmp = wx.ArtProvider.GetBitmap(wx.ART_FOLDER, wx.ART_TOOLBAR, self.tsize)
# file_bmp = wx.ArtProvider.GetBitmap(wx.ART_NORMAL_FILE, wx.ART_TOOLBAR, self.tsize)
if os.path.isdir(path):
img_id = self.il.Add(folder_bmp)
else:
thumb_bmp = self.create_thumb_bmp(path)
img_id = self.il.Add(thumb_bmp)
return img_id
def create_thumb_bmp(self, filename):
wximg = wx.Image(filename, type=wx.BITMAP_TYPE_ANY)
imgsize = wximg.GetSize()
w2h_ratio = imgsize[0]/imgsize[1]
if w2h_ratio > 1:
target_w = self.tsize[0]
target_h = target_w/w2h_ratio
pos = (0, round((target_w - target_h)/2))
else:
target_h = self.tsize[1]
target_w = target_h*w2h_ratio
pos = (round((target_h - target_w)/2), 0)
bmp = wximg.Scale(target_w,
target_h,
quality=wx.IMAGE_QUALITY_BOX_AVERAGE
).Resize(self.tsize,
pos
).ConvertToBitmap()
return bmp
#
# BUTTON methods
#
def onAdd(self, event):
"""Adds selected path to export field."""
path_data_tuples = []
sel_path = self.dir3.GetPath()
# self.dir3.GetPaths(paths) # more efficient but couldn't get to work
if self.use_multi_image:
# Extra column in advanced mode
disp_id = str(self.radiobox_displays.GetSelection())
self.append_to_listctrl([disp_id, sel_path])
else:
self.append_to_listctrl([sel_path])
def onRemove(self, event):
"""Removes last appended path from export field."""
item = self.paths_listctrl.GetFocusedItem()
if item != -1:
self.paths_listctrl.DeleteItem(item)
def onDefDir(self, event):
sel_path = self.dir3.GetPath()
if os.path.isdir(sel_path):
self.defdir = sel_path
current_settings = GeneralSettingsData()
current_settings.browse_default_dir = self.defdir.strip()
current_settings.save_settings()
else:
pass
def onClrDefDir(self, event):
self.defdir = ""
current_settings = GeneralSettingsData()
current_settings.browse_default_dir = ""
current_settings.save_settings()
def onOk(self, event):
"""Exports path to parent Profile Config dialog."""
columns = self.paths_listctrl.GetColumnCount()
for idx in range(self.paths_listctrl.GetItemCount()):
item_dat = []
for col in range(columns):
item_dat.append(self.paths_listctrl.GetItemText(idx, col))
self.path_list_data.append(item_dat)
# print(self.path_list_data)
# if listctrl is empty, onOk maybe could pass on the selected item? or disable OK if list is empty?
self.EndModal(wx.ID_OK)
def onCancel(self, event):
"""Closes path picker, throwing away selections."""
self.Destroy()
class DisplayPositionEntry(wx.Frame):
"""Display position fine control dialog."""
def __init__(self, parent):
wx.Frame.__init__(self, parent.frame, -1,
'Enter display positions'
)
self.ToggleWindowStyle(wx.STAY_ON_TOP)
self.SetIcon(wx.Icon(TRAY_ICON, wx.BITMAP_TYPE_PNG))
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.tc_width = 100
self.parent = parent
self.frame = parent # help dialog looks for this name
self.parent.button_save.Disable()
self.parent.button_cancel.Disable()
self.display_sys = parent.display_sys
self.parent.export_offsets(self.display_sys) # export dragged offsets first
self.old_ppinorm_offs = self.display_sys.get_ppinorm_offsets() # back up the offsets
self.help_bmp = wx.ArtProvider.GetBitmap(wx.ART_QUESTION, wx.ART_BUTTON, (20, 20))
sizer_main = wx.BoxSizer(wx.VERTICAL)
# Display position config
self.create_position_config(wpproc.NUM_DISPLAYS)
# Bottom row buttons
self.create_bottom_butts()
self.set_unit_labels(self.cb_use_px.GetValue())
sizer_main.Add(self.sizer_pos_conf, 0, wx.ALL|wx.EXPAND, 5)
sizer_main.Add(self.sizer_buttons, 0, wx.ALL|wx.EXPAND, 5)
self.SetSizer(sizer_main)
self.Fit()
self.populate_fields()
self.Center()
self.Show()
def create_position_config(self, num_disps):
"""Create display position entry and data grid sizer."""
cols = 7
gap = 5
self.sizer_pos_conf = wx.BoxSizer(wx.VERTICAL)
self.grid = wx.FlexGridSizer(cols, gap, gap)
# header
hd_id = wx.StaticText(self, -1, "Display")
self.hd_left = wx.StaticText(self, -1, "Left")
self.hd_top = wx.StaticText(self, -1, "Top")
self.hd_right = wx.StaticText(self, -1, "Right")
self.hd_bott = wx.StaticText(self, -1, "Bottom")
self.hd_left_new = wx.StaticText(self, -1, "Left new")
self.hd_top_new = wx.StaticText(self, -1, "Top new")
self.grid.Add(hd_id, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(self.hd_left, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(self.hd_top, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(self.hd_right, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(self.hd_bott, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(self.hd_left_new, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(self.hd_top_new, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
# Fill grid rows
self.grid_rows = []
for i in range(num_disps):
row = self.display_opt_widget_row(i)
self.grid_rows.append(row)
sizer_row = [(item, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 1)
for item in row]
self.grid.AddMany(sizer_row)
self.sizer_pos_conf.Add(self.grid, 0, wx.EXPAND|wx.ALL, 0)
def display_opt_widget_row(self, row_id):
"""Return a display position config widget row."""
# statbox_disp_opts = self.sizer_disp_opts.GetStaticBox()
statbox_disp_opts = self
row_id = wx.StaticText(statbox_disp_opts, -1, str(row_id))
row_left = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width, -1),
style=wx.TE_RIGHT|wx.TE_READONLY)
row_top = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width, -1),
style=wx.TE_RIGHT|wx.TE_READONLY)
row_right = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width, -1),
style=wx.TE_RIGHT|wx.TE_READONLY)
row_bottom = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width, -1),
style=wx.TE_RIGHT|wx.TE_READONLY)
row_left_new = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width, -1),
style=wx.TE_RIGHT|wx.TE_PROCESS_ENTER)
row_top_new = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width, -1),
style=wx.TE_RIGHT|wx.TE_PROCESS_ENTER)
row_left.Disable()
row_top.Disable()
row_right.Disable()
row_bottom.Disable()
row_left_new.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
row_top_new.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
row = [row_id, row_left, row_top, row_right, row_bottom, row_left_new, row_top_new]
return row
def set_unit_labels(self, use_px):
"""Show units in column labels."""
if use_px:
unit_str = "[px]"
else:
unit_str = "[mm]"
self.hd_left.SetLabel("Left " + unit_str)
self.hd_top.SetLabel("Top " + unit_str)
self.hd_right.SetLabel("Right " + unit_str)
self.hd_bott.SetLabel("Bottom " + unit_str)
self.hd_left_new.SetLabel("Left new " + unit_str)
self.hd_top_new.SetLabel("Top new " + unit_str)
def create_bottom_butts(self):
"""Create sizer for bottom row buttons."""
self.sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
self.cb_use_px = wx.CheckBox(self, -1, "Use pixels (exact)")
self.cb_use_px.SetValue(False)
self.cb_use_px.Bind(wx.EVT_CHECKBOX, self.onCbusepx)
self.button_preview = wx.Button(self, label="Preview")
self.button_apply = wx.Button(self, label="Apply")
self.button_cancel = wx.Button(self, label="Cancel")
self.button_preview.Bind(wx.EVT_BUTTON, self.onPreview)
self.button_apply.Bind(wx.EVT_BUTTON, self.onApply)
self.button_cancel.Bind(wx.EVT_BUTTON, self.onCancel)
self.button_help_pos = wx.BitmapButton(self, bitmap=self.help_bmp)
self.button_help_pos.Bind(wx.EVT_BUTTON, self.onHelpExactPositions)
self.sizer_buttons.Add(self.cb_use_px, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.sizer_buttons.AddStretchSpacer()
self.sizer_buttons.Add(self.button_help_pos, 0, wx.CENTER|wx.ALL, 5)
self.sizer_buttons.Add(self.button_preview, 0, wx.CENTER|wx.ALL, 5)
self.sizer_buttons.Add(self.button_apply, 0, wx.CENTER|wx.ALL, 5)
self.sizer_buttons.Add(self.button_cancel, 0, wx.CENTER|wx.ALL, 5)
def OnEnter(self, evt):
"""Bind pressing Enter in the txtctrl to update preview."""
self.onPreview(evt)
def OnClose(self, evt):
"""Make closing out behave as cancellation."""
self.onCancel(evt)
def onCbusepx(self, event):
"""Updates units of shown position data."""
use_px = self.cb_use_px.GetValue()
self.convert_units(use_px)
self.set_unit_labels(use_px)
def update_offsets_and_redraw(self):
"""Collect display postitions and redraw preview."""
max_ppi = self.display_sys.max_ppi()
if self.cb_use_px.GetValue():
unit_mult = 1
else:
unit_mult = max_ppi/25.4 # convert ppi to px/mm
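# Worked example: with max_ppi = 109.68 this gives unit_mult = 109.68 / 25.4 ≈ 4.32 px per mm,
# so an entered value of 100 (mm) becomes roughly 432 px in the PPI-normalized space.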
offs = []
for row in self.grid_rows:
new_off = []
for tc in row[-2:]:
tc_val = tc.GetValue()
try:
new_off.append(float(tc_val)*unit_mult)
except ValueError:
if tc_val:
msg = "Entered value '{}' is not valid.".format(tc_val)
else:
msg = "Please enter a position for every display. Some value is empty."
show_message_dialog(msg)
return False
offs.append(tuple(new_off))
offs = self.sanitize_offs(offs)
self.display_sys.update_ppinorm_offsets(offs)
self.parent.display_data = self.display_sys.get_disp_list(True)
self.parent.refresh_preview(use_ppi_px=True, force_refresh=True)
self.parent.create_shapes()
self.parent.frame.Refresh()
self.populate_fields()
return True
def onPreview(self, event):
"""Updates the display preview based on the entered position values."""
self.update_offsets_and_redraw()
def onApply(self, event):
"""Apply display positions and close dialog."""
res = self.update_offsets_and_redraw()
if res:
self.parent.button_save.Enable()
self.parent.button_cancel.Enable()
self.Destroy()
def onCancel(self, event):
"""Closes display position config, throwing away unsaved contents."""
# Restore old offsets
self.display_sys.update_ppinorm_offsets(self.old_ppinorm_offs)
# redraw preview with restored data
self.parent.display_data = self.display_sys.get_disp_list(True)
self.parent.refresh_preview(use_ppi_px=True, force_refresh=True)
self.parent.create_shapes()
self.parent.frame.Refresh()
# close dialog
self.parent.button_save.Enable()
self.parent.button_cancel.Enable()
self.Destroy()
def onHelpExactPositions(self, evt):
"""Popup exact position entry main help."""
text = ("In this dialog you may adjust the positions of your displays\n"
"more accurately by either entering measurements or just\n"
"fine tune the dragged positions by trial and error. You can\n"
"either enter the positions in millimeters or in the pixels\n"
"of the used image. Position update might be inaccurate if you\n"
"need to move a display substantially outside the shown area.\n"
"In this case save the current position and then proceed to\n"
"correct the position further.\n"
"\n"
"Current positions of the edges of each display Left, Top,\n"
"Right, Bottom are given as measured from the top left\n"
"corner of the display area such that the left-most and\n"
"top-most edge are at 0. Right, Bottom edge positions\n"
"include bezel sizes."
)
pop = HelpPopup(self, text,
show_image_quality=False,
)
btn = evt.GetEventObject()
pos = btn.ClientToScreen((0, 0))
sz = btn.GetSize()
pop.Position(pos, (0, sz[1]))
pop.Popup()
def populate_fields(self):
"""Populate config fields from DisplaySystem offsets."""
max_ppi = self.display_sys.max_ppi()
if self.cb_use_px.GetValue():
unit_mult = 1
else:
unit_mult = 1 / (max_ppi/25.4) # convert ppi to px/mm
crops = self.display_sys.get_ppi_norm_crops(wpproc.NUM_DISPLAYS*[(0, 0)])
bezels = self.display_sys.bezels_in_px()
for row, ltrb, bez in zip(self.grid_rows, crops, bezels):
row[0].SetLabel(str(self.grid_rows.index(row)))
row[1].SetValue(str(ltrb[0]*unit_mult))
row[2].SetValue(str(ltrb[1]*unit_mult))
row[3].SetValue(str((ltrb[2] + bez[0])*unit_mult))
row[4].SetValue(str((ltrb[3] + bez[1])*unit_mult))
row[5].SetValue(str(ltrb[0]*unit_mult))
row[6].SetValue(str(ltrb[1]*unit_mult))
def convert_units(self, use_px):
"""Convert table data between px and mm in place."""
max_ppi = self.display_sys.max_ppi()
if use_px:
# convert from mm to px
unit_mult = max_ppi / 25.4
else:
# convert from px to mm
unit_mult = 1 / (max_ppi/25.4)
for row in self.grid_rows:
for tc in row[1:]:
curval = tc.GetValue()
if curval:
tc.SetValue(str(unit_mult * float(curval)))
def sanitize_offs(self, offsets):
"""Return offsets translated to be non-negative, anchoring to (0,0)."""
sanitized_offs = []
leftmost_offset = min([off[0] for off in offsets])
topmost_offset = min([off[1] for off in offsets])
for off in offsets:
sanitized_offs.append(
(
off[0] - leftmost_offset,
off[1] - topmost_offset
)
)
return sanitized_offs
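# Example: offsets [(-100, 0), (50, -20)] come back as [(0, 20), (150, 0)], i.e. the
# left-most and top-most edges are anchored to 0 as the docstring describes.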
class PerspectiveConfig(wx.Dialog):
"""Perspective data configuration dialog."""
def __init__(self, parent):
wx.Dialog.__init__(self, parent, -1,
'Configure wallpaper perspective rotations',
# size=(750, 850)
)
self.tc_width = 150
self.frame = parent
self.display_sys = parent.display_sys
self.persp_dict = self.display_sys.perspective_dict
self.test_image = None
self.help_bmp = wx.ArtProvider.GetBitmap(wx.ART_QUESTION, wx.ART_BUTTON, (20, 20))
self.warn_large_img = GeneralSettingsData().warn_large_img
sizer_main = wx.BoxSizer(wx.VERTICAL)
# Master options
sizer_top = wx.BoxSizer(wx.HORIZONTAL)
self.cb_master = wx.CheckBox(self, -1, "Use perspective corrections")
self.cb_master.SetValue(self.display_sys.use_perspective)
# self.cb_master.Bind(wx.EVT_CHECKBOX, self.onCbmaster)
self.button_help_persp = wx.BitmapButton(self, bitmap=self.help_bmp)
self.button_help_persp.Bind(wx.EVT_BUTTON, self.onHelpPerspective)
sizer_top.Add(self.cb_master, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer_top.AddStretchSpacer()
sizer_top.Add(self.button_help_persp, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 10)
# Profile options
self.create_profile_opts()
# Display perspective config
self.create_display_opts(wpproc.NUM_DISPLAYS)
# Bottom row buttons
self.create_bottom_butts()
# sizer_main.Add(self.cb_master, 0, wx.ALL|wx.ALIGN_LEFT, 5)
sizer_main.Add(sizer_top, 0, wx.ALL|wx.EXPAND, 0)
sizer_main.Add(self.sizer_prof_opts, 0, wx.ALL|wx.EXPAND, 5)
sizer_main.Add(self.sizer_disp_opts, 0, wx.ALL|wx.EXPAND, 5)
sizer_main.Add(self.sizer_buttons, 0, wx.ALL|wx.EXPAND, 5)
self.SetSizer(sizer_main)
self.Fit()
if self.display_sys.default_perspective:
self.populate_fields(self.display_sys.default_perspective)
self.choice_profiles.SetSelection(
self.choice_profiles.FindString(self.display_sys.default_perspective)
)
def create_profile_opts(self):
"""Create sizer for perspective profile options."""
self.sizer_prof_opts = wx.StaticBoxSizer(wx.VERTICAL, self, "Perspective profile")
statbox_profs = self.sizer_prof_opts.GetStaticBox()
self.sizer_prof_bar = wx.BoxSizer(wx.HORIZONTAL)
self.profnames = list(self.persp_dict.keys())
self.profnames.append("Create a new profile")
self.choice_profiles = wx.Choice(statbox_profs, -1,
name="ProfileChoice", choices=self.profnames)
self.choice_profiles.Bind(wx.EVT_CHOICE, self.onSelect)
st_choice_profiles = wx.StaticText(statbox_profs, -1, "Perspective profiles:")
# name txt ctrl
st_name = wx.StaticText(statbox_profs, -1, "Profile name:")
self.tc_name = wx.TextCtrl(statbox_profs, -1, size=(self.tc_width, -1))
self.tc_name.SetMaxLength(14)
# buttons
self.button_new = wx.Button(statbox_profs, label="New")
self.button_save = wx.Button(statbox_profs, label="Save")
self.button_delete = wx.Button(statbox_profs, label="Delete")
self.button_help_perspprof = wx.BitmapButton(statbox_profs, bitmap=self.help_bmp)
self.button_new.Bind(wx.EVT_BUTTON, self.onCreateNewProfile)
self.button_save.Bind(wx.EVT_BUTTON, self.onSave)
self.button_delete.Bind(wx.EVT_BUTTON, self.onDeleteProfile)
self.button_help_perspprof.Bind(wx.EVT_BUTTON, self.onHelpPersProfile)
# Add profile bar items to the sizer
self.sizer_prof_bar.Add(st_choice_profiles, 0, wx.CENTER|wx.ALL, 5)
self.sizer_prof_bar.Add(self.choice_profiles, 0, wx.CENTER|wx.ALL, 5)
self.sizer_prof_bar.Add(st_name, 0, wx.CENTER|wx.ALL, 5)
self.sizer_prof_bar.Add(self.tc_name, 0, wx.CENTER|wx.ALL, 5)
self.sizer_prof_bar.Add(self.button_new, 0, wx.CENTER|wx.ALL, 5)
self.sizer_prof_bar.Add(self.button_save, 0, wx.CENTER|wx.ALL, 5)
self.sizer_prof_bar.Add(self.button_delete, 0, wx.CENTER|wx.ALL, 5)
self.sizer_prof_bar.AddStretchSpacer()
self.sizer_prof_bar.Add(self.button_help_perspprof, 0, wx.CENTER|wx.LEFT, 5)
self.sizer_prof_opts.Add(self.sizer_prof_bar, 0,
wx.EXPAND|wx.ALL, 5)
sline = wx.StaticLine(statbox_profs, -1, style=wx.LI_HORIZONTAL)
self.sizer_prof_opts.Add(sline, 0, wx.EXPAND|wx.ALL, 5)
# Profile related options
self.cb_dispsys_def = wx.CheckBox(statbox_profs, -1, "Default for this display setup")
sizer_centr_disp = wx.BoxSizer(wx.HORIZONTAL)
st_centr_disp = wx.StaticText(statbox_profs, -1, "Central display:")
disp_ids = [str(idx) for idx in range(wpproc.NUM_DISPLAYS)]
self.choice_centr_disp = wx.Choice(statbox_profs, -1,
name="CentDispChoice", choices=disp_ids)
self.choice_centr_disp.SetSelection(0)
sizer_centr_disp.Add(st_centr_disp, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer_centr_disp.Add(self.choice_centr_disp, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sizer_viewer_off = wx.BoxSizer(wx.HORIZONTAL)
st_vwroffs = wx.StaticText(statbox_profs, -1,
"Viewer offset from central display center [mm]:")
self.stlist_vieweroffs = [
wx.StaticText(statbox_profs, -1, "hor:"),
wx.StaticText(statbox_profs, -1, "ver:"),
wx.StaticText(statbox_profs, -1, "dist:"),
]
self.tclist_vieweroffs = [
wx.TextCtrl(statbox_profs, -1, size=(self.tc_width*0.69, -1), style=wx.TE_RIGHT),
wx.TextCtrl(statbox_profs, -1, size=(self.tc_width*0.69, -1), style=wx.TE_RIGHT),
wx.TextCtrl(statbox_profs, -1, size=(self.tc_width*0.69, -1), style=wx.TE_RIGHT)
]
for tc in self.tclist_vieweroffs:
if isinstance(tc, wx.TextCtrl):
tc.SetValue("0")
szr_stlist = [(item, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
for item in self.stlist_vieweroffs]
szr_tclist = [(item, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
for item in self.tclist_vieweroffs]
sizer_viewer_off.Add(st_vwroffs, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
for st, tc in zip(szr_stlist, szr_tclist):
sizer_viewer_off.Add(st[0], st[1], st[2], st[3])
sizer_viewer_off.Add(tc[0], tc[1], tc[2], tc[3])
self.button_help_centrald = wx.BitmapButton(statbox_profs, bitmap=self.help_bmp)
self.button_help_centrald.Bind(wx.EVT_BUTTON, self.onHelpCentralDisp)
sizer_viewer_off.AddStretchSpacer()
sizer_viewer_off.Add(self.button_help_centrald, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
# Add remaining options to persp profile sizer
self.sizer_prof_opts.Add(self.cb_dispsys_def, 0, wx.ALL, 5)
self.sizer_prof_opts.Add(sizer_centr_disp, 0, wx.LEFT, 5)
self.sizer_prof_opts.Add(sizer_viewer_off, 0, wx.LEFT|wx.EXPAND, 5)
def create_display_opts(self, num_disps):
"""Create sizer for display perspective options."""
self.sizer_disp_opts = wx.StaticBoxSizer(wx.HORIZONTAL, self,
"Display perspective configuration")
statbox_disp_opts = self.sizer_disp_opts.GetStaticBox()
cols = 8
gap = 5
self.grid = wx.FlexGridSizer(cols, gap, gap)
# header
hd_id = wx.StaticText(statbox_disp_opts, -1, "Display")
hd_sax = wx.StaticText(statbox_disp_opts, -1, "Swivel axis")
hd_san = wx.StaticText(statbox_disp_opts, -1, "Swivel angle")
hd_sol = wx.StaticText(statbox_disp_opts, -1, "Sw. ax. lat. off.")
hd_sod = wx.StaticText(statbox_disp_opts, -1, "Sw. ax. dep. off.")
hd_tan = wx.StaticText(statbox_disp_opts, -1, "Tilt angle")
hd_tov = wx.StaticText(statbox_disp_opts, -1, "Ti. ax. ver. off.")
hd_tod = wx.StaticText(statbox_disp_opts, -1, "Ti. ax. dep. off.")
self.grid.Add(hd_id, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(hd_sax, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(hd_san, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(hd_sol, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(hd_sod, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(hd_tan, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(hd_tov, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
self.grid.Add(hd_tod, 0, wx.LEFT|wx.RIGHT|wx.BOTTOM, 1)
# Fill grid rows
self.grid_rows = []
for i in range(num_disps):
row = self.display_opt_widget_row(i)
self.grid_rows.append(row)
sizer_row = [(item, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 1)
for item in row]
self.grid.AddMany(sizer_row)
# Build sizer
self.sizer_disp_opts.Add(self.grid, 0, wx.ALL|wx.EXPAND, 5)
# help
self.button_help_data = wx.BitmapButton(statbox_disp_opts, bitmap=self.help_bmp)
self.button_help_data.Bind(wx.EVT_BUTTON, self.onHelpData)
self.sizer_disp_opts.AddStretchSpacer()
self.sizer_disp_opts.Add(self.button_help_data, 0,
wx.ALIGN_TOP|wx.RIGHT, 5)
def display_opt_widget_row(self, row_id):
"""Return a display option widget row."""
statbox_disp_opts = self.sizer_disp_opts.GetStaticBox()
row_id = wx.StaticText(statbox_disp_opts, -1, str(row_id))
row_sax = wx.Choice(statbox_disp_opts, -1, name="SwivelAxisChoice",
size=(self.tc_width*0.7, -1),
choices=["No swivel", "Left", "Right"])
row_san = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width*0.69, -1),
style=wx.TE_RIGHT)
row_sol = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width*0.69, -1),
style=wx.TE_RIGHT)
row_sod = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width*0.69, -1),
style=wx.TE_RIGHT)
row_tan = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width*0.69, -1),
style=wx.TE_RIGHT)
row_tov = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width*0.69, -1),
style=wx.TE_RIGHT)
row_tod = wx.TextCtrl(statbox_disp_opts, -1, size=(self.tc_width*0.69, -1),
style=wx.TE_RIGHT)
# Prefill neutral data
row_sax.SetSelection(0)
row_san.SetValue("0")
row_sol.SetValue("0")
row_sod.SetValue("0")
row_tan.SetValue("0")
row_tov.SetValue("0")
row_tod.SetValue("0")
row = [row_id, row_sax, row_san, row_sol, row_sod, row_tan, row_tov, row_tod]
return row
def create_bottom_butts(self):
"""Create sizer for bottom row buttons."""
self.sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
self.button_align_test = wx.Button(self, label="Align test")
self.button_test_pick = wx.Button(self, label="Pick image")
self.button_test_imag = wx.Button(self, label="Test image")
self.button_ok = wx.Button(self, label="OK")
self.button_cancel = wx.Button(self, label="Close")
self.button_align_test.Bind(wx.EVT_BUTTON, self.onAlignTest)
self.button_test_pick.Bind(wx.EVT_BUTTON, self.onChooseTestImage)
self.button_test_imag.Bind(wx.EVT_BUTTON, self.onTestWallpaper)
self.button_ok.Bind(wx.EVT_BUTTON, self.onOk)
self.button_cancel.Bind(wx.EVT_BUTTON, self.onCancel)
self.sizer_buttons.Add(self.button_align_test, 0, wx.CENTER|wx.ALL, 5)
sline = wx.StaticLine(self, -1, style=wx.LI_VERTICAL)
self.sizer_buttons.Add(sline, 0, wx.EXPAND|wx.ALL, 5)
self.tc_testimage = wx.TextCtrl(self, -1, size=(self.tc_width, -1))
self.sizer_buttons.Add(self.tc_testimage, 0, wx.CENTER|wx.ALL, 5)
self.sizer_buttons.Add(self.button_test_pick, 0, wx.CENTER|wx.ALL, 5)
self.sizer_buttons.Add(self.button_test_imag, 0, wx.CENTER|wx.ALL, 5)
self.sizer_buttons.AddStretchSpacer()
self.sizer_buttons.Add(self.button_ok, 0, wx.CENTER|wx.ALL, 5)
self.sizer_buttons.Add(self.button_cancel, 0, wx.CENTER|wx.ALL, 5)
def populate_fields(self, persp_name):
"""Populate config fields from DisplaySystem perspective dict."""
persd = self.persp_dict[persp_name]
self.cb_master.SetValue(self.display_sys.use_perspective)
self.tc_name.SetValue(persp_name)
self.cb_dispsys_def.SetValue(persp_name == self.display_sys.default_perspective)
self.choice_centr_disp.SetSelection(persd["central_disp"])
px_per_mm = self.display_sys.max_ppi() / 25.4
for tc, offs in zip(self.tclist_vieweroffs, persd["viewer_pos"]):
tc.SetValue(str(offs / px_per_mm))
self.populate_grid(persd["swivels"], persd["tilts"], px_per_mm)
def populate_grid(self, swivels, tilts, px_per_mm):
"""Fill data grid from lists."""
for row, sw, ti in zip(self.grid_rows, swivels, tilts):
row[0].SetLabel(str(self.grid_rows.index(row)))
row[1].SetSelection(sw[0])
row[2].SetValue(str(sw[1]))
row[3].SetValue(str(round(sw[2] / px_per_mm, 1)))
row[4].SetValue(str(round(sw[3] / px_per_mm, 1)))
row[5].SetValue(str(ti[0]))
row[6].SetValue(str(round(ti[1] / px_per_mm, 1)))
row[7].SetValue(str(round(ti[2] / px_per_mm, 1)))
def collect_data_column(self, column):
"""Collect data from display data grid, returns a column as a list.
Column ids are
0 display id
1 swivel axis
2 swivel angle
3 swivel lat offset
4 swivel dep offset
5 tilt angle
6 tilt vert offset
7 tilt dep offset
"""
data = []
for row in self.grid_rows:
if column == 0:
datum = row[column].GetSelection()
try:
data.append(int(datum))
except ValueError:
pass
elif column == 1:
datum = row[column].GetSelection()
try:
data.append(int(datum))
except ValueError:
pass
else:
datum = row[column].GetLineText(0)
try:
data.append(float(datum))
except ValueError:
pass
return data
def update_choiceprofile(self):
"""Reload profile list into the choice box."""
self.profnames = list(self.persp_dict.keys())
self.profnames.append("Create a new profile")
self.choice_profiles.SetItems(self.profnames)
def check_for_large_image_size(self, persp_name):
"""Compute how large an image the current perspective
settings would produce as an intermediate step."""
if self.frame.cb_offsets.GetValue():
offsets = []
for tc in self.frame.tc_list_offsets:
off_str = tc.GetValue().split(",")
try:
offsets.append(
(int(off_str[0]), int(off_str[1]))
)
except (ValueError, IndexError):
offsets.append(
(0, 0)
)
else:
offsets = wpproc.NUM_DISPLAYS * [(0, 0)]
crops = self.display_sys.get_ppi_norm_crops(offsets)
persp_data = self.display_sys.get_persp_data(persp_name)
if persp_data:
proj_plane_crops, persp_coeffs = persp.get_backprojected_display_system(crops,
persp_data)
# Canvas containing back-projected displays
canv = wpproc.compute_working_canvas(proj_plane_crops)
max_size = 12000
if canv[0] > max_size or canv[1] > max_size:
return (True, canv)
return (False, canv)
#
# Button methods
#
def checkCbmaster(self, event=None):
"""Save master checkbox state into display system."""
master = self.cb_master.GetValue()
if master != self.display_sys.use_perspective:
self.display_sys.use_perspective = master
self.display_sys.save_system()
def onSelect(self, event):
"""Acts once a profile is picked in the dropdown menu."""
event_object = event.GetEventObject()
if event_object.GetName() == "ProfileChoice":
item = event.GetSelection()
item_str = event.GetString()
if item_str == "Create a new profile":
self.onCreateNewProfile(event)
else:
self.populate_fields(item_str)
else:
pass
def onSave(self, evt=None):
"""Save perspective config to file and update display system state."""
persp_name = self.tc_name.GetLineText(0)
if not persp_name:
msg = "Profile name is required."
show_message_dialog(msg)
return 0
toggle = self.cb_master.GetValue()
is_ds_def = self.cb_dispsys_def.GetValue()
centr_disp = int(self.choice_centr_disp.GetSelection())
px_per_mm = self.display_sys.max_ppi() / 25.4
try:
# convert lengths to ppi norm res
viewer_offset = [
px_per_mm * float(tc.GetLineText(0)) for tc in self.tclist_vieweroffs
]
except ValueError:
msg = "Viewer offsets should be lenghts in millimeters, separate decimals with a point."
show_message_dialog(msg)
return 0
if viewer_offset[2] <= 0:
msg = "Viewer distance must be entered and positive."
show_message_dialog(msg)
return 0
viewer_data = (centr_disp, viewer_offset)
swivels = []
sw_axii = self.collect_data_column(1)
sw_angl = self.collect_data_column(2)
sw_lato = self.collect_data_column(3)
sw_depo = self.collect_data_column(4)
for ax, an, lo, do in zip(sw_axii, sw_angl, sw_lato, sw_depo):
swivels.append(
(ax, an, px_per_mm * lo, px_per_mm * do)
)
tilts = []
ti_angl = self.collect_data_column(5)
ti_vero = self.collect_data_column(6)
ti_depo = self.collect_data_column(7)
for an, vo, do in zip(ti_angl, ti_vero, ti_depo):
tilts.append(
(an, px_per_mm * vo, px_per_mm * do)
)
# update and save data
# check for large images
if self.warn_large_img:
self.display_sys.update_perspectives(
"temp", toggle, is_ds_def, viewer_data, swivels, tilts
)
too_large, canvas = self.check_for_large_image_size("temp")
if too_large:
msg = ("These perspective settings will produce large intermediate images "
+ "which might use a large amount of system memory during processing. "
+ "Take care not to set the perspective so that you would see arbitrarily "
+ "far into the projected image as this will produce unboundedly large "
+ "images and will cause problems, even a system crash."
+ "\n\n"
+ "Intermediate resolution with these settings is {}x{}".format(canvas[0], canvas[1])
+ "\n\n"
+ "Do you want to continue?\n"
+ "\n"
+ "This warning may be disabled from settings.")
res = show_message_dialog(msg, "Info", style="YES_NO")
if not res:
# Stop saving, remove temp
self.persp_dict.pop("temp", None)
return 0
else:
# Continue and write profile
self.persp_dict.pop("temp", None)
self.display_sys.update_perspectives(
persp_name, toggle, is_ds_def, viewer_data, swivels, tilts
)
else:
# No large images, temp not needed
self.persp_dict.pop("temp", None)
self.display_sys.update_perspectives(
persp_name, toggle, is_ds_def, viewer_data, swivels, tilts
)
else:
self.display_sys.update_perspectives(
persp_name, toggle, is_ds_def, viewer_data, swivels, tilts
)
self.display_sys.save_perspectives()
# update dialog profile list
self.update_choiceprofile()
self.choice_profiles.SetSelection(
self.choice_profiles.FindString(persp_name)
)
return 1
def onDeleteProfile(self, evt):
"""Delete selected perspective profile."""
persp_name = self.choice_profiles.GetString(self.choice_profiles.GetSelection())
if self.display_sys.default_perspective == persp_name:
self.display_sys.default_perspective = None
self.display_sys.save_system()
self.persp_dict.pop(persp_name, None)
self.display_sys.save_perspectives()
# update dialog profile list
self.update_choiceprofile()
self.onCreateNewProfile(None)
def onCreateNewProfile(self, evt):
"""Reset profile settings options to neutral state."""
self.cb_master.SetValue(self.display_sys.use_perspective)
self.choice_profiles.SetSelection(
self.choice_profiles.FindString("Create a new profile")
)
self.tc_name.SetValue("")
self.cb_dispsys_def.SetValue(False)
self.choice_centr_disp.SetSelection(0)
for tc in self.tclist_vieweroffs:
tc.SetValue(str(0))
swivels = wpproc.NUM_DISPLAYS*[(0, 0.0, 0.0, 0.0)]
tilts = wpproc.NUM_DISPLAYS*[(0.0, 0.0, 0.0)]
self.populate_grid(swivels, tilts, 1)
def onOk(self, event):
"""Apply/save perspective settings and close dialog."""
# self.checkCbmaster()
if self.tc_name.GetValue():
self.onSave()
self.EndModal(wx.ID_OK)
def onCancel(self, event):
"""Closes perspective config, throwing away unsaved contents."""
self.Destroy()
def onAlignTest(self, event=None, image=None):
"""Sets a test image wallpaper using the current perspectve config."""
use_persp = self.cb_master.GetValue()
if not use_persp:
msg = "Perspective corrections are disabled. Enable them to test?"
res = show_message_dialog(msg, style="YES_NO")
if res:
self.cb_master.SetValue(True)
self.checkCbmaster() # update & save display_sys
else:
# Don't enable, stop testing.
msg = "Perspective corrections are disabled, abort test."
res = show_message_dialog(msg)
return 0
if image:
testimage = [os.path.realpath(image)]
else:
testimage = [os.path.join(PATH, "superpaper/resources/test.png")]
if not os.path.isfile(testimage[0]):
msg = "Test image not found in {}.".format(testimage)
show_message_dialog(msg, "Error")
return 0
# Use the settings currently written out in the fields!
inches = [dsp.diagonal_size()[1] for dsp in self.display_sys.disp_list]
offsets = []
for off_tc in self.frame.tc_list_offsets:
off = off_tc.GetLineText(0).split(",")
try:
offsets.append([int(off[0]), int(off[1])])
except (IndexError, ValueError):
show_message_dialog(
"Offsets must be integer pairs separated with a comma!\n"
"Problematic offset is {}".format(off)
)
return 0
flat_offsets = []
for off in offsets:
for pix in off:
flat_offsets.append(pix)
busy = wx.BusyCursor()
# Save entered perspective values and get its name
save_succ = self.onSave()
if save_succ == 0:
# Save failed or canceled, abort test.
del busy
return 0
perspective = self.choice_profiles.GetString(
self.choice_profiles.GetSelection()
)
wx.Yield()
# Use the simplified CLI profile class
wpproc.refresh_display_data()
profile = CLIProfileData(testimage,
None,
inches,
None,
flat_offsets,
perspective
)
thrd = change_wallpaper_job(profile, force=True)
while thrd.is_alive():
time.sleep(0.5)
del busy
return 1
def onChooseTestImage(self, event):
"""Open a file dialog to choose a test image."""
with wx.FileDialog(self, "Choose a test image",
wildcard=("Image files (*.jpg;*.jpeg;*.png;*.bmp;*.gif;*.tiff;*.webp)"
"|*.jpg;*.jpeg;*.png;*.bmp;*.gif;*.tiff;*.webp"),
defaultDir=self.frame.defdir,
style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as file_dialog:
if file_dialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# Proceed loading the file chosen by the user
self.test_image = file_dialog.GetPath()
self.tc_testimage.SetValue(
os.path.basename(self.test_image)
)
return
def onTestWallpaper(self, event):
"""Test the current perspective options by triggering a new wallpaper
from the active wallpaper profile, if any."""
if self.test_image:
self.onAlignTest(image=self.test_image)
else:
msg = "Choose a test image first."#.format(testimage)
show_message_dialog(msg, "Error")
return 0
return 1
def onHelpPerspective(self, evt):
"""Popup perspectives main help."""
text = ("Perspective corrections were created to fix the\n"
"wallpaper misalignment that arises when your diplays\n"
"are not in a common plane, like they would be on the\n"
"wall. Straight lines are cut into pieces when displays\n"
"are both tilted and turned respective to each other.\n"
"These corrections work by undoing the perspective changes\n"
"caused by the rotation of the displays.\n"
"\n"
"In this dialog you may configure perspective setting\n"
"profiles, and test their effects with the tools in the\n"
"lower left corner."
)
# use_per = self.cb_master.GetValue()
# persname = self.choice_profiles.GetString(self.choice_profiles.GetSelection())
pop = HelpPopup(self, text,
show_image_quality=False,
# use_perspective=use_per,
# persp_name=persname
)
btn = evt.GetEventObject()
pos = btn.ClientToScreen((0, 0))
sz = btn.GetSize()
pop.Position(pos, (0, sz[1]))
pop.Popup()
def onHelpPersProfile(self, evt):
"""Popup perspective profile help."""
text = ("Perspective corrections do not work equally well\n"
"with different kinds of images so you can create\n"
"separate profiles, such as 'tilts_only' or 'swivel+tilt'")
pop = HelpPopup(self, text)
btn = evt.GetEventObject()
pos = btn.ClientToScreen((0, 0))
sz = btn.GetSize()
pop.Position(pos, (0, sz[1]))
pop.Popup()
def onHelpCentralDisp(self, evt):
"""Popup central display & viewer position help."""
text = ("To compute the perspective transforms the (rough)\n"
"position of your eyes relative to your displays\n"
"needs to be known. This is entered by selecting\n"
"one display as the central display and entering\n"
"distance offsets relative to that display's center.\n"
"\n"
"Distance must be entered and non-zero, horizontal\n"
"and vertical offsets are optional. Lenghts are\n"
"in millimeters."
)
pop = HelpPopup(self, text)
btn = evt.GetEventObject()
pos = btn.ClientToScreen((0, 0))
sz = btn.GetSize()
pop.Position(pos, (0, sz[1]))
pop.Popup()
def onHelpData(self, evt):
"""Popup central display & viewer position help."""
text = ("Here you enter the display rotation (tilt and swivel)\n"
"parameters. Use these parameters to tell how your displays\n"
"are rotated relative to the setup where they would be in a\n"
"common plane.\n"
"The parameters are:\n"
" - swivel axis: left or right edge of the display\n"
" - swivel angle in degrees\n"
" - swivel axis lateral offset from display edge [mm]\n"
" - swivel axis depth offset from display edge [mm]\n"
" - tilt angle in degrees\n"
" - tilt axis vertical offset from horizontal midline [mm]\n"
" - tilt axis depth offset from display surface [mm]",
"Signs of angles are determined by the right hand rule:\n"
"Grab the rotation axis with your right hand fist and extend\n"
"your thumb in the direction of the axis: up for swivels and\n"
"left for tilts. Now the direction of your curled fingers will\n"
"tell the direction the display will rotate with a positive angle\n"
"and the rotation is reversed for a negative angle.",
"The axis offsets are completely optional. The most important\n"
"one is the tilt axis DEPTH offset since the actual axis\n"
"of the tilt is the joint in the display mount behind the panel.\n"
"Without this depth offset the tilt is performed around the display\n"
"horizontal midline which is on the display surface."
)
pop = HelpPopup(self, text)
btn = evt.GetEventObject()
pos = btn.ClientToScreen((0, 0))
sz = btn.GetSize()
pop.Position(pos, (0, sz[1]))
pop.Popup()
class SettingsFrame(wx.Frame):
"""Settings dialog frame."""
def __init__(self, parent_tray_obj):
wx.Frame.__init__(self, parent=None, title="Superpaper General Settings")
self.frame_sizer = wx.BoxSizer(wx.VERTICAL)
settings_panel = SettingsPanel(self, parent_tray_obj)
self.frame_sizer.Add(settings_panel, 1, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.frame_sizer)
self.Fit()
self.Layout()
self.Center()
self.Show()
class SettingsPanel(wx.Panel):
"""Settings dialog contents."""
def __init__(self, parent, parent_tray_obj):
wx.Panel.__init__(self, parent)
self.frame = parent
self.parent_tray_obj = parent_tray_obj
self.sizer_main = wx.BoxSizer(wx.VERTICAL)
self.sizer_grid_settings = wx.GridSizer(6, 2, 5, 5)
self.sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
pnl = self
st_logging = wx.StaticText(pnl, -1, "Logging")
st_usehotkeys = wx.StaticText(pnl, -1, "Use hotkeys")
st_warn_large = wx.StaticText(pnl, -1, "Large image warning")
st_hk_next = wx.StaticText(pnl, -1, "Hotkey: Next wallpaper")
st_hk_pause = wx.StaticText(pnl, -1, "Hotkey: Pause slideshow")
st_setcmd = wx.StaticText(pnl, -1, "Custom command")
self.cb_logging = wx.CheckBox(pnl, -1, "")
self.cb_usehotkeys = wx.CheckBox(pnl, -1, "")
self.cb_warn_large = wx.CheckBox(pnl, -1, "")
self.tc_hk_next = wx.TextCtrl(pnl, -1, size=(200, -1))
self.tc_hk_pause = wx.TextCtrl(pnl, -1, size=(200, -1))
self.tc_setcmd = wx.TextCtrl(pnl, -1, size=(200, -1))
self.sizer_grid_settings.AddMany(
[
(st_logging, 0, wx.ALIGN_RIGHT),
(self.cb_logging, 0, wx.ALIGN_LEFT),
(st_usehotkeys, 0, wx.ALIGN_RIGHT),
(self.cb_usehotkeys, 0, wx.ALIGN_LEFT),
(st_warn_large, 0, wx.ALIGN_RIGHT),
(self.cb_warn_large, 0, wx.ALIGN_LEFT),
(st_hk_next, 0, wx.ALIGN_RIGHT),
(self.tc_hk_next, 0, wx.ALIGN_LEFT),
(st_hk_pause, 0, wx.ALIGN_RIGHT),
(self.tc_hk_pause, 0, wx.ALIGN_LEFT),
(st_setcmd, 0, wx.ALIGN_RIGHT),
(self.tc_setcmd, 0, wx.ALIGN_LEFT),
]
)
self.update_fields()
self.button_save = wx.Button(self, label="Save")
self.button_close = wx.Button(self, label="Close")
self.button_save.Bind(wx.EVT_BUTTON, self.onSave)
self.button_close.Bind(wx.EVT_BUTTON, self.onClose)
self.sizer_buttons.AddStretchSpacer()
self.sizer_buttons.Add(self.button_save, 0, wx.ALL, 5)
self.sizer_buttons.Add(self.button_close, 0, wx.ALL, 5)
self.sizer_main.Add(self.sizer_grid_settings, 0, wx.CENTER|wx.EXPAND|wx.ALL, 5)
self.sizer_main.Add(self.sizer_buttons, 0, wx.EXPAND)
self.SetSizer(self.sizer_main)
self.sizer_main.Fit(parent)
def update_fields(self):
"""Updates dialog field contents."""
g_settings = GeneralSettingsData()
self.cb_logging.SetValue(g_settings.logging)
self.cb_usehotkeys.SetValue(g_settings.use_hotkeys)
self.cb_warn_large.SetValue(g_settings.warn_large_img)
self.tc_hk_next.ChangeValue(self.show_hkbinding(g_settings.hk_binding_next))
self.tc_hk_pause.ChangeValue(self.show_hkbinding(g_settings.hk_binding_pause))
self.tc_setcmd.ChangeValue(g_settings.set_command)
def show_hkbinding(self, hktuple):
"""Formats hotkey tuple as a readable string."""
hkstring = "+".join(hktuple)
return hkstring
def onSave(self, event):
"""Saves settings to file."""
current_settings = GeneralSettingsData()
current_settings.logging = self.cb_logging.GetValue()
current_settings.use_hotkeys = self.cb_usehotkeys.GetValue()
current_settings.warn_large_img = self.cb_warn_large.GetValue()
if self.tc_hk_next.GetLineText(0):
current_settings.hk_binding_next = tuple(
self.tc_hk_next.GetLineText(0).strip().split("+")
)
else:
current_settings.hk_binding_next = None
if self.tc_hk_pause.GetLineText(0):
current_settings.hk_binding_pause = tuple(
self.tc_hk_pause.GetLineText(0).strip().split("+")
)
else:
current_settings.hk_binding_pause = None
current_settings.set_command = self.tc_setcmd.GetLineText(0).strip()
current_settings.save_settings()
# after saving file apply in tray object
self.parent_tray_obj.read_general_settings()
def onClose(self, event):
"""Closes settings panel."""
self.frame.Close(True)
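# Illustrative sketch (not part of the Superpaper code above): how a hotkey
# string entered in the settings dialog round-trips between the text field and
# the stored tuple, mirroring show_hkbinding() and onSave(). The binding value
# below is a made-up example.
def _example_hotkey_roundtrip():
    binding = ("control", "super", "w")
    as_text = "+".join(binding)                 # what show_hkbinding() displays
    parsed = tuple(as_text.strip().split("+"))  # what onSave() stores back
    assert parsed == binding
    return as_text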
class HelpFrame(wx.Frame):
"""Help dialog frame."""
def __init__(self, parent=None):
wx.Frame.__init__(self, parent=parent, title="Superpaper Help")
self.frame_sizer = wx.BoxSizer(wx.VERTICAL)
help_panel = HelpPanel(self)
self.frame_sizer.Add(help_panel, 1, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.frame_sizer)
self.SetIcon(wx.Icon(TRAY_ICON, wx.BITMAP_TYPE_PNG))
self.Fit()
self.Layout()
self.Center()
self.Show()
class HelpPanel(wx.Panel):
"""Help dialog contents."""
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.frame = parent
self.sizer_main = wx.BoxSizer(wx.VERTICAL)
self.sizer_helpcontent = wx.BoxSizer(wx.VERTICAL)
self.sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
current_settings = GeneralSettingsData()
show_help = current_settings.show_help
# st_show_at_start = wx.StaticText(self, -1, "Show this help at start")
self.cb_show_at_start = wx.CheckBox(self, -1, "Show this help at start")
self.cb_show_at_start.SetValue(show_help)
self.button_close = wx.Button(self, label="Close")
self.button_close.Bind(wx.EVT_BUTTON, self.onClose)
self.sizer_buttons.AddStretchSpacer()
# self.sizer_buttons.Add(st_show_at_start, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.sizer_buttons.Add(self.cb_show_at_start, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.sizer_buttons.Add(self.button_close, 0, wx.CENTER|wx.ALL, 5)
help_str = """
How to use Superpaper:
In the Wallpaper Configuration you can adjust all your wallpaper settings. Other application wide
settings can be changed in the Settings menu. Both are accessible from the system tray menu.
IMPORTANT NOTE: For the wallpapers to be set correctly, you must set in your OS the background
fitting option to 'Span'.
Description of Wallpaper Configuration 'advanced span' options:
In advanced span mode PPI, bezel and perspective corrections are applied to the wallpaper. The
following settings are used to configure this:
- Physical display positions:
In 'advanced span' mode Superpaper corrects for different pixel densities between displays
                and this means that to get a correctly spanned image across the monitor array, the relative
                physical locations of the displays need to be known. A configuration is guessed but if your
monitors are arranged in another way, you can adjust the positions of the displays by
entering the 'Positions' tool and dragging the display previews.
- Bezel correction:
Display bezel thicknesses and gaps can be taken into account when computing the wallpaper
span. Enter bezel sizes by selecting 'Configure bezels' in Advanced Wallpaper Adjustment
subsection. Adjacent bezels and gap are added together.
- Display size detection:
To apply PPI correction Superpaper needs to know the physical sizes of your displays. These
are attempted to be detected automatically. If this fails, you can enter the correct values
under 'Display Diagonal Sizes'.
- Display rotations and viewer position
Perspective corrections use the position of the viewer and the 3D alignment of the displays to
                adjust the shown image. Details on how to configure this are in the help texts of the perspective
configuration dialog.
Tips:
- Superpaper running in the background is controlled from the Tray Icon/Applet.
- To start Superpaper in the background, disable the 'Show this help at start' checkbox.
- You can use the given example profiles as templates: just change the name and whatever else,
          save, and it's a new profile.
- 'Align Test' feature allows you to test your alignment settings.
"""
# st_help = wx.StaticText(self, -1, help_str)
st_help = wx.TextCtrl(self, -1, help_str, size=(700, 400),
style=wx.TE_MULTILINE|wx.TE_READONLY)
self.sizer_helpcontent.Add(st_help, 1, wx.EXPAND|wx.CENTER|wx.ALL, 5)
self.sizer_main.Add(self.sizer_helpcontent, 1, wx.CENTER|wx.EXPAND)
self.sizer_main.Add(self.sizer_buttons, 0, wx.CENTER|wx.EXPAND)
self.SetSizer(self.sizer_main)
self.sizer_main.Fit(parent)
def onClose(self, event):
"""Closes help dialog. Saves checkbox state as needed."""
if self.cb_show_at_start.GetValue() is True:
current_settings = GeneralSettingsData()
if current_settings.show_help is False:
current_settings.show_help = True
current_settings.save_settings()
else:
# Save that the help at start is not wanted.
current_settings = GeneralSettingsData()
show_help = current_settings.show_help
if show_help:
current_settings.show_help = False
current_settings.save_settings()
self.frame.Close(True)
class HelpPopup(wx.PopupTransientWindow):
"""Popup to show a bit of static text"""
def __init__(self, parent, text,
show_image_quality=False,
use_perspective=False,
persp_name=None,
style=wx.BORDER_DEFAULT):
wx.PopupTransientWindow.__init__(self, parent, style)
self.mainframe = parent.frame
self.display_sys = None
# self.mainframe = parent.parent # persp dialog
if show_image_quality:
self.display_sys = self.mainframe.display_sys
self.advanced_on = self.mainframe.show_advanced_settings
self.show_image_quality = not self.mainframe.use_multi_image
self.use_perspective = use_perspective
self.persp_name = persp_name
else:
self.advanced_on = False
self.show_image_quality = False
self.use_perspective = False
self.persp_name = None
pnl = wx.Panel(self)
# pnl.SetBackgroundColour("CADET BLUE")
stlist = []
if isinstance(text, str):
st = wx.StaticText(pnl, -1, text)
stlist.append(st)
else:
for textstr in text:
st = wx.StaticText(pnl, -1, textstr)
stlist.append(st)
sizer = wx.BoxSizer(wx.VERTICAL)
for st in stlist:
sizer.Add(st, 0, wx.ALL, 5)
if self.show_image_quality:
st_qual = wx.StaticText(pnl, -1, self.string_ideal_image_size())
sizer.Add(st_qual, 0, wx.ALL, 5)
pnl.SetSizer(sizer)
sizer.Fit(pnl)
sizer.Fit(self)
self.Layout()
def ProcessLeftDown(self, evt):
return wx.PopupTransientWindow.ProcessLeftDown(self, evt)
def OnDismiss(self):
self.Destroy()
def string_ideal_image_size(self):
"Return a sentence what the minimum source image size is for best quality."
senten = ("For the best image quality with current settings your\n"
r" wallpapers should be {} or larger.")
if self.advanced_on:
if self.mainframe.cb_offsets.GetValue():
offsets = []
for tc in self.mainframe.tc_list_offsets:
off_str = tc.GetValue().split(",")
try:
offsets.append(
(int(off_str[0]), int(off_str[1]))
)
except (ValueError, IndexError):
offsets.append(
(0, 0)
)
else:
offsets = wpproc.NUM_DISPLAYS * [(0, 0)]
crops = self.display_sys.get_ppi_norm_crops(offsets)
persp_data = None
if self.use_perspective:
persp_data = self.display_sys.get_persp_data(self.persp_name)
if persp_data:
proj_plane_crops, persp_coeffs = persp.get_backprojected_display_system(crops,
persp_data)
# Canvas containing back-projected displays
canv = wpproc.compute_working_canvas(proj_plane_crops)
else:
canv = wpproc.compute_working_canvas(crops)
else:
canv = wpproc.compute_canvas(
wpproc.RESOLUTION_ARRAY,
wpproc.DISPLAY_OFFSET_ARRAY
)
res_str = "{}x{}".format(canv[0], canv[1])
fin = senten.format(res_str)
return fin
|
Aetf/superpaper
|
superpaper/wallpaper_processing.py
|
"""
Wallpaper image processing back-end for Superpaper.
Applies image corrections, crops, merges etc. and sets the wallpaper
with native platform methods whenever possible.
Written by <NAME>, copyright 2020 under MIT licence.
"""
import configparser
import math
import os
import platform
import subprocess
import sys
from operator import itemgetter
from threading import Lock, Thread, Timer
from PIL import Image, UnidentifiedImageError
from screeninfo import get_monitors
import superpaper.perspective as persp
import superpaper.sp_logging as sp_logging
from superpaper.message_dialog import show_message_dialog
from superpaper.sp_paths import CONFIG_PATH, TEMP_PATH
# Disables PIL.Image.DecompressionBombError.
Image.MAX_IMAGE_PIXELS = None # 715827880 would be 4x default max.
def running_kde():
"""Detect if running in a KDE session."""
d_ses = os.environ.get("DESKTOP_SESSION")
if d_ses and ("plasma" in d_ses or "kde" in d_ses):
return True
kde_f_ses = os.environ.get("KDE_FULL_SESSION")
xdg_ses_dtop = os.environ.get("XDG_SESSION_DESKTOP")
if kde_f_ses == "true" or xdg_ses_dtop == "KDE":
return True
return False
if platform.system() == "Windows":
import ctypes
elif platform.system() == "Linux":
# KDE has special needs
# if os.environ.get("DESKTOP_SESSION") in ["/usr/share/xsessions/plasma", "plasma"]:
if running_kde():
import dbus
# Global constants
NUM_DISPLAYS = 0
# list of display resolutions (width,height), use tuples.
RESOLUTION_ARRAY = []
# list of display offsets (width,height), use tuples.
DISPLAY_OFFSET_ARRAY = []
G_ACTIVE_DISPLAYSYSTEM = None
G_ACTIVE_PROFILE = None
G_WALLPAPER_CHANGE_LOCK = Lock()
G_SUPPORTED_IMAGE_EXTENSIONS = (".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff", ".webp")
G_SET_COMMAND_STRING = ""
# global to take care that failure message is not shown more than once at launch
USER_TOLD_OF_PHYS_FAIL = False
class RepeatedTimer(object):
"""Threaded timer used for slideshow."""
# Credit:
# https://stackoverflow.com/questions/3393612/run-certain-code-every-n-seconds/13151299#13151299
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
"""Starts timer."""
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.daemon = True
self._timer.start()
self.is_running = True
def stop(self):
"""Stops timer."""
self._timer.cancel()
self.is_running = False
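# Illustrative sketch (not part of Superpaper itself): RepeatedTimer fires its
# callback repeatedly until stopped, which is the pattern the slideshow uses.
# The interval and sleep duration below are made-up demonstration values.
def _example_repeated_timer():
    import time
    ticks = []
    timer = RepeatedTimer(0.2, lambda: ticks.append(time.time()))  # starts immediately
    time.sleep(0.5)
    timer.stop()
    # The callback ran roughly twice; the exact count depends on scheduling.
    return len(ticks)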
class Display():
"""
Stores refined data of a display.
Computes PPI if data is available. Stores non-negative translated offsets.
"""
def __init__(self, monitor):
self.resolution = (monitor.width, monitor.height)
self.digital_offset = (monitor.x, monitor.y)
if monitor.width_mm and monitor.height_mm:
self.phys_size_mm = tuple(
sorted(
[monitor.width_mm, monitor.height_mm],
reverse=bool(self.resolution[0]>self.resolution[1])
)
) # Take care that physical rotation matches resolution.
self.phys_size_failed = False
else:
# if physical size detection has failed, assume display is 23" diagonal
# to have it stand out
self.phys_size_mm = tuple(
sorted(
[509, 286],
reverse=bool(self.resolution[0]>self.resolution[1])
)
) # Take care that physical rotation matches resolution.
self.phys_size_failed = True
self.detected_phys_size_mm = self.phys_size_mm
self.ppi = None
self.ppi_norm_resolution = None
self.ppi_norm_offset = None
self.ppi_norm_bezels = (0, 0)
self.perspective_angles = (0, 0)
self.name = monitor.name
if self.resolution and self.phys_size_mm:
self.ppi = self.compute_ppi()
def __str__(self):
return (
f"Display("
f"resolution={self.resolution}, "
f"digital_offset={self.digital_offset}, "
f"phys_size_mm={self.phys_size_mm}, "
f"detected_phys_size_mm={self.detected_phys_size_mm}, "
f"ppi={self.ppi}, "
f"ppi_norm_resolution={self.ppi_norm_resolution}, "
f"ppi_norm_offset={self.ppi_norm_offset}, "
f"ppi_norm_bezels={self.ppi_norm_bezels}, "
f"perspective_angles={self.perspective_angles}, "
f"name={self.name!r}"
f")"
)
def __eq__(self, other):
return bool(
self.resolution == other.resolution and
self.digital_offset == other.digital_offset and
self.detected_phys_size_mm == other.detected_phys_size_mm
)
def __hash__(self):
return hash((self.resolution, self.digital_offset, self.detected_phys_size_mm))
    def diagonal_size(self):
        """Return the display diagonal as a (millimeters, inches) tuple."""
        diag_mm = math.sqrt(self.phys_size_mm[0]**2 + self.phys_size_mm[1]**2)
        diag_in = round(diag_mm / 25.4, 1)
        return (round(diag_mm), diag_in)
    def compute_ppi(self):
        """Compute PPI from resolution and physical size; return None on failure."""
if self.phys_size_mm[0]:
ppmm_horiz = self.resolution[0]/self.phys_size_mm[0]
else:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(
"Display.compute_ppi: self.phys_size_mm[0] was 0."
)
return None
if self.phys_size_mm[1]:
ppmm_vert = self.resolution[1]/self.phys_size_mm[1]
else:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(
"Display.compute_ppi: self.phys_size_mm[1] was 0."
)
return None
if abs(ppmm_horiz/ppmm_vert - 1) > 0.01:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(
"WARNING: Horizontal and vertical PPI do not match! hor: %s, ver: %s",
ppmm_horiz * 25.4, ppmm_vert * 25.4
)
sp_logging.G_LOGGER.info(str(self))
        return ppmm_horiz * 25.4  # 1 inch = 25.4 mm, so PPI = pixels per mm * 25.4.
def translate_offset(self, translate_tuple):
"""Move offset point by subtracting the input point.
This takes the top left most corner of the canvas to (0,0)
and retains relative offsets between displays as they should be.
"""
old_offsets = self.digital_offset
self.digital_offset = (
old_offsets[0] - translate_tuple[0],
old_offsets[1] - translate_tuple[1]
)
def ppi_and_physsize_from_diagonal_inch(self, diag_inch):
"""
If physical size detection fails, it can be computed by
asking the user to enter the diagonal dimension of the monitor
in inches.
"""
height_to_width_ratio = self.resolution[1]/self.resolution[0]
phys_width_inch = diag_inch / math.sqrt(1 + height_to_width_ratio**2)
phys_height_inch = height_to_width_ratio * phys_width_inch
self.phys_size_mm = (phys_width_inch * 25.4, phys_height_inch * 25.4)
self.ppi = self.resolution[0] / phys_width_inch
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(
"Updated PPI = %s and phys_size_mm = %s based on diagonal size: %s inches",
self.ppi,
self.phys_size_mm,
diag_inch
)
sp_logging.G_LOGGER.info(
str(self)
)
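# Illustrative sketch (not part of Superpaper itself): how a user-entered
# diagonal size feeds Display.ppi_and_physsize_from_diagonal_inch(). The
# stand-in monitor object and its numbers are made up; screeninfo normally
# provides the real one.
def _example_display_from_diagonal():
    from types import SimpleNamespace
    fake_monitor = SimpleNamespace(width=2560, height=1440, x=0, y=0,
                                   width_mm=597, height_mm=336,
                                   name="EXAMPLE-DISPLAY")
    dsp = Display(fake_monitor)
    # For a 27 inch 2560x1440 panel: width = 27 / sqrt(1 + (1440/2560)**2)
    # is about 23.5 inches, so PPI is about 2560 / 23.5, i.e. roughly 109.
    dsp.ppi_and_physsize_from_diagonal_inch(27)
    assert 108 < dsp.ppi < 110
    return dsp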
class DisplayLight():
"""Small class to store resolution and position data a kin to full Display."""
def __init__(self, res, off, bez):
self.resolution = res
self.digital_offset = off
if bez:
self.ppi_norm_bezels = bez
else:
self.ppi_norm_bezels = (0, 0)
def __str__(self):
return (
f"DisplayLight("
f"resolution={self.resolution}, "
f"digital_offset={self.digital_offset}, "
f"ppi_norm_bezels={self.ppi_norm_bezels} "
f")"
)
class DisplaySystem():
"""
Handle the display system as a whole, applying user data such as
bezel corrections, offsets, physical layout, and produces
resolutions and offsets that are used to set the wallpaper
in advanced mode.
"""
def __init__(self):
self.disp_list = get_display_data()
self.compute_ppinorm_resolutions()
# Data
self.use_user_diags = False
self.use_perspective = True
self.default_perspective = None
self.perspective_dict = {}
self.load_system()
self.load_perspectives()
# if user diags are not entered, tell about failed physical sizes
global USER_TOLD_OF_PHYS_FAIL
if not self.use_user_diags:
for dsp in self.disp_list:
if dsp.phys_size_failed and not USER_TOLD_OF_PHYS_FAIL:
msg = ("Detection of the diagonal size of a display has failed. "
"It will show up as a 23 inch display in advanced mode. "
"Enter the correct diagonal size with the Override Detected "
"Sizes tool.")
show_message_dialog(msg)
USER_TOLD_OF_PHYS_FAIL = True
def __eq__(self, other):
# return bool(tuple(self.disp_list) == tuple(other.disp_list))
for dsp_1, dsp_2 in zip(self.disp_list, other.disp_list):
if dsp_1 == dsp_2:
continue
else:
return False
if len(self.disp_list) == len(other.disp_list):
return True
else:
return False
def __hash__(self):
return hash(tuple(self.disp_list))
def max_ppi(self):
"""Return maximum pixel density."""
return max([disp.ppi for disp in self.disp_list])
def get_normalized_ppis(self):
"""Return list of PPI values normalized to the max_ppi."""
max_ppi = self.max_ppi()
return [disp.ppi/max_ppi for disp in self.disp_list]
def compute_ppinorm_resolutions(self):
"""Update disp_list PPI density normalized sizes of the real resolutions."""
rel_ppis = self.get_normalized_ppis()
for r_ppi, dsp in zip(rel_ppis, self.disp_list):
dsp.ppi_norm_resolution = (
round(dsp.resolution[0] / r_ppi),
round(dsp.resolution[1] / r_ppi)
)
def get_ppi_norm_crops(self, manual_offsets):
"""Returns list of ppi_norm crop tuples to cut from ppi_norm canvas.
A valid crop is a 4-tuple: (left, top, right, bottom).
"""
crops = []
for dsp in self.disp_list:
try:
off = manual_offsets[self.disp_list.index(dsp)]
except IndexError:
off = (0, 0)
left_top = (
round(dsp.ppi_norm_offset[0] + off[0]),
round(dsp.ppi_norm_offset[1] + off[1])
)
right_btm = (
round(dsp.ppi_norm_resolution[0]) + left_top[0],
round(dsp.ppi_norm_resolution[1]) + left_top[1],
)
crops.append(left_top + right_btm)
sp_logging.G_LOGGER.info("get_ppi_norm_offsets: %s", self.get_ppinorm_offsets())
sp_logging.G_LOGGER.info("get_ppi_norm_crops: %s", crops)
return crops
def fits_in_column(self, disp, col):
"""Test if IN DEKSTOP RES the horiz center of disp is below the last disp in the col."""
col_last_disp = col[-1]
disp_cntr = (disp.digital_offset[0] + disp.digital_offset[0] + disp.resolution[0])/2 #(left+right)/2
col_last_left = col_last_disp.digital_offset[0]
col_last_right = col_last_disp.digital_offset[0] + col_last_disp.resolution[0]
if (disp_cntr > col_last_left and disp_cntr < col_last_right):
return True
else:
return False
    def column_size(self, col):
        """Return the (width, height) of a column in PPI-normalized pixels, bezels included."""
width = max([dsp.ppi_norm_resolution[0] + dsp.ppi_norm_bezels[0] for dsp in col])
height = sum([dsp.ppi_norm_resolution[1] + dsp.ppi_norm_bezels[1] for dsp in col])
return (width, height)
def compute_initial_preview_offsets(self):
"""
        Uses desktop layout data to arrange the displays, in their
        physical dimensions, into columns in which each display is
        horizontally centered, and then concatenates these columns
        left to right with vertical centering. Each column's width
        is that of the widest display in the column. The display
        list needs to be sorted so that displays in a column are
        together and the columns then progress left to right.
Column composition is TESTED with resolution but column SIZES
are in PPI normalized resolutions to reflect the physical sizes
of the displays.
"""
# Construct columns from disp_list
columns = []
work_col = []
for dsp in self.disp_list:
if work_col == []:
work_col.append(dsp)
if dsp == self.disp_list[-1]:
columns.append(work_col)
else:
if self.fits_in_column(dsp, work_col):
work_col.append(dsp)
else:
columns.append(work_col)
work_col = [dsp]
if dsp == self.disp_list[-1]:
columns.append(work_col)
# print("columns", columns)
# print("work_col", work_col)
# print("columns done", columns)
# for col in columns:
# for dsp in col:
# print(str(dsp))
col_ids = [list(range(len(col))) for col in columns]
# sort columns in place vertically in digital offset
sorted_ids = []
sorted_columns = []
for ids, col in zip(col_ids, columns):
# col.sort(key=lambda x: x.digital_offset[1])
srt_id, srt_col = (list(t) for t in zip(*sorted(zip(ids, col), key=lambda pair: pair[1].digital_offset[1])))
sorted_ids.append(srt_id)
sorted_columns.append(srt_col)
columns = sorted_columns
# print("columns sorted", columns, "sorted_ids", sorted_ids)
# for col in columns:
# for dsp in col:
# print(str(dsp))
if columns == []:
sp_logging.G_LOGGER.info(
"DisplaySystem column recostruction has failed completely. Trigger fallback.")
columns = [[dsp] for dsp in self.disp_list]
# Tile columns on to the plane with vertical centering
try:
col_sizes = [self.column_size(col) for col in columns]
        except (ValueError, IndexError):
            col_sizes = []
            sp_logging.G_LOGGER.info("Problem with column sizes. columns: %s",
                                     columns)
# print("col_sizes", col_sizes)
try:
max_col_h = max([sz[1] for sz in col_sizes])
        except ValueError:
            max_col_h = 0
            sp_logging.G_LOGGER.info("There are no column sizes? col_sizes: %s",
                                     col_sizes)
col_left_tops = []
current_left = 0
for sz in col_sizes:
col_left_tops.append(
(
current_left,
round((max_col_h - sz[1])/2)
)
)
current_left += sz[0]
# print("col_left_tops", col_left_tops)
# Tile displays in columns onto the plane with horizontal centering
# within the column. Anchor columns to col_left_tops.
for col, col_anchor in zip(columns, col_left_tops):
current_top = 0
max_dsp_w = max([dsp.ppi_norm_resolution[0] + dsp.ppi_norm_bezels[0] for dsp in col])
for dsp in col:
dsp_w = dsp.ppi_norm_resolution[0] + dsp.ppi_norm_bezels[0]
dsp.ppi_norm_offset = (
col_anchor[0]
+ round((max_dsp_w - dsp_w)/2),
col_anchor[1] + current_top
)
# print(dsp.ppi_norm_offset)
current_top += dsp.ppi_norm_resolution[1] + dsp.ppi_norm_bezels[1]
# Restore column order to the original order that matches self.disp_list and other sorts (kde).
restored_columns = []
for ids, col in zip(sorted_ids, columns):
srt_id, srt_col = (list(t) for t in zip(*sorted(zip(ids, col), key=lambda pair: pair[0])))
restored_columns.append(srt_col)
columns = restored_columns
# Update offsets to disp_list
flattened_cols = [dsp for col in columns for dsp in col]
for scope_dsp, dsp in zip(flattened_cols, self.disp_list):
dsp.ppi_norm_offset = scope_dsp.ppi_norm_offset
# print("PPI NORM RESOLUTIONS AND OFFSETS")
# print([(dsp.ppi_norm_resolution, dsp.ppi_norm_offset) for dsp in self.disp_list])
# sys.exit()
    def get_disp_list(self, use_ppi_norm=False):
        """Return the display list, optionally as PPI-normalized DisplayLight objects."""
if use_ppi_norm:
disp_l = []
for dsp in self.disp_list:
disp_l.append(
DisplayLight(
dsp.ppi_norm_resolution,
dsp.ppi_norm_offset,
dsp.ppi_norm_bezels
)
)
return disp_l
else:
disp_l = self.disp_list
return disp_l
def get_ppinorm_offsets(self):
"""Return ppi norm offsets."""
pnoffs = []
for dsp in self.disp_list:
pnoffs.append(
dsp.ppi_norm_offset
)
return pnoffs
def get_persp_data(self, persp_name):
"""Return a dict of perspective settings."""
if persp_name == "default":
get_id = self.default_perspective
else:
get_id = persp_name
if not get_id or get_id == "disabled" or get_id not in self.perspective_dict:
return None
return self.perspective_dict[get_id]
def update_ppinorm_offsets(self, offsets, bezels_included=False):
"""Write ppi_norm resolution offsets as determined
in the GUI into Displays."""
for dsp, offs in zip(self.disp_list, offsets):
dsp.ppi_norm_offset = offs
def update_bezels(self, bezels_mm):
"""Update displays with new bezel sizes."""
        # test that input values are non-negative
for bez_pair in bezels_mm:
for bez in bez_pair:
if bez < 0:
msg = ("Bezel thickness must be a "
"non-negative number, {} was entered.").format(bez)
sp_logging.G_LOGGER.info(msg)
show_message_dialog(msg, "Error")
return 0
# convert to normalized pixel units
max_ppmm = self.max_ppi() / 25.4
bezels_ppi_norm = [(bz[0] * max_ppmm, bz[1] * max_ppmm) for bz in bezels_mm]
for bz_px, dsp in zip(bezels_ppi_norm, self.disp_list):
dsp.ppi_norm_bezels = bz_px
sp_logging.G_LOGGER.info("update_bezels: %s", bz_px)
self.compute_initial_preview_offsets()
return 1
def bezels_in_mm(self):
"""Return list of bezel thicknesses in millimeters."""
bezels_mm = []
max_ppmm = self.max_ppi() / 25.4
for dsp in self.disp_list:
bezels_mm.append(
(
round(dsp.ppi_norm_bezels[0] / max_ppmm, 2),
round(dsp.ppi_norm_bezels[1] / max_ppmm, 2)
)
)
return bezels_mm
def bezels_in_px(self):
"""Return list of bezel thicknesses in ppi norm px."""
bezels = []
for dsp in self.disp_list:
bezels.append(
(
dsp.ppi_norm_bezels[0],
dsp.ppi_norm_bezels[1]
)
)
return bezels
def update_display_diags(self, diag_inches):
"""Overwrite detected display sizes with user input."""
if diag_inches == "auto":
self.use_user_diags = False
for dsp in self.disp_list:
dsp.phys_size_mm = dsp.detected_phys_size_mm
dsp.ppi = dsp.compute_ppi()
self.compute_ppinorm_resolutions()
self.compute_initial_preview_offsets()
else:
self.use_user_diags = True
for dsp, diag in zip(self.disp_list, diag_inches):
dsp.ppi_and_physsize_from_diagonal_inch(diag)
self.compute_ppinorm_resolutions()
self.compute_initial_preview_offsets()
def save_system(self):
"""Save the current DisplaySystem instance user given data
in a central file (CONFIG_PATH/display_systems.dat).
        Data is saved with a DisplaySystem specific hash as the key,
and data saved include:
- ppi_norm offsets which contain any given bezel thicknesses
- bezel (bez+gap+bez) sizes for (right_b, bottom_b)
- display diagonal sizes if any of them are manually changed
- rotation angles of displays for perspective correction
"""
archive_file = os.path.join(CONFIG_PATH, "display_systems.dat")
instance_key = hash(self)
# collect data for saving
ppi_norm_offsets = []
bezel_mms = self.bezels_in_mm()
diagonal_inches = []
use_perspective = self.use_perspective
def_perspective = str(self.default_perspective)
for dsp in self.disp_list:
ppi_norm_offsets.append(dsp.ppi_norm_offset)
diagonal_inches.append(dsp.diagonal_size()[1])
if not self.use_user_diags:
diagonal_inches = None
# load previous configs if file is found
config = configparser.ConfigParser()
if os.path.exists(archive_file):
config.read(archive_file)
# entering data to config under instance_key
config[instance_key] = {
"ppi_norm_offsets": list_to_str(ppi_norm_offsets, item_len=2),
"bezel_mms": list_to_str(bezel_mms, item_len=2),
"user_diagonal_inches": list_to_str(diagonal_inches, item_len=1),
"use_perspective": str(int(use_perspective)),
"def_perspective": def_perspective
}
sp_logging.G_LOGGER.info(
"Saving DisplaySystem: key: %s, ppi_norm_offsets: %s, "
"bezel_mms: %s, user_diagonal_inches: %s, "
"use_perspective: %s, def_perspective: %s",
instance_key,
ppi_norm_offsets,
bezel_mms,
diagonal_inches,
use_perspective,
def_perspective
)
# write config to file
with open(archive_file, 'w') as configfile:
config.write(configfile)
# Once profile is saved make it available for wallpaper setter
refresh_display_data()
def load_system(self):
"""Try to load system data from database based on initialization data,
i.e. the Display list. If no pre-existing system is found, try to guess
the system topology and update disp_list"""
archive_file = os.path.join(CONFIG_PATH, "display_systems.dat")
instance_key = str(hash(self))
found_match = False
# check if file exists and if the current key exists in it
if os.path.exists(archive_file):
config = configparser.ConfigParser()
config.read(archive_file)
sp_logging.G_LOGGER.info("config.sections: %s", config.sections())
if instance_key in config:
found_match = True
else:
sp_logging.G_LOGGER.info("load: system not found with hash %s", instance_key)
else:
sp_logging.G_LOGGER.info("load_system: archive_file not found: %s", archive_file)
if found_match:
# read values
# and push them into self.disp_list
instance_data = config[instance_key]
ppi_norm_offsets = str_to_list(instance_data["ppi_norm_offsets"],
item_len=2)
bezel_mms = str_to_list(instance_data["bezel_mms"],
item_len=2)
bezel_mms = [(round(bez[0], 2), round(bez[1], 2)) for bez in bezel_mms]
diagonal_inches = str_to_list(instance_data["user_diagonal_inches"],
item_len=1)
use_perspective = bool(int(instance_data.get("use_perspective", 0)))
def_perspective = instance_data.get("def_perspective", "None")
sp_logging.G_LOGGER.info(
"DisplaySystem loaded: P.N.Offs: %s, "
"bezel_mmṣ: %s, "
"user_diagonal_inches: %s, "
"use_perspective: %s, "
"def_perspective: %s",
ppi_norm_offsets, bezel_mms, diagonal_inches,
use_perspective, def_perspective
)
self.update_bezels(bezel_mms)
self.update_ppinorm_offsets(ppi_norm_offsets) # Bezels & user diagonals always included.
if diagonal_inches:
sp_logging.G_LOGGER.info("Updating diagonal_inches")
self.update_display_diags(diagonal_inches)
self.compute_ppinorm_resolutions()
self.use_perspective = use_perspective
if def_perspective == "None":
self.default_perspective = None
else:
self.default_perspective = def_perspective
else:
# Continue without data
self.compute_initial_preview_offsets()
def update_perspectives(self, persp_name, use_persp_master, is_ds_def, viewer_data, swivels, tilts):
"""Update perspective data.
Common data across all profiles:
- master toggle for perspective corrections
Data types in a profile are:
- index of central display
- viewer's position relative to the center of the central display
- lateral, vertical, depth
- swivel data as a list over each display
- axis in ["left", "right"]
- points up
- angle
- sign with right hand rule
- axis offset: (lateral, depth)
- tilt data as a list over each display
- angle (axis is the equator line of the display)
- axis points left
- sign with right hand rule
- axis offset: (vertical, depth)
"""
centr_disp, viewer_pos = viewer_data
self.use_perspective = use_persp_master
if is_ds_def and self.default_perspective != persp_name:
self.default_perspective = persp_name
elif not is_ds_def and self.default_perspective == persp_name:
self.default_perspective = None
self.save_system()
if persp_name is not None:
if persp_name not in self.perspective_dict:
self.perspective_dict[persp_name] = {}
self.perspective_dict[persp_name]["central_disp"] = centr_disp
self.perspective_dict[persp_name]["viewer_pos"] = viewer_pos
self.perspective_dict[persp_name]["swivels"] = swivels
self.perspective_dict[persp_name]["tilts"] = tilts
# trigger save afterwards (not here)
def save_perspectives(self):
"""Save perspective data dict to file."""
instance_key = str(hash(self))
persp_file = os.path.join(CONFIG_PATH, instance_key + ".persp")
# load previous configs if file is found
config = configparser.ConfigParser()
# if os.path.exists(persp_file):
# config.read(persp_file)
for sect in self.perspective_dict:
config[sect] = {
"central_disp": str(self.perspective_dict[sect]["central_disp"]),
"viewer_pos": list_to_str(self.perspective_dict[sect]["viewer_pos"], item_len=1),
"swivels": list_to_str(self.perspective_dict[sect]["swivels"], item_len=4),
"tilts": list_to_str(self.perspective_dict[sect]["tilts"], item_len=3)
}
sp_logging.G_LOGGER.info("Saving perspective profs: %s", config.sections())
# write config to file
with open(persp_file, 'w') as configfile:
config.write(configfile)
def load_perspectives(self):
"""Load perspective data dict from file."""
instance_key = str(hash(self))
persp_file = os.path.join(CONFIG_PATH, instance_key + ".persp")
# check if file exists and load saved perspective dicts
if os.path.exists(persp_file):
config = configparser.ConfigParser()
config.read(persp_file)
sp_logging.G_LOGGER.info("Loading perspective profs: %s", config.sections())
self.perspective_dict = {}
for sect in config.sections():
self.perspective_dict[sect] = {
"central_disp": int(config[sect]["central_disp"]),
"viewer_pos": str_to_list(config[sect]["viewer_pos"], item_len=1),
"swivels": str_to_list(config[sect]["swivels"], item_len=4, strings=True),
"tilts": str_to_list(config[sect]["tilts"], item_len=3)
}
else:
pass
# End DisplaySystem
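# Illustrative sketch (not part of Superpaper itself): the shape of a single
# perspective_dict entry, matching the structure documented in
# DisplaySystem.update_perspectives() above. Every value here is a made-up
# placeholder for a two display setup; angles are in degrees and swivel/tilt
# axis offsets in millimeters, as in the perspective help text.
_EXAMPLE_PERSPECTIVE_PROFILE = {
    "central_disp": 0,          # index of the central display
    "viewer_pos": [0, 0, 600],  # lateral, vertical, depth from the central display's center
    "swivels": [                # per display: axis, angle, lateral off., depth off.
        ("left", 0, 0, 0),
        ("right", 15, 10, 40),
    ],
    "tilts": [                  # per display: angle, vertical off., depth off.
        (0, 0, 0),
        (5, 0, 30),
    ],
}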
def list_to_str(lst, item_len=1):
"""Format lists as ,(;) separated strings."""
if item_len == 1:
if lst:
return ",".join(str(lst_itm) for lst_itm in lst)
else:
return "None"
else:
joined_items = []
for sub_lst in lst:
joined_items.append(",".join(str(sub_itm) for sub_itm in sub_lst))
return ";".join(joined_items)
def str_to_list(joined_list, item_len=1, strings=False):
"""Extract list from joined_list."""
if item_len == 1:
if joined_list in [None, "None"]:
return None
split_list = joined_list.split(",")
conv_list = []
for item in split_list:
try:
val = int(item)
except ValueError:
try:
val = float(item)
                except ValueError:
                    # Keep non-numeric items as strings (e.g. swivel axis names).
                    val = item
                    if not strings:
                        sp_logging.G_LOGGER.info(
                            "str_to_list: ValueError: not int or float: %s", item
                        )
conv_list.append(val)
return conv_list
else:
split_list = joined_list.split(";")
conv_list = []
for item in split_list:
split_item = item.split(",")
conv_item = []
for sub_item in split_item:
try:
val = int(sub_item)
except ValueError:
try:
val = float(sub_item)
                    except ValueError:
                        # Keep non-numeric sub-items as strings ("left"/"right" axes).
                        val = sub_item
                        if not strings:
                            sp_logging.G_LOGGER.info(
                                "str_to_list: ValueError: not int or float: %s", sub_item
                            )
conv_item.append(val)
conv_list.append(tuple(conv_item))
return conv_list
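# Illustrative sketch (not part of Superpaper itself): round-tripping nested
# data through list_to_str / str_to_list, as done for offsets, bezels and
# perspective profiles. All values are made up.
def _example_serialization_roundtrip():
    offsets = [(10, 0), (0, 25)]
    text = list_to_str(offsets, item_len=2)
    assert text == "10,0;0,25"
    assert str_to_list(text, item_len=2) == [(10, 0), (0, 25)]
    # Swivel entries mix axis names and numbers, hence strings=True when parsing.
    swivels = [("left", 0, 0, 0), ("right", 15, 10, 40)]
    assert str_to_list(list_to_str(swivels, item_len=4),
                       item_len=4, strings=True) == swivels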
def extract_global_vars(disp_list):
    """Collect resolution and digital offset lists from a list of Display objects."""
res_arr = []
off_arr = []
for disp in disp_list:
res_arr.append(disp.resolution)
off_arr.append(disp.digital_offset)
return [res_arr, off_arr]
def get_display_data():
"""
Updates global display variables: number of displays, resolutions and offsets.
Returns a list of Display objects, one for each monitor. Offsets are sanitized
so that they are always non-negative.
"""
# https://github.com/rr-/screeninfo
global NUM_DISPLAYS, RESOLUTION_ARRAY, DISPLAY_OFFSET_ARRAY
RESOLUTION_ARRAY = []
DISPLAY_OFFSET_ARRAY = []
monitors = get_monitors()
while not monitors:
monitors = get_monitors()
sp_logging.G_LOGGER.info("Had to re-query for display data.")
NUM_DISPLAYS = len(monitors)
display_list = []
for monitor in monitors:
display_list.append(Display(monitor))
# Check that there are no negative offsets and fix if any are found.
leftmost_offset = min([disp.digital_offset[0] for disp in display_list])
topmost_offset = min([disp.digital_offset[1] for disp in display_list])
if leftmost_offset < 0 or topmost_offset < 0:
for disp in display_list:
disp.translate_offset((leftmost_offset, topmost_offset))
# sort display list by digital offsets
display_list.sort(key=lambda x: x.digital_offset)
# extract global variables for legacy compatibility
RESOLUTION_ARRAY, DISPLAY_OFFSET_ARRAY = extract_global_vars(display_list)
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(
"get_display_data output: NUM_DISPLAYS = %s, RES_ARR = %s, OFF_ARR = %s",
NUM_DISPLAYS,
RESOLUTION_ARRAY,
DISPLAY_OFFSET_ARRAY
)
for disp in display_list:
sp_logging.G_LOGGER.info(str(disp))
return display_list
def refresh_display_data():
global G_ACTIVE_DISPLAYSYSTEM
G_ACTIVE_DISPLAYSYSTEM = DisplaySystem()
def compute_canvas(res_array, offset_array):
"""Computes the size of the total desktop area from monitor resolutions and offsets."""
# Take the subtractions of right-most right - left-most left
# and bottom-most bottom - top-most top (=0).
leftmost = 0
topmost = 0
right_edges = []
bottom_edges = []
for res, off in zip(res_array, offset_array):
right_edges.append(off[0]+res[0])
bottom_edges.append(off[1]+res[1])
# Right-most edge.
rightmost = max(right_edges)
# Bottom-most edge.
bottommost = max(bottom_edges)
canvas_size = [rightmost - leftmost, bottommost - topmost]
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Canvas size: %s", canvas_size)
return canvas_size
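# Illustrative sketch (not part of Superpaper itself): canvas size for a
# made-up 1440p + 1080p desktop where the second display sits to the right
# and is offset 360 px down.
def _example_compute_canvas():
    res_array = [(2560, 1440), (1920, 1080)]
    offset_array = [(0, 0), (2560, 360)]
    # rightmost edge: 2560 + 1920 = 4480, bottommost edge: max(1440, 360 + 1080) = 1440
    assert compute_canvas(res_array, offset_array) == [4480, 1440]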
def compute_ppi_corrected_res_array(res_array, ppi_list_rel_density):
"""Return ppi density normalized sizes of the real resolutions."""
eff_res_array = []
for i in range(len(res_array)):
effw = round(res_array[i][0] / ppi_list_rel_density[i])
effh = round(res_array[i][1] / ppi_list_rel_density[i])
eff_res_array.append((effw, effh))
return eff_res_array
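# Illustrative sketch (not part of Superpaper itself): a made-up 4K display
# next to a 1080p display with half its pixel density gets the same
# PPI-normalized size, which is what keeps the spanned image physically
# continuous across both.
def _example_ppi_normalized_resolutions():
    res_array = [(3840, 2160), (1920, 1080)]
    rel_density = [1.0, 0.5]  # densities relative to the densest display
    assert compute_ppi_corrected_res_array(res_array, rel_density) == [
        (3840, 2160), (3840, 2160)]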
# resize image to fill given rectangle and do a centered crop to size.
# Return output image.
def resize_to_fill(img, res, quality=Image.LANCZOS):
"""Resize image to fill given rectangle and do a centered crop to size."""
if quality == "fast":
quality = Image.HAMMING
reducing_gap = 1.5
else:
quality = Image.LANCZOS
reducing_gap = None
if not img.mode == "RGB":
img = img.convert("RGB")
image_size = img.size # returns image (width,height)
if image_size == res:
# input image is already of the correct size, no action needed.
return img
image_ratio = image_size[0] / image_size[1]
target_ratio = res[0] / res[1]
# resize along the shorter edge to get an image that is at least of the
# target size on the shorter edge.
if image_ratio < target_ratio: # img not wide enough / is too tall
resize_multiplier = res[0] / image_size[0]
new_size = (
round(resize_multiplier * image_size[0]),
round(resize_multiplier * image_size[1]))
img = img.resize(new_size, resample=quality, reducing_gap=reducing_gap)
# crop vertically to target height
extra_height = new_size[1] - res[1]
if extra_height < 0:
sp_logging.G_LOGGER.info(
"Error with cropping vertically, resized image \
wasn't taller than target size.")
return -1
if extra_height == 0:
# image is already at right height, no cropping needed.
return img
# (left edge, half of extra height from top,
# right edge, bottom = top + res[1]) : force correct height
crop_tuple = (
0,
round(extra_height/2),
new_size[0],
round(extra_height/2) + res[1])
cropped_res = img.crop(crop_tuple)
if cropped_res.size == res:
return cropped_res
else:
sp_logging.G_LOGGER.info(
"Error: result image not of correct size. crp:%s, res:%s",
cropped_res.size, res)
return -1
elif image_ratio >= target_ratio: # img not tall enough / is too wide
resize_multiplier = res[1] / image_size[1]
new_size = (
round(resize_multiplier * image_size[0]),
round(resize_multiplier * image_size[1]))
img = img.resize(new_size, resample=quality, reducing_gap=reducing_gap)
# crop horizontally to target width
extra_width = new_size[0] - res[0]
if extra_width < 0:
sp_logging.G_LOGGER.info(
"Error with cropping horizontally, resized image \
wasn't wider than target size.")
return -1
if extra_width == 0:
# image is already at right width, no cropping needed.
return img
# (half of extra from left edge, top edge,
# right = left + desired width, bottom) : force correct width
crop_tuple = (
round(extra_width/2),
0,
round(extra_width/2) + res[0],
new_size[1])
cropped_res = img.crop(crop_tuple)
if cropped_res.size == res:
return cropped_res
else:
sp_logging.G_LOGGER.info(
"Error: result image not of correct size. crp:%s, res:%s",
cropped_res.size, res)
return -1
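# Illustrative sketch (not part of Superpaper itself): resize_to_fill() scales
# a source image along the edge that needs it and center-crops to the exact
# target size. The in-memory test image below is made up.
def _example_resize_to_fill():
    src = Image.new("RGB", (1000, 1000), color=(40, 40, 40))
    out = resize_to_fill(src, (1920, 1080))
    # The square source is first scaled to 1920x1920, then cropped vertically.
    assert out.size == (1920, 1080)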
def get_center(res):
"""Computes center point of a resolution rectangle."""
return (round(res[0] / 2), round(res[1] / 2))
def get_all_centers(resarr_eff, manual_offsets):
"""Computes center points of given resolution list taking into account their offsets."""
centers = []
sum_widths = 0
# get the vertical pixel distance of the center of the left most display
# from the top.
center_standard_height = get_center(resarr_eff[0])[1]
if len(manual_offsets) < len(resarr_eff):
sp_logging.G_LOGGER.info("get_all_centers: Not enough manual offsets: \
%s for displays: %s",
len(manual_offsets),
len(resarr_eff))
else:
for i in range(len(resarr_eff)):
horiz_radius = get_horizontal_radius(resarr_eff[i])
# here take the center height to be the same for all the displays
# unless modified with the manual offset
center_pos_from_anchor_left_top = (
sum_widths + manual_offsets[i][0] + horiz_radius,
center_standard_height + manual_offsets[i][1])
centers.append(center_pos_from_anchor_left_top)
sum_widths += resarr_eff[i][0]
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("centers: %s", centers)
return centers
def get_lefttop_from_center(center, res):
"""Compute top left coordinate of a rectangle from its center."""
return (center[0] - round(res[0] / 2), center[1] - round(res[1] / 2))
def get_rightbottom_from_lefttop(lefttop, res):
"""Compute right bottom corner of a rectangle from its left top."""
return (lefttop[0] + res[0], lefttop[1] + res[1])
def get_horizontal_radius(res):
"""Returns half the width of the input rectangle."""
return round(res[0] / 2)
def compute_crop_tuples(resolution_array_ppinormalized, manual_offsets):
# Assume the centers of the physical displays are aligned on common
# horizontal line. If this is not the case one must use the manual
# offsets defined in the profile for adjustment (and bezel corrections).
# Anchor positions to the top left corner of the left most display. If
# its size is scaled up, one will need to adjust the horizontal positions
# of all the displays. (This is automatically handled by using the
# effective resolution array).
# Additionally one must make sure that the highest point of the display
# arrangement is at y=0.
crop_tuples = []
centers = get_all_centers(resolution_array_ppinormalized, manual_offsets)
for center, res in zip(centers, resolution_array_ppinormalized):
lefttop = get_lefttop_from_center(center, res)
rightbottom = get_rightbottom_from_lefttop(lefttop, res)
crop_tuples.append(lefttop + rightbottom)
# Translate crops so that the highest point is at y=0 -- remember to add
# translation to both top and bottom coordinates! Same horizontally.
# Left-most edge of the crop tuples.
leftmost = min(crop_tuples, key=itemgetter(0))[0]
# Top-most edge of the crop tuples.
topmost = min(crop_tuples, key=itemgetter(1))[1]
if leftmost == 0 and topmost == 0:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("crop_tuples: %s", crop_tuples)
return crop_tuples # [(left, up, right, bottom),...]
else:
crop_tuples_translated = translate_crops(
crop_tuples, (leftmost, topmost))
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("crop_tuples_translated: %s", crop_tuples_translated)
return crop_tuples_translated # [(left, up, right, bottom),...]
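# Illustrative sketch (not part of Superpaper itself): crop regions for two
# made-up PPI-normalized displays with no manual offsets. Display centers are
# placed on a common horizontal line, so the shorter display's crop starts
# 120 px lower: (1440 - 1200) / 2.
def _example_compute_crop_tuples():
    ppi_norm_res = [(2560, 1440), (1920, 1200)]
    manual_offsets = [(0, 0), (0, 0)]
    crops = compute_crop_tuples(ppi_norm_res, manual_offsets)
    assert crops == [(0, 0, 2560, 1440), (2560, 120, 4480, 1320)]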
def translate_crops(crop_tuples, translate_tuple):
"""Translate crop tuples to be over the image are, i.e. left top at (0,0)."""
crop_tuples_translated = []
for crop_tuple in crop_tuples:
crop_tuples_translated.append(
(crop_tuple[0] - translate_tuple[0],
crop_tuple[1] - translate_tuple[1],
crop_tuple[2] - translate_tuple[0],
crop_tuple[3] - translate_tuple[1]))
return crop_tuples_translated
def compute_working_canvas(crop_tuples):
"""Computes effective size of the desktop are taking into account PPI/offsets/bezels."""
# Take the subtractions of right-most right - left-most left
# and bottom-most bottom - top-most top (=0).
leftmost = 0
topmost = 0
# Right-most edge of the crop tuples.
rightmost = max(crop_tuples, key=itemgetter(2))[2]
# Bottom-most edge of the crop tuples.
bottommost = max(crop_tuples, key=itemgetter(3))[3]
canvas_size = [rightmost - leftmost, bottommost - topmost]
return canvas_size
def alternating_outputfile(prof_name):
"""Return alternating output filename and old filename.
This is done so that the cache doesn't become a huge dump of unused files,
and it is alternating since some OSs don't update their wallpapers if the
current image file is overwritten.
"""
platf = platform.system()
if platf == "Windows":
ftype = "jpg"
else:
ftype = "png"
outputfile = os.path.join(TEMP_PATH, prof_name + "-a." + ftype)
if os.path.isfile(outputfile):
outputfile_old = outputfile
outputfile = os.path.join(TEMP_PATH, prof_name + "-b." + ftype)
else:
outputfile_old = os.path.join(TEMP_PATH, prof_name + "-b." + ftype)
return (outputfile, outputfile_old)
def span_single_image_simple(profile, force):
"""
Spans a single image across all monitors. No corrections.
This simple method resizes the source image so it fills the whole
desktop canvas. Since no corrections are applied, no offset dependent
cuts are needed and so this should work on any monitor arrangement.
"""
file = profile.next_wallpaper_files()[0]
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(file)
try:
img = Image.open(file)
    except UnidentifiedImageError:
        sp_logging.G_LOGGER.info(("Opening image '%s' failed with PIL.UnidentifiedImageError. "
                                  "It could be corrupted or is of foreign type."), file)
        return 1
canvas_tuple = tuple(compute_canvas(RESOLUTION_ARRAY, DISPLAY_OFFSET_ARRAY))
img_resize = resize_to_fill(img, canvas_tuple)
outputfile, outputfile_old = alternating_outputfile(profile.name)
img_resize.save(outputfile, quality=95) # set quality if jpg is used, png unaffected
if profile.name == G_ACTIVE_PROFILE or force:
set_wallpaper(outputfile, force)
if os.path.exists(outputfile_old):
os.remove(outputfile_old)
return 0
def group_persp_data(persp_dat, groups):
"""Rerturn list of grouped perspective data objects."""
if not persp_dat:
return [None] * len(groups)
group_persp_data_list = []
for grp in groups:
group_data = {
"central_disp": persp_dat["central_disp"],
"viewer_pos": persp_dat["viewer_pos"],
"swivels": [persp_dat["swivels"][index] for index in grp],
"tilts": [persp_dat["tilts"][index] for index in grp]
}
group_persp_data_list.append(group_data)
return group_persp_data_list
def translate_to_group_coordinates(group_crop_list):
"""Translates lists of group crops into groups internal coordinates."""
if len(group_crop_list) == 1:
return group_crop_list
else:
group_crop_list_transl = []
for grp_crops in group_crop_list:
left_anch = min([crp[0] for crp in grp_crops])
top_anch = min([crp[1] for crp in grp_crops])
transl_crops = []
for crp in grp_crops:
transl_crops.append(
(crp[0] - left_anch,
crp[1] - top_anch,
crp[2] - left_anch,
crp[3] - top_anch)
)
group_crop_list_transl.append(transl_crops)
return group_crop_list_transl
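# Illustrative sketch (not part of Superpaper itself): with two span groups,
# each group's crops are shifted so that the group's own top-left corner is at
# (0, 0). The crop values are made up.
def _example_group_coordinates():
    group_crops = [[(0, 0, 2560, 1440)], [(2560, 120, 4480, 1320)]]
    assert translate_to_group_coordinates(group_crops) == [
        [(0, 0, 2560, 1440)],
        [(0, 0, 1920, 1200)],
    ]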
# Take pixel densities of displays into account to have the image match
# physically between displays.
def span_single_image_advanced(profile, force):
"""
Applies wallpaper using PPI, bezel, offset corrections.
Further description todo.
"""
files = profile.next_wallpaper_files()
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(files)
try:
img_list = [Image.open(fil) for fil in files]
    except UnidentifiedImageError:
        sp_logging.G_LOGGER.info(("Opening image '%s' failed with PIL.UnidentifiedImageError. "
                                  "It could be corrupted or is of foreign type."), files)
        return 1
# Cropping now sections of the image to be shown, USE EFFECTIVE WORKING
# SIZES. Also EFFECTIVE SIZE Offsets are now required.
manual_offsets = profile.manual_offsets
cropped_images = {}
crop_tuples = G_ACTIVE_DISPLAYSYSTEM.get_ppi_norm_crops(manual_offsets)
sp_logging.G_LOGGER.info("G_A_DSYS.use_perspective: %s, prof.perspective: %s",
G_ACTIVE_DISPLAYSYSTEM.use_perspective,
profile.perspective)
persp_dat = None
if G_ACTIVE_DISPLAYSYSTEM.use_perspective:
persp_dat = G_ACTIVE_DISPLAYSYSTEM.get_persp_data(profile.perspective)
if profile.spangroups:
spangroups = profile.spangroups
else:
spangroups = [list(range(NUM_DISPLAYS))]
grp_crop_tuples = translate_to_group_coordinates(
[[crop_tuples[index] for index in grp] for grp in spangroups])
grp_res_array = [[RESOLUTION_ARRAY[index] for index in grp] for grp in spangroups]
grp_persp_dat = group_persp_data(persp_dat, spangroups)
for img, grp, grp_p_dat, grp_crops, grp_res_arr in zip(img_list,
spangroups,
grp_persp_dat,
grp_crop_tuples,
grp_res_array):
if persp_dat:
proj_plane_crops, persp_coeffs = persp.get_backprojected_display_system(grp_crops,
grp_p_dat)
# Canvas containing back-projected displays
canvas_tuple_proj = tuple(compute_working_canvas(proj_plane_crops))
# Canvas containing ppi normalized displays
canvas_tuple_trgt = tuple(compute_working_canvas(grp_crops))
sp_logging.G_LOGGER.info("Back-projected canvas size: %s", canvas_tuple_proj)
img_workingsize = resize_to_fill(img, canvas_tuple_proj)
for crop_tup, coeffs, ppin_crop, (i_res, res) in zip(proj_plane_crops,
persp_coeffs,
grp_crops,
enumerate(grp_res_arr)):
# Whole image needs to be transformed for each display separately
# since the coeffs live between the full back-projected plane
# containing all displays and the full 'target' working canvas
# size canvas_tuple_trgt containing ppi normalized displays.
persp_crop = img_workingsize.transform(canvas_tuple_trgt,
Image.PERSPECTIVE, coeffs,
Image.BICUBIC)
## persp_crop.save(str(canvas_tuple_trgt)+str(crop_tup), "PNG")
# Crop desired region from transformed image which is now in
# ppi normalized resolution
crop_img = persp_crop.crop(ppin_crop)
# Resize correct crop to actual display resolution
crop_img = crop_img.resize(res, resample=Image.LANCZOS)
# cropped_images.append(crop_img) #old
cropped_images[grp[i_res]] = crop_img
else:
# larger working size needed to fill all the normalized lower density
            # displays. Takes into account manual offsets that might require extra space.
canvas_tuple_eff = tuple(compute_working_canvas(grp_crops))
# Image is now the height of the eff tallest display + possible manual
# offsets and the width of the combined eff widths + possible manual
# offsets.
img_workingsize = resize_to_fill(img, canvas_tuple_eff)
# Simultaneously make crops at working size and then resize down to actual
# resolution from RESOLUTION_ARRAY as needed.
for crop_tup, (i_res, res) in zip(grp_crops, enumerate(grp_res_arr)):
crop_img = img_workingsize.crop(crop_tup)
if crop_img.size == res:
# cropped_images.append(crop_img)
cropped_images[grp[i_res]] = crop_img
else:
crop_img = crop_img.resize(res, resample=Image.LANCZOS)
# cropped_images.append(crop_img)
cropped_images[grp[i_res]] = crop_img
# Combine crops to a single canvas of the size of the actual desktop
# actual combined size of the display resolutions
canvas_tuple_fin = tuple(compute_canvas(RESOLUTION_ARRAY, DISPLAY_OFFSET_ARRAY))
combined_image = Image.new("RGB", canvas_tuple_fin, color=0)
combined_image.load()
# for i in range(len(cropped_images)):
# combined_image.paste(cropped_images[i], DISPLAY_OFFSET_ARRAY[i])
for crp_id in cropped_images:
combined_image.paste(cropped_images[crp_id], DISPLAY_OFFSET_ARRAY[crp_id])
# Saving combined image
outputfile, outputfile_old = alternating_outputfile(profile.name)
combined_image.save(outputfile, quality=95) # set quality if jpg is used, png unaffected
if profile.name == G_ACTIVE_PROFILE or force:
set_wallpaper(outputfile, force)
if os.path.exists(outputfile_old):
os.remove(outputfile_old)
return 0
def set_multi_image_wallpaper(profile, force):
"""Sets a distinct image on each monitor.
Since most platforms only support setting a single image
as the wallpaper this has to be accomplished by creating a
composite image based on the monitor offsets and then setting
the resulting image as the wallpaper.
"""
files = profile.next_wallpaper_files()
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(str(files))
img_resized = []
for file, res in zip(files, RESOLUTION_ARRAY):
# image = Image.open(file)
try:
image = Image.open(file)
        except UnidentifiedImageError:
            sp_logging.G_LOGGER.info(("Opening image '%s' failed with PIL.UnidentifiedImageError. "
                                      "It could be corrupted or is of foreign type."), file)
            return 1
img_resized.append(resize_to_fill(image, res))
canvas_tuple = tuple(compute_canvas(RESOLUTION_ARRAY, DISPLAY_OFFSET_ARRAY))
combined_image = Image.new("RGB", canvas_tuple, color=0)
combined_image.load()
for i in range(len(files)):
combined_image.paste(img_resized[i], DISPLAY_OFFSET_ARRAY[i])
outputfile, outputfile_old = alternating_outputfile(profile.name)
combined_image.save(outputfile, quality=95) # set quality if jpg is used, png unaffected
if profile.name == G_ACTIVE_PROFILE or force:
set_wallpaper(outputfile, force)
if os.path.exists(outputfile_old):
os.remove(outputfile_old)
return 0
def errcheck(result, func, args):
"""Error getter for Windows."""
if not result:
raise ctypes.WinError(ctypes.get_last_error())
def set_wallpaper(outputfile, force=False):
"""
Master method to set the composed image as wallpaper.
After the final background image is created, this method
is called to communicate with the host system to set the
desktop background. For Linux hosts there is a separate method.
"""
pltform = platform.system()
if pltform == "Windows":
spi_setdeskwallpaper = 20
spif_update_ini_file = 1
spif_send_change = 2
user32 = ctypes.WinDLL('user32', use_last_error=True)
spiw = user32.SystemParametersInfoW
spiw.argtypes = [
ctypes.c_uint,
ctypes.c_uint,
ctypes.c_void_p,
ctypes.c_uint]
spiw.restype = ctypes.c_int
spiw.errcheck = errcheck
spi_success = spiw(
spi_setdeskwallpaper,
0,
outputfile,
spif_update_ini_file | spif_send_change)
if spi_success == 0:
sp_logging.G_LOGGER.info("SystemParametersInfo wallpaper set failed with \
spi_success: '%s'", spi_success)
elif pltform == "Linux":
set_wallpaper_linux(outputfile, force)
elif pltform == "Darwin":
script = """/usr/bin/osascript<<END
tell application "Finder"
set desktop picture to POSIX file "%s"
end tell
END"""
subprocess.Popen(script % outputfile, shell=True)
else:
sp_logging.G_LOGGER.info("Unknown platform.system(): %s", pltform)
return 0
def set_wallpaper_linux(outputfile, force=False):
"""
Wallpaper setter for Linux hosts.
Functionality is based on the DESKTOP_SESSION environment variable,
    if it is not set, as is often the case on window managers such as i3, the default
behavior is to attempt to use feh as the communication layer with the
desktop.
On systems where the variable is set, a native way of setting the
wallpaper can be used. These are DE specific.
"""
file = "file://" + outputfile
set_command = G_SET_COMMAND_STRING
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info(file)
desk_env = os.environ.get("DESKTOP_SESSION")
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("DESKTOP_SESSION is: '%s'", desk_env)
if desk_env:
if set_command != "":
if set_command == "feh":
sp_logging.G_LOGGER.info("Using 'feh' command mode!")
subprocess.run(["feh", "--bg-scale", "--no-xinerama", outputfile])
else:
command_string_list = set_command.split()
formatted_command = []
for term in command_string_list:
formatted_command.append(term.format(image=outputfile))
sp_logging.G_LOGGER.info("Formatted custom command is: '%s'", formatted_command)
subprocess.run(formatted_command)
elif desk_env in ["gnome", "gnome-wayland", "gnome-xorg",
"unity", "ubuntu",
"pantheon", "budgie-desktop",
"pop"]:
subprocess.run(["gsettings", "set",
"org.gnome.desktop.background", "picture-uri",
file])
elif desk_env in ["cinnamon"]:
subprocess.run(["gsettings", "set",
"org.cinnamon.desktop.background", "picture-uri",
file])
elif desk_env in ["mate"]:
subprocess.run(["gsettings",
"set",
"org.mate.background",
"picture-filename",
outputfile])
elif desk_env in ["xfce", "xubuntu", "ubuntustudio"]:
xfce_actions(outputfile)
elif desk_env.lower() == "lubuntu" or "lxqt" in desk_env.lower():
try:
subprocess.run(["pcmanfm", "-w", outputfile])
except OSError:
try:
subprocess.run(["pcmanfm-qt", "-w", outputfile])
except OSError:
sp_logging.G_LOGGER.info("Exception: failure to find either command \
'pcmanfm' or 'pcmanfm-qt'. Exiting.")
sys.exit(1)
# elif desk_env in ["/usr/share/xsessions/plasma", "plasma"]:
elif running_kde():
kdeplasma_actions(outputfile, force)
elif "i3" in desk_env or desk_env in ["/usr/share/xsessions/bspwm"]:
subprocess.run(["feh", "--bg-scale", "--no-xinerama", outputfile])
else:
if set_command == "":
message = "Your DE could not be detected to set the wallpaper. \
You need to set the 'set_command' option in your \
settings file superpaper/general_settings. Exiting."
sp_logging.G_LOGGER.info(message)
show_message_dialog(message, "Error")
sys.exit(1)
else:
os.system(set_command.format(image=outputfile))
else:
sp_logging.G_LOGGER.info("DESKTOP_SESSION variable is empty, \
attempting to use feh to set the wallpaper.")
subprocess.run(["feh", "--bg-scale", "--no-xinerama", outputfile])
def set_wallpaper_piecewise(image_piece_list):
"""
Wallpaper setter that takes already cropped images and sets them
directly to corresponding monitors on systems where wallpapers
are set on a monitor by monitor basis.
This is used when the quick wallpaper change conditions are met,
see quick_profile_job method, to improve performance on these
systems.
Currently supported such systems are KDE Plasma and XFCE.
"""
pltform = platform.system()
if pltform == "Linux":
desk_env = os.environ.get("DESKTOP_SESSION")
# if desk_env in ["/usr/share/xsessions/plasma", "plasma"]:
if running_kde():
kdeplasma_actions(None, image_piece_list)
elif desk_env in ["xfce", "xubuntu", "ubuntustudio"]:
xfce_actions(None, image_piece_list)
else:
pass
return 0
def special_image_cropper(outputfile):
"""
Crops input image into monitor specific pieces based on display offsets.
This is needed on systems where the wallpapers are set on a per display basis.
This means that the composed image needs to be re-cut into pieces which
are saved separately.
"""
# file needs to be split into monitor pieces since KDE/XFCE are special
img = Image.open(outputfile)
outputname = os.path.splitext(outputfile)[0]
img_names = []
crop_id = 0
for res, offset in zip(RESOLUTION_ARRAY, DISPLAY_OFFSET_ARRAY):
left = offset[0]
top = offset[1]
right = left + res[0]
bottom = top + res[1]
crop_tuple = (left, top, right, bottom)
cropped_img = img.crop(crop_tuple)
fname = outputname + "-crop-" + str(crop_id) + ".png"
img_names.append(fname)
cropped_img.save(fname, "PNG")
crop_id += 1
return img_names
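# Illustrative sketch (not part of Superpaper itself): the per-display crop
# boxes that special_image_cropper() cuts out of the composed image, computed
# here for a made-up layout without touching any files.
def _example_monitor_crop_boxes():
    resolutions = [(2560, 1440), (1920, 1080)]
    offsets = [(0, 0), (2560, 360)]
    boxes = [(off[0], off[1], off[0] + res[0], off[1] + res[1])
             for res, off in zip(resolutions, offsets)]
    assert boxes == [(0, 0, 2560, 1440), (2560, 360, 4480, 1440)]
    return boxes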
def remove_old_temp_files(outputfile):
"""
This method looks for previous temp images and deletes them.
Currently only used to delete the monitor specific crops that are
needed for KDE and XFCE.
"""
opbase = os.path.basename(outputfile)
opname = os.path.splitext(opbase)[0]
# print(opname)
oldfileid = ""
if opname.endswith("-a"):
newfileid = "-a"
oldfileid = "-b"
# print(oldfileid)
elif opname.endswith("-b"):
newfileid = "-b"
oldfileid = "-a"
# print(oldfileid)
else:
pass
if oldfileid:
# Must take care that only temps of the current profile are deleted.
profilename = opname.strip()[:-2]
match_string = profilename + oldfileid + "-crop"
match_string = match_string.strip()
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Removing images matching with: '%s'",
match_string)
for temp_file in os.listdir(TEMP_PATH):
if match_string in temp_file:
# print(temp_file)
os.remove(os.path.join(TEMP_PATH, temp_file))
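# Example: if outputfile is ".../profile1-b.png", the new id is "-b" and any
# leftover "profile1-a-crop-*" pieces from the previous change are deleted from
# TEMP_PATH, while the freshly written "-b" crops are kept.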
def kdeplasma_actions(outputfile, image_piece_list = None, force=False):
"""
Sets the multi monitor wallpaper on KDE.
Arguments are path to an image and an optional image piece
list when one can set the wallpaper from existing cropped
images. If image pieces are to be used, call this method
with outputfile == None.
This is needed since KDE uses its own scripting language to
set the desktop background which sets a single image on every
monitor. This means that the composed image must be cut into
correct pieces that then are set to their respective displays.
"""
script = """
// make an array of all desktops with a valid screen
var desktopArray = [];
for(var desktopIndex in desktops()) {{
var desktop = desktops()[desktopIndex];
if(desktop.screen != -1) {{
desktopArray.push(desktop);
}}
}}
// sort the array based on the (vertical) desktop position
var i = 1;
while(i < desktopArray.length) {{
var j = i;
while(j > 0 && screenGeometry(desktopArray[j-1].screen).top > screenGeometry(desktopArray[j].screen).top) {{
var temp = desktopArray[j];
desktopArray[j] = desktopArray[j-1];
desktopArray[j-1] = temp;
j = j-1;
}}
i = i+1;
}}
// sort the array based on the (horizontal) desktop position
var i = 1;
while(i < desktopArray.length) {{
var j = i;
while(j > 0 && screenGeometry(desktopArray[j-1].screen).left > screenGeometry(desktopArray[j].screen).left) {{
var temp = desktopArray[j];
desktopArray[j] = desktopArray[j-1];
desktopArray[j-1] = temp;
j = j-1;
}}
i = i+1;
}}
var imageFileArray = Array({imagelist});
// set the desired wallpaper
var k = 0;
while(k < desktopArray.length) {{
var desktop = desktopArray[k];
desktop.wallpaperPlugin = "org.kde.image";
desktop.currentConfigGroup = Array("Wallpaper", "org.kde.image", "General");
desktop.writeConfig("Image", imageFileArray[k]);
k = k+1;
}}
"""
profname = None
if outputfile:
profname = os.path.splitext(os.path.basename(outputfile))[0][:-2]
img_names = special_image_cropper(outputfile)
elif not outputfile and image_piece_list:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("KDE: Using image piece list!")
img_names = image_piece_list
else:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Error! KDE actions called without arguments!")
return
filess_img_names = []
for fname in img_names:
filess_img_names.append("file://" + fname)
filess_img_names_str = ', '.join('"' + item + '"' for item in filess_img_names)
# print(script.format(imagelist=filess_img_names_str))
sessionb = dbus.SessionBus()
plasma_interface = dbus.Interface(
sessionb.get_object(
"org.kde.plasmashell",
"/PlasmaShell"),
dbus_interface="org.kde.PlasmaShell")
if profname == G_ACTIVE_PROFILE or image_piece_list or force:
plasma_interface.evaluateScript(
script.format(imagelist=filess_img_names_str)
)
# Delete old images after new ones are set
if outputfile:
remove_old_temp_files(outputfile)
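# For manual testing, roughly the same call can be made from a shell (tool name
# may vary between Plasma versions, e.g. qdbus vs qdbus-qt5):
#   qdbus org.kde.plasmashell /PlasmaShell org.kde.PlasmaShell.evaluateScript "$(cat script.js)"
# where script.js is the JavaScript above with the image list substituted in.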
def xfce_actions(outputfile, image_piece_list = None):
"""
Sets the multi monitor wallpaper on XFCE.
This is needed since XFCE uses its own scripting interface to
set the desktop background which sets a single image on every
monitor. This means that the composed image must be cut into
correct pieces that then are set to their respective displays.
"""
if outputfile:
img_names = special_image_cropper(outputfile)
elif not outputfile and image_piece_list:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("XFCE: Using image piece list!")
img_names = image_piece_list
else:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Error! XFCE actions called without arguments!")
return
monitors = []
for mon_index in range(NUM_DISPLAYS):
monitors.append("monitor" + str(mon_index))
read_prop = subprocess.Popen(["xfconf-query",
"-c",
"xfce4-desktop",
"-p",
"/backdrop",
"-l"],
stdout=subprocess.PIPE)
props = read_prop.stdout.read().decode("utf-8").split("\n")
for prop in props:
for monitor, imgname in zip(monitors, img_names):
if monitor in prop:
if "last-image" in prop or "image-path" in prop:
os.system(
"xfconf-query -c xfce4-desktop -p "
+ prop
+ " -s ''")
os.system(
"xfconf-query -c xfce4-desktop -p "
+ prop
+ " -s '%s'" % imgname)
if "image-show" in prop:
os.system(
"xfconf-query -c xfce4-desktop -p "
+ prop
+ " -s 'true'")
# Delete old images after new ones are set
if outputfile:
remove_old_temp_files(outputfile)
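# For reference, the property list read above typically contains entries like
#   /backdrop/screen0/monitor0/workspace0/last-image
#   /backdrop/screen0/monitor0/workspace0/image-show
# (exact paths vary between XFCE versions), so the loop above ends up running
# commands such as
#   xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image -s '<piece.png>'
#   xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/image-show -s 'true'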
def change_wallpaper_job(profile, force=False):
"""Centralized wallpaper method that calls setter algorithm based on input prof settings.
When force, skip the profile name check
"""
with G_WALLPAPER_CHANGE_LOCK:
if profile.spanmode.startswith("single") and profile.ppimode is False:
thrd = Thread(target=span_single_image_simple, args=(profile, force), daemon=True)
thrd.start()
elif ((profile.spanmode.startswith("single") and profile.ppimode is True) or
profile.spanmode.startswith("advanced")):
thrd = Thread(target=span_single_image_advanced, args=(profile, force), daemon=True)
thrd.start()
elif profile.spanmode.startswith("multi"):
thrd = Thread(target=set_multi_image_wallpaper, args=(profile, force), daemon=True)
thrd.start()
else:
sp_logging.G_LOGGER.info("Unkown profile spanmode: %s", profile.spanmode)
return None
return thrd
def run_profile_job(profile):
"""This method executes the input profile as the profile is configured."""
global G_ACTIVE_DISPLAYSYSTEM
# get_display_data() # Check here so new profile has fresh data.
refresh_display_data() # Refresh available display data.
repeating_timer = None
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("running profile job with profile: %s", profile.name)
if not profile.slideshow:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Running a one-off wallpaper change.")
thrd = change_wallpaper_job(profile)
elif profile.slideshow:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Running wallpaper slideshow.")
thrd = change_wallpaper_job(profile)
repeating_timer = RepeatedTimer(
profile.delay_list[0], change_wallpaper_job, profile)
return (repeating_timer, thrd)
def quick_profile_job(profile):
"""
At startup and profile change, switch to old temp wallpaper.
Since the image processing takes some time, in order to carry
out actions quickly at startup or at user request, set the old
temp image of the requested profile as the wallpaper.
"""
with G_WALLPAPER_CHANGE_LOCK:
# Look for old temp image:
files = [i for i in os.listdir(TEMP_PATH)
if os.path.isfile(os.path.join(TEMP_PATH, i))
and (i.startswith(profile.name + "-a") or
i.startswith(profile.name + "-b"))]
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("quickswitch file lookup: %s", files)
if files:
image_pieces = [os.path.join(TEMP_PATH, i) for i in files
if "-crop-" in i]
if use_image_pieces() and image_pieces:
image_pieces.sort()
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Use wallpaper crop pieces: %s",
image_pieces)
thrd = Thread(target=set_wallpaper_piecewise,
args=(image_pieces,),
daemon=True)
thrd.start()
else:
thrd = Thread(target=set_wallpaper,
args=(os.path.join(TEMP_PATH, files[0]),),
daemon=True)
thrd.start()
else:
if sp_logging.DEBUG:
sp_logging.G_LOGGER.info("Old file for quickswitch was not found. %s",
files)
def use_image_pieces():
"""Determine if it improves perfomance to use existing image pieces.
Systems that use image pieces are: KDE, XFCE.
"""
pltform = platform.system()
if pltform == "Linux":
desk_env = os.environ.get("DESKTOP_SESSION")
# if desk_env in ["/usr/share/xsessions/plasma", "plasma"]:
if running_kde():
return True
elif desk_env in ["xfce", "xubuntu", "ubuntustudio"]:
return True
else:
return False
else:
return False
|
bzhou26/CMPUT403
|
kattis done/allaboutthatbase/allaboutthatbase.py
|
'''
Kattis ID: < allaboutthatbase >
Topic: (Arithmetic)
Level: 1 point
Brief problem description:
We interpret the number 72345 (in base 10) as:
7×10^4 + 2×10^3 + 3×10^2 + 4×10^1 + 5×10^0.
Your job is to determine the bases in which given arithmetic
expressions are valid. We define an expression as valid in
base B if two conditions are true.
First, all the operands used are interpretable in base B
as having values in the decimal range [1, 2^32 - 1].
Second, the expression is true. Any arbitrary expression might
be valid in zero, one, or more bases. In this problem we will
only consider bases 1–36, where base 1 is unary.
Solution Summary:
Create two dictionaries for converting from char to number
and from number to char.
1. Find proper minimum base number by finding the highest value of
characters in the three numbers.
2. Calculate each expression based on the base number (guess from
minimum base number to 36), then check if it is valid.
3. If you have found at least one valid base number, after that the
next base number you guess is invalid, then break.
Used Resources: None
I hereby certify that I have produced the following solution myself
using the resources listed above in accordance with the CMPUT 403
collaboration policy.
--- <NAME>
'''
import sys
inverse_base = {1:"1",
2:"2",
3:"3",
4:"4",
5:"5",
6:"6",
7:"7",
8:"8",
9:"9",
10:"a",
11:"b",
12:"c",
13:"d",
14:"e",
15:"f",
16:"g",
17:"h",
18:"i",
19:"j",
20:"k",
21:"l",
22:"m",
23:"n",
24:"o",
25:"p",
26:"q",
27:"r",
28:"s",
29:"t",
30:"u",
31:"v",
32:"w",
33:"x",
34:"y",
35:"z",
36:"0"}
base = {"1":1,
"2":2,
"3":3,
"4":4,
"5":5,
"6":6,
"7":7,
"8":8,
"9":9,
"a":10,
"b":11,
"c":12,
"d":13,
"e":14,
"f":15,
"g":16,
"h":17,
"i":18,
"j":19,
"k":20,
"l":21,
"m":22,
"n":23,
"o":24,
"p":25,
"q":26,
"r":27,
"s":28,
"t":29,
"u":30,
"v":31,
"w":32,
"x":33,
"y":34,
"z":35,
"0":0}
def valid(n1,op,n2,n3,base_number):
global base
i=0
j=0
k=0
value1=0
value2=0
value3=0
while(i<len(n1)):
value1+=base.get(n1[i])*(base_number**(len(n1)-i-1))
i+=1
while(j<len(n2)):
value2+=base.get(n2[j])*(base_number**(len(n2)-j-1))
j+=1
if (op=="/"):
result = value1/value2
elif (op=="*"):
result = value1*value2
elif (op=="+"):
result = value1+value2
elif (op=="-"):
result = value1-value2
while(k<len(n3)):
value3+=base.get(n3[k])*(base_number**(len(n3)-k-1))
k+=1
if result == value3:
return 1
else:
return 0
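# Worked example: for the expression "17 + 1 = 20" tried in base 8,
# valid("17", "+", "1", "20", 8) evaluates 1*8 + 7 = 15 and 2*8 + 0 = 16,
# and since 15 + 1 == 16 it returns 1, so base 8 is reported as valid.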
def oneCheck(n1,n2,n3):
for char in n1+n2+n3:
if char != "1":
return 0
return 1
def findMinBase(n1,n2,n3):
one_check = oneCheck(n1,n2,n3)
if one_check == 1:
return 1
global inverse_base
value = 0
for char in n1+n2+n3:
if base.get(char)>value:
value = base.get(char)
return value+1
def main():
global inverse_base
#file_name = "a.in"
#f = open(file_name,'r')
next(sys.stdin)
for line in sys.stdin:
#print(line)
count = 0
read_list=line.split()
base_number = findMinBase(read_list[0],read_list[2],read_list[4])
#base_number = 0
while(base_number<37):
check = valid(read_list[0], read_list[1], read_list[2], read_list[4], base_number)
if check == 1:
count+=1
print(inverse_base.get(base_number), end='')
elif check == 0 and count != 0:
break;
base_number+=1
if count == 0:
print("invalid",end='')
print()
#if __name__=="__main__":
main()
|
bzhou26/CMPUT403
|
contest/0122/1.py
|
import itertools
for i in itertools.permutations('abcd',4):
print (''.join(i))
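# Prints all 4! = 24 orderings of 'abcd', one per line: abcd, abdc, ..., dcba.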
|
bzhou26/CMPUT403
|
contest/0122/sums.py
|
import sys
from itertools import permutations
#def choose(l):
#global cont
#if cont = len(object)
def main():
file_list = []
num_set = []
query_set = []
distance = set()
cont=0
alldis=set()
#for line in sys.stdin:
#n = int(line);
#i=0
#while(i<n):
#for line in sys.stdin:
#num_set.append(int(line))
#i += 1
#for line in sys.stdin:
filename="F.0.in"
f = open(filename,"r")
for line in f:
file_list.append(int(line))
i=0
pos = 0
n=0
m=0
while (pos < len(file_list)):
if i == 0:
n = file_list[pos]
pos += 1
while (i<n):
num_set.append(str(file_list[pos]))
i+=1
pos+=1
i=0
m = file_list[pos]
pos += 1
while (i<m):
query_set.append(file_list[pos])
i+=1
pos +=1
i=0
##
ii=0
num_string = "".join(num_set)
distance = (list(permutations(num_string)))
for c in distance:
alldis.add(abs(int(c[1])-int(c[2])))
mindis = float('inf')
iii = 0
for p in alldis:
if abs(p - query_set[iii]) < mindis:
mindis = abs(p - query_set[iii])
main()
|
bzhou26/CMPUT403
|
contest/aa.py
|
import sys
def main():
kase = int(input())
while (kase>0):
result = set()
kase -= 1;
num = int(input())
while (num > 0):
num -= 1
dis = input()
result.add(dis)
print(len(result))
#if __name__=="__main__":
main()
|
ramadhanqa/tugas3_datmining
|
analisa-kprototype.py
|
#!/usr/bin/env python
import numpy as np
from kmodes.kprototypes import KPrototypes
# stocks with their market caps, sectors and countries
syms = np.genfromtxt('data-prototype.csv', dtype=str, delimiter=',')[:, 0]
X = np.genfromtxt('data-prototype.csv', dtype=object, delimiter=',')[:, 1:]
X[:, 0] = X[:, 0].astype(float)
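# Assumed layout of data-prototype.csv (one row per stock, no header), e.g.:
#   AAPL,2300.5,Technology,USA
# i.e. symbol, market cap (numerical), sector and country (categorical); after
# dropping the symbol column, columns 1 and 2 of X are the categorical ones
# passed to fit_predict below.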
kproto = KPrototypes(n_clusters=4, init='Cao', verbose=2)
clusters = kproto.fit_predict(X, categorical=[1, 2])
# Print cluster centroids of the trained model.
print(kproto.cluster_centroids_)
# Print training statistics
print(kproto.cost_)
print(kproto.n_iter_)
for s, c in zip(syms, clusters):
print("Symbol: {}, cluster:{}".format(s, c))
|
ludeeus/custom_component_myfitnesspal
|
custom_components/my_fitnesspal/config_flow.py
|
import voluptuous as vol
import logging
from collections import OrderedDict
from datetime import datetime, date
from homeassistant import config_entries
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client
from homeassistant.util import slugify
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(DOMAIN)
class RecyclingFlowHandler(config_entries.ConfigFlow):
"""Config flow for myfitnespal component."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self) -> None:
"""Initialize myfitnesspal configuration flow."""
self._errors = {}
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
self._errors = {}
if user_input is not None:
is_ok = await self._check_user(
user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
)
if is_ok:
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
else:
self._errors["base"] = "wrong_credentials"
return await self._show_config_form(
user_input[CONF_NAME],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
)
return await self._show_config_form()
async def _show_config_form(
self, name: str = None, user_name: str = None, password: str = None
):
"""Show the configuration form to edit location data."""
data_schema = OrderedDict()
data_schema[vol.Required(CONF_NAME, default=name)] = str
data_schema[vol.Required(CONF_USERNAME, default=user_name)] = str
data_schema[vol.Required(CONF_PASSWORD, default=password)] = str
return self.async_show_form(
step_id="user", data_schema=vol.Schema(data_schema), errors=self._errors
)
async def _check_user(self, user_name: str, password: str) -> bool:
"""Return true if location is ok."""
self._flow_user_name = user_name
self._flow_password = password
# Sadly a sync lib
return await self.hass.async_add_executor_job(self._sync_make_api_call)
def _sync_make_api_call(self) -> bool:
"""syncronous call to the api"""
import myfitnesspal as ext_myfitnesspal
try:
today = date.today()
client = ext_myfitnesspal.Client(self._flow_user_name, self._flow_password)
info = client.get_date(today.year, today.month, today.day)
if info is not None:
return True
except:
# The API will throw an exception on faulty credentials
pass
return False
|
ludeeus/custom_component_myfitnesspal
|
custom_components/my_fitnesspal/const.py
|
"""Constants in myfitnesspal component."""
DOMAIN = "my_fitnesspal"
ATTRIBUTION = "Myfitnesspal.com"
NAME = "MyFitnessPal"
DEFAULT_NAME = "myfitnesspal"
ICON = "mdi:run-fast"
VERSION = "0.0.1"
ISSUE_URL = "https://github.com/helto4real/custom_component_myfitnesspal/issues"
SENSOR = "sensor"
PLATFORMS = [SENSOR]
STARTUP_MESSAGE = f"""
-------------------------------------------------------------------
{NAME}
Version: {VERSION}
This is a custom integration!
If you have any issues with this you need to open an issue here:
{ISSUE_URL}
-------------------------------------------------------------------
"""
|
shawwn/tensorboard
|
tensorboard/dataclass_compat_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tensorboard.dataclass_compat`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from google.protobuf import message
from tensorboard import dataclass_compat
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.compat.proto import event_pb2
from tensorboard.compat.proto import graph_pb2
from tensorboard.compat.proto import node_def_pb2
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.audio import summary as audio_summary
from tensorboard.plugins.graph import metadata as graphs_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.histogram import summary as histogram_summary
from tensorboard.plugins.hparams import metadata as hparams_metadata
from tensorboard.plugins.hparams import summary_v2 as hparams_summary
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.plugins.scalar import summary as scalar_summary
from tensorboard.util import tensor_util
from tensorboard.util import test_util
tf.compat.v1.enable_eager_execution()
class MigrateEventTest(tf.test.TestCase):
"""Tests for `migrate_event`."""
def _migrate_event(self, old_event, initial_metadata=None):
"""Like `migrate_event`, but performs some sanity checks."""
if initial_metadata is None:
initial_metadata = {}
old_event_copy = event_pb2.Event()
old_event_copy.CopyFrom(old_event)
new_events = dataclass_compat.migrate_event(
old_event, initial_metadata=initial_metadata
)
for event in new_events: # ensure that wall time and step are preserved
self.assertEqual(event.wall_time, old_event.wall_time)
self.assertEqual(event.step, old_event.step)
return new_events
def test_irrelevant_event_passes_through(self):
old_event = event_pb2.Event()
old_event.file_version = "brain.Event:wow"
new_events = self._migrate_event(old_event)
self.assertLen(new_events, 1)
self.assertIs(new_events[0], old_event)
def test_unknown_summary_passes_through(self):
old_event = event_pb2.Event()
value = old_event.summary.value.add()
value.metadata.plugin_data.plugin_name = "magic"
value.metadata.plugin_data.content = b"123"
value.tensor.CopyFrom(tensor_util.make_tensor_proto([1, 2]))
new_events = self._migrate_event(old_event)
self.assertLen(new_events, 1)
self.assertIs(new_events[0], old_event)
def test_already_newstyle_summary_passes_through(self):
# ...even when it's from a known plugin and would otherwise be migrated.
old_event = event_pb2.Event()
old_event.summary.ParseFromString(
scalar_summary.pb(
"foo", 1.25, display_name="bar", description="baz"
).SerializeToString()
)
metadata = old_event.summary.value[0].metadata
metadata.data_class = summary_pb2.DATA_CLASS_TENSOR # note: not scalar
new_events = self._migrate_event(old_event)
self.assertLen(new_events, 1)
self.assertIs(new_events[0], old_event)
def test_doesnt_add_metadata_to_later_steps(self):
old_events = []
for step in range(3):
e = event_pb2.Event()
e.step = step
summary = scalar_summary.pb("foo", 0.125)
if step > 0:
for v in summary.value:
v.ClearField("metadata")
e.summary.ParseFromString(summary.SerializeToString())
old_events.append(e)
initial_metadata = {}
new_events = []
for e in old_events:
migrated = self._migrate_event(e, initial_metadata=initial_metadata)
new_events.extend(migrated)
self.assertLen(new_events, len(old_events))
self.assertEqual(
{
e.step
for e in new_events
for v in e.summary.value
if v.HasField("metadata")
},
{0},
)
def test_scalar(self):
old_event = event_pb2.Event()
old_event.step = 123
old_event.wall_time = 456.75
old_event.summary.ParseFromString(
scalar_summary.pb(
"foo", 1.25, display_name="bar", description="baz"
).SerializeToString()
)
new_events = self._migrate_event(old_event)
self.assertLen(new_events, 1)
self.assertLen(new_events[0].summary.value, 1)
value = new_events[0].summary.value[0]
tensor = tensor_util.make_ndarray(value.tensor)
self.assertEqual(tensor.shape, ())
self.assertEqual(tensor.item(), 1.25)
self.assertEqual(
value.metadata.data_class, summary_pb2.DATA_CLASS_SCALAR
)
self.assertEqual(
value.metadata.plugin_data.plugin_name, scalar_metadata.PLUGIN_NAME
)
def test_histogram(self):
old_event = event_pb2.Event()
old_event.step = 123
old_event.wall_time = 456.75
histogram_pb = histogram_summary.pb(
"foo",
[1.0, 2.0, 3.0, 4.0],
bucket_count=12,
display_name="bar",
description="baz",
)
old_event.summary.ParseFromString(histogram_pb.SerializeToString())
new_events = self._migrate_event(old_event)
self.assertLen(new_events, 1)
self.assertLen(new_events[0].summary.value, 1)
value = new_events[0].summary.value[0]
tensor = tensor_util.make_ndarray(value.tensor)
self.assertEqual(tensor.shape, (12, 3))
np.testing.assert_array_equal(
tensor, tensor_util.make_ndarray(histogram_pb.value[0].tensor)
)
self.assertEqual(
value.metadata.data_class, summary_pb2.DATA_CLASS_TENSOR
)
self.assertEqual(
value.metadata.plugin_data.plugin_name,
histogram_metadata.PLUGIN_NAME,
)
def test_audio(self):
logdir = self.get_temp_dir()
steps = (0, 1, 2)
with test_util.FileWriter(logdir) as writer:
for step in steps:
event = event_pb2.Event()
event.step = step
event.wall_time = 456.75 * step
audio = tf.reshape(
tf.linspace(0.0, 100.0, 4 * 10 * 2), (4, 10, 2)
)
audio_pb = audio_summary.pb(
"foo",
audio,
labels=["one", "two", "three", "four"],
sample_rate=44100,
display_name="bar",
description="baz",
)
writer.add_summary(
audio_pb.SerializeToString(), global_step=step
)
files = os.listdir(logdir)
self.assertLen(files, 1)
event_file = os.path.join(logdir, files[0])
loader = event_file_loader.RawEventFileLoader(event_file)
input_events = [event_pb2.Event.FromString(x) for x in loader.Load()]
new_events = []
initial_metadata = {}
for input_event in input_events:
migrated = self._migrate_event(
input_event, initial_metadata=initial_metadata
)
new_events.extend(migrated)
self.assertLen(new_events, 4)
self.assertEqual(new_events[0].WhichOneof("what"), "file_version")
for step in steps:
with self.subTest("step %d" % step):
new_event = new_events[step + 1]
self.assertLen(new_event.summary.value, 1)
value = new_event.summary.value[0]
tensor = tensor_util.make_ndarray(value.tensor)
self.assertEqual(
tensor.shape, (3,)
) # 4 clipped to max_outputs=3
self.assertStartsWith(tensor[0], b"RIFF")
self.assertStartsWith(tensor[1], b"RIFF")
if step == min(steps):
metadata = value.metadata
self.assertEqual(
metadata.data_class,
summary_pb2.DATA_CLASS_BLOB_SEQUENCE,
)
self.assertEqual(
metadata.plugin_data.plugin_name,
audio_metadata.PLUGIN_NAME,
)
else:
self.assertFalse(value.HasField("metadata"))
def test_hparams(self):
old_event = event_pb2.Event()
old_event.step = 0
old_event.wall_time = 456.75
hparams_pb = hparams_summary.hparams_pb({"optimizer": "adam"})
# Simulate legacy event with no tensor content
for v in hparams_pb.value:
v.ClearField("tensor")
old_event.summary.CopyFrom(hparams_pb)
new_events = self._migrate_event(old_event)
self.assertLen(new_events, 1)
self.assertLen(new_events[0].summary.value, 1)
value = new_events[0].summary.value[0]
self.assertEqual(value.tensor, hparams_metadata.NULL_TENSOR)
self.assertEqual(
value.metadata.data_class, summary_pb2.DATA_CLASS_TENSOR
)
self.assertEqual(
value.metadata.plugin_data,
hparams_pb.value[0].metadata.plugin_data,
)
def test_graph_def(self):
# Create a `GraphDef` and write it to disk as an event.
logdir = self.get_temp_dir()
writer = test_util.FileWriter(logdir)
graph_def = graph_pb2.GraphDef()
graph_def.node.add(name="alice", op="Person")
graph_def.node.add(name="bob", op="Person")
graph_def.node.add(
name="friendship", op="Friendship", input=["alice", "bob"]
)
writer.add_graph(graph=None, graph_def=graph_def, global_step=123)
writer.flush()
# Read in the `Event` containing the written `graph_def`.
files = os.listdir(logdir)
self.assertLen(files, 1)
event_file = os.path.join(logdir, files[0])
self.assertIn("tfevents", event_file)
loader = event_file_loader.RawEventFileLoader(event_file)
events = [event_pb2.Event.FromString(x) for x in loader.Load()]
self.assertLen(events, 2)
self.assertEqual(events[0].WhichOneof("what"), "file_version")
self.assertEqual(events[1].WhichOneof("what"), "graph_def")
old_event = events[1]
new_events = self._migrate_event(old_event)
self.assertLen(new_events, 2)
self.assertIs(new_events[0], old_event)
new_event = new_events[1]
self.assertEqual(new_event.WhichOneof("what"), "summary")
self.assertLen(new_event.summary.value, 1)
tensor = tensor_util.make_ndarray(new_event.summary.value[0].tensor)
self.assertEqual(
new_event.summary.value[0].metadata.data_class,
summary_pb2.DATA_CLASS_BLOB_SEQUENCE,
)
self.assertEqual(
new_event.summary.value[0].metadata.plugin_data.plugin_name,
graphs_metadata.PLUGIN_NAME,
)
self.assertEqual(tensor.shape, (1,))
new_graph_def_bytes = tensor[0]
self.assertIsInstance(new_graph_def_bytes, bytes)
self.assertGreaterEqual(len(new_graph_def_bytes), 16)
new_graph_def = graph_pb2.GraphDef.FromString(new_graph_def_bytes)
self.assertProtoEquals(graph_def, new_graph_def)
if __name__ == "__main__":
tf.test.main()
|
bionanoimaging/cellSTORM-pyTorch
|
train.py
|
import time
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
"""
This module builds a standard pix2pix image-to-image GAN based on the work of
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
We aim to recover blurred, compressed and noisy images from dSTORM acquisition
coming from a cellphone. It accepts image pairs (.png) A-to-B where the images
are concatenated vertically.
# HOW TO USE? (command line)
#### BIG DATASET
cd /home/diederich/Documents/STORM/PYTHON/pytorch-CycleGAN-and-pix2pix/
# start server
python -m visdom.server &
python train.py \
--dataroot /home/diederich/Documents/STORM/DATASET_NN/02_Datapairs/MOV_2018_03_06_11_43_47_randomBlink2500_lines_ISO6400_texp_1_125testSTORM_4000frames_2500emitter_dense_256px_params_png_frames_shifted_combined \
--ndf 32 \
--ngf 32 \
--which_model_netG unet_256 \
--model pix2pix \
--which_direction AtoB \
--dataset_mode aligned \
--norm batch \
--pool_size 0 \
--save_latest_freq 1000 \
--batchSize 4 \
--input_nc 1 \
--output_nc 1 \
--gpu_ids 0,1 \
--loadSize 256 \
--fineSize 256 \
--lr 0.0001 \
--beta1 0.5 \
--display_freq 100 \
--name random_blink_psf_bkgr_nocheckerboard_gtpsf_V5_shifted_UNET_lambda_A_1000_lambda_cGAN_0.5_ISO_6400_random_lines \
--lambda_A 1000 \
--lambda_cGAN 2 \
--no_lsgan \
"""
opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
model = create_model(opt)
visualizer = Visualizer(opt)
total_steps = 0
for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
epoch_iter = 0
for i, data in enumerate(dataset):
iter_start_time = time.time()
visualizer.reset()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
model.set_input(data)
model.optimize_parameters()
if total_steps % opt.display_freq == 0:
save_result = total_steps % opt.update_html_freq == 0
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_steps % opt.print_freq == 0:
errors = model.get_current_errors()
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
if opt.display_id > 0:
visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' %
(epoch, total_steps))
model.save('latest')
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' %
(epoch, total_steps))
model.save('latest')
model.save(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
model.update_learning_rate()
|
bionanoimaging/cellSTORM-pyTorch
|
test_multipleVideo.py
|
import time
import os
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html
import numpy as np
import tifffile
from PIL import Image
import glob, os
"""
This module builds a standard pix2pix image-to-image GAN based on the work of
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
We aim to recover blurred, compressed and noisy images from dSTORM acquisition
coming from a cellphone. It accepts image pairs (.png) A-to-B where the images
are concatenated vertically.
WARNING: It saves the images to TIFF stacks (BIG TIFF - always appending the files to the end)
# HOW TO USE? (command line)
cd /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/PYTHON/cellSTORM_pytorch/
python test.py \
--dataroot /home/diederich/Documents/STORM/DATASET_NN/04_UNPROCESSED_RAW_HW/2018-01-23_17.53.21_oldSample_ISO3200_10xEypiece_texp_1_30_256 \
--ndf 32 \
--ngf 32 \
--which_model_netG unet_256 \
--dataset_mode aligned \
--norm batch \
--input_nc 1 \
--output_nc 1 \
--gpu_ids 0,1 \
--loadSize 256 \
--fineSize 256 \
--name random_blink_psf_bkgr_nocheckerboard_gtpsf_V5_shifted_UNET \
--how_many 100000
##############
for reconstructing a video:
cd /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/PYTHON/cellSTORM_pytorch/
python test.py \
--dataroot /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/MATLAB/Alex_Images_Vergleich/Stack/TestSNR_Compression_nphotons_1000_compression_10.m4v \
--which_direction AtoB \
--dataset_mode aligned \
--norm batch \
--no_dropout \
--ndf 64 --ngf 64 \
--name alltogether_2 \
--how_many 50000 \
--which_model_netG unet_256 \
--is_video 1 \
--roisize 256 \
--xcenter 128 \
--ycenter 128
# x/ycenter are the center coordinates around the roi with size roisize is cropped out
##############
for reconstructing a batch of videos:
cd /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/PYTHON/cellSTORM_pytorch/
python test.py \
--dataroot /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/MATLAB/Alex_Images_Vergleich/Stack/ \ # stack of all video files
--which_direction AtoB \
--dataset_mode aligned \
--norm batch \
--no_dropout \
--ndf 64 --ngf 64 \
--name alltogether_2 \
--how_many 50000 \
--which_model_netG unet_256 \
--is_video 1 \
--roisize 256 \
--xcenter 128 \
--ycenter 128
# x/ycenter are the center coordinates around the roi with size roisize is cropped out
"""
# define input parameters
opt = TestOptions().parse()
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batchSize = 1 # test code only supports batchSize = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
opt.which_direction = 'AtoB'
opt.finesize = opt.padwidth*2+opt.roisize
opt.loadsize = opt.padwidth*2+opt.roisize
opt.how_many = 1000000
allvideos_dataroot = opt.dataroot
for file in os.listdir(allvideos_dataroot):
if file.endswith(".m4v"):
opt.dataroot = os.path.join(allvideos_dataroot, file)
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)
# accept only grayscale images
opt.input_nc = 1
opt.output_nc = 1
# create filedir according to the filename
dataroot_name = opt.dataroot.split('/')[-1]
myfile_dir = ('./myresults/' + dataroot_name + '_' + opt.name)
if not os.path.exists(myfile_dir):
os.makedirs(myfile_dir)
# create filenames
realA_filename = myfile_dir + '/realA.tiff'
realB_filename = myfile_dir + '/realB.tiff'
fakeB_filename = myfile_dir + '/fakeB.tiff'
# test
for i, data in enumerate(dataset):
if i >= opt.how_many:
break
model.set_input(data)
model.test()
visuals = model.get_current_visuals()
_, i_filename = os.path.split("".join(data['B_paths']))
print(str(i)+': process image... name: ' + i_filename)
# realA
name_realA, val_realA = list(visuals.items())[0]
val_realA = np.squeeze(val_realA[:,:,0])
tifffile.imsave(realA_filename, val_realA, append=True, bigtiff=True)
# fakeB
name_fakeB, val_fakeB = list(visuals.items())[1]
val_fakeB = np.squeeze(val_fakeB[:,:,0])
tifffile.imsave(fakeB_filename, val_fakeB, append=True, bigtiff=True)
# realB
if not(opt.is_video):
name_realB, val_realB = list(visuals.items())[2]
val_realB = np.squeeze(val_realB[:,:,0])
tifffile.imsave(realB_filename, val_realB, append=True, bigtiff=True)
|
bionanoimaging/cellSTORM-pyTorch
|
data/aligned_dataset.py
|
import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
from PIL import Image
import numpy as np
import scipy.ndimage
class AlignedDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_AB = os.path.join(opt.dataroot, opt.phase)
self.AB_paths = sorted(make_dataset(self.dir_AB))#,key=lambda x: int(os.path.splitext(x)[0]))
self.AB_paths.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
assert(opt.resize_or_crop == 'resize_and_crop')
def __getitem__(self, index):
AB_path = self.AB_paths[index]
AB = Image.open(AB_path).convert('RGB')
#AB = AB.resize((self.opt.loadSize * 2, self.opt.loadSize), Image.BICUBIC)
AB = transforms.ToTensor()(AB)
w_total = AB.size(2)
w = int(w_total / 2)
A = AB[:, :, 0:w]
B = AB[:, :, w-1:-1]
A = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
B = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B)
tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
A = tmp.unsqueeze(0)
tmp = B[0, ...] * 0.299 + B[1, ...] * 0.587 + B[2, ...] * 0.114
B = tmp.unsqueeze(0)
return {'A': A, 'B': B,
'A_paths': AB_path, 'B_paths': AB_path}
def __len__(self):
return len(self.AB_paths)
def name(self):
return 'AlignedDataset'
|
bionanoimaging/cellSTORM-pyTorch
|
test.py
|
import time
import os
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html
import numpy as np
import tifffile
from PIL import Image
"""
This module builds a standard pix2pix image-to-image GAN based on the work of
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
We aim to recover blurred, compressed and noisy images from dSTORM acquisition
coming from a cellphone. It accepts image pairs (.png) A-to-B where the images
are concatenated vertically.
WARNING: It saves the images to TIFF stacks (BIG TIFF - always appending the files to the end)
# HOW TO USE? (command line)
cd /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/PYTHON/cellSTORM_pytorch/
python test.py \
--dataroot /home/diederich/Documents/STORM/DATASET_NN/04_UNPROCESSED_RAW_HW/2018-01-23_17.53.21_oldSample_ISO3200_10xEypiece_texp_1_30_256 \
--ndf 32 \
--ngf 32 \
--which_model_netG unet_256 \
--dataset_mode aligned \
--norm batch \
--input_nc 1 \
--output_nc 1 \
--gpu_ids 0,1 \
--loadSize 256 \
--fineSize 256 \
--name random_blink_psf_bkgr_nocheckerboard_gtpsf_V5_shifted_UNET \
--how_many 100000
##############
for reconstructing a video:
cd /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/PYTHON/cellSTORM_pytorch/
python test.py \
--dataroot /media/useradmin/Data/Benedict/Dropbox/Dokumente/Promotion/PROJECTS/STORM/MATLAB/Alex_Images_Vergleich/Stack/TestSNR_Compression_nphotons_1000_compression_10.m4v \
--which_direction AtoB \
--dataset_mode aligned \
--norm batch \
--no_dropout \
--ndf 64 --ngf 64 \
--name alltogether_2 \
--how_many 50000 \
--which_model_netG unet_256 \
--is_video 1 \
--roisize 256 \
--xcenter 128 \
--ycenter 128
# x/ycenter are the center coordinates around the roi with size roisize is cropped out
"""
# define input parameters
opt = TestOptions().parse()
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batchSize = 1 # test code only supports batchSize = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
opt.which_direction = 'AtoB'
opt.finesize = opt.padwidth*2+opt.roisize
opt.loadsize = opt.padwidth*2+opt.roisize
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)
# accept only grayscale images
opt.input_nc = 1
opt.output_nc = 1
# create filedir according to the filename
dataroot_name = opt.dataroot.split('/')[-1]
myfile_dir = ('./myresults/' + dataroot_name + '_' + opt.name)
if not os.path.exists(myfile_dir):
os.makedirs(myfile_dir)
# create filenames
realA_filename = myfile_dir + '/realA.tiff'
realB_filename = myfile_dir + '/realB.tiff'
fakeB_filename = myfile_dir + '/fakeB.tiff'
# test
for i, data in enumerate(dataset):
if i >= opt.how_many:
break
model.set_input(data)
model.test()
visuals = model.get_current_visuals()
_, i_filename = os.path.split("".join(data['B_paths']))
print(str(i)+': process image... name: ' + i_filename)
# realA
name_realA, val_realA = list(visuals.items())[0]
val_realA = np.squeeze(val_realA[:,:,0])
tifffile.imsave(realA_filename, val_realA, append=True, bigtiff=True)
# fakeB
name_fakeB, val_fakeB = list(visuals.items())[1]
val_fakeB = np.squeeze(val_fakeB[:,:,0])
tifffile.imsave(fakeB_filename, val_fakeB, append=True, bigtiff=True)
# realB
if not(opt.is_video):
name_realB, val_realB = list(visuals.items())[2]
val_realB = np.squeeze(val_realB[:,:,0])
tifffile.imsave(realB_filename, val_realB, append=True, bigtiff=True)
|
bionanoimaging/cellSTORM-pyTorch
|
data/video_dataset.py
|
import os.path
import random
import torchvision.transforms as transforms
import torch
from data.base_dataset import BaseDataset
from data.image_folder import make_dataset
from PIL import Image
import skvideo.io
import matplotlib as plt
import numpy as np
class VideoDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.dir_AB = opt.dataroot
# open videoreader
self.videogen = skvideo.io.vreader(self.dir_AB )
# define roisize and center where each frame will be extracted
self.roisize = opt.roisize #512
self.padwidth = opt.padwidth
self.xcenter = opt.xcenter
self.ycenter = opt.ycenter
def __getitem__(self, index):
# assign dummy variable
AB_path = 'video'
# read frame
frame = next(self.videogen)
# if no center is chosen, select the videos center
if self.xcenter == -1:
self.xcenter = int(frame.shape[0]/2)
print('New xcenter: ' + str(self.xcenter))
if self.ycenter == -1:
self.ycenter = int(frame.shape[1]/2)
print('New ycenter: ' + str(self.ycenter))
if self.roisize == -1:
self.roisize = int(np.min(frame.shape[0:2]))
print('New roisize: ' + str(self.roisize))
# crop frame to ROI
frame_crop = frame[self.xcenter-self.roisize//2:self.xcenter+self.roisize//2, self.ycenter-self.roisize//2:self.ycenter+self.roisize//2,:]
npad = ((self.padwidth, self.padwidth), (self.padwidth, self.padwidth), (0, 0))
frame_pad = np.pad(frame_crop, npad, 'reflect')
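# e.g. with hypothetical settings roisize=256 and padwidth=32, frame_crop is
# 256x256 and frame_pad becomes 320x320; 'reflect' mirrors border pixels so the
# padding contains image content rather than hard zeros.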
# convert from NP to PIL
A = Image.fromarray(frame_pad).convert('RGB')
A = transforms.ToTensor()(A)
# normalize
A = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114
A = tmp.unsqueeze(0)
B = A
return {'A': A, 'B': B,
'A_paths': AB_path, 'B_paths': AB_path}
def __len__(self):
videometadata = skvideo.io.ffprobe(self.dir_AB)
#print(videometadata)
#print(self.dir_AB)
num_frames = int(videometadata['video']['@nb_frames'])
return num_frames
def name(self):
return 'VideoDataset'
|
bionanoimaging/cellSTORM-pyTorch
|
models/models.py
|
def create_model(opt):
model = None
print(opt.model)
assert(opt.dataset_mode == 'aligned')
from .pix2pix_model import Pix2PixModel
model = Pix2PixModel()
model.initialize(opt)
print("model [%s] was created" % (model.name()))
return model
|
qizhidong/xmind2testlink
|
xmind2testlink/xray.py
|
#!/usr/bin/env python
# encoding: utf-8
import requests
import json
import time
from xmind2testlink.datatype import TestCase
class XrayIssue:
def __init__(self, x_acpt, jira_token, xray_client_id=None, xray_client_key=None):
self.xray_headers = {
'X-acpt': x_acpt,
'Content-Type': 'application/json;charset=UTF-8',
}
self.jira_headers = {
'Authorization': 'Basic ' + jira_token,
'Content-Type': 'application/json',
}
self.folder_id = {}
self.project_id = {
'KC': '10012',
'KE': '10005',
'KM': '10028',
'KI': '10024',
'MDX': '10023',
}
self.is_smoketest = {
True: 'Yes',
False: 'No',
}
self.is_need_quard = {
True: 'Yes',
False: 'No',
}
self.testcase_type = {
'主流程用例': '主流程用例',
'分支用例': '分支用例',
'UED用例': 'UED用例',
'波及功能用例': '波及功能用例',
}
self.srcum_team_jira_type = 'customfield_10089'
self.xray_client_id = xray_client_id
self.xray_client_key = xray_client_key
if self.xray_client_id is not None and self.xray_client_key is not None:
self.xray_token = self.xray_auth()
self.bulk_xray_headers = {
'Authorization': 'Bearer ' + self.xray_token,
'Content-Type': 'application/json;charset=UTF-8',
}
def xray_auth(self):
auth_url = 'https://xray.cloud.xpand-it.com/api/v1/authenticate'
auth_payload = {
'client_id': self.xray_client_id,
'client_secret': self.xray_client_key,
}
res = requests.post(auth_url, json=auth_payload)
return json.loads(res.content)
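# Note: the authenticate endpoint returns the bearer token as a JSON-encoded
# string, so json.loads() above yields the plain token that is later sent as
# 'Bearer <token>' in bulk_xray_headers.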
def generate_bulk_json(self, project_name_key, issue_name, test_case, link_issue_key,
components, is_smoketest, is_need_quard, testcase_type, forder):
importance_list = [0, 1, 2, 3]
importance = 3 if int(test_case.importance) not in importance_list else int(test_case.importance)
issue_name = str(issue_name).replace('\r\n', '').replace('\r', '').replace('\n', '')
link_issue_scrum_team_id = self.get_issue_scrum_team_id(link_issue_key)
steps = []
for step in test_case.steps:
step_json = dict()
step_json['action'] = step.action
step_json['data'] = ''
step_json['result'] = step.expected
steps.append(step_json)
bulk_json = {
'testtype': 'Manual',
'fields': {
'summary': issue_name,
'project': {'key': project_name_key},
'priority': {'name': 'P' + str(importance)},
'description': 'example of manual test',
'issuetype': {'name': 'Test'},
'components': [],
'assignee': [],
'customfield_10137': {'value': self.is_smoketest[is_smoketest]},
'customfield_10139': {'value': self.testcase_type[testcase_type]},
self.srcum_team_jira_type: {'id': link_issue_scrum_team_id},
'customfield_10145': {'value': self.is_need_quard[is_need_quard]},
},
'update': {
'issuelinks': [
{
'add': {
'type': {
'name': 'Test'
},
'outwardIssue': {
'key': link_issue_key,
}
}
}
]
},
'steps': steps,
'xray_test_repository_folder': forder,
}
if project_name_key == "KC":
bulk_json['fields']['components'].append({'name': components})
bulk_json['fields']['assignee'].append({'id': '5ac2e1fc09ee392b905c0972'})
return bulk_json
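# Sketch of one resulting bulk entry (hypothetical values, abbreviated):
#   {'testtype': 'Manual',
#    'fields': {'summary': 'Login > happy path', 'project': {'key': 'KE'},
#               'priority': {'name': 'P2'}, 'issuetype': {'name': 'Test'}, ...},
#    'steps': [{'action': 'open page', 'data': '', 'result': 'page shown'}],
#    'xray_test_repository_folder': 'Sprint 06'}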
def bulk_xray_issue(self, bulk_json_arr):
bulk_url = 'https://xray.cloud.xpand-it.com/api/v1/import/test/bulk'
try:
self.xray_token = self.xray_auth()
res = requests.post(bulk_url, json=bulk_json_arr, headers=self.bulk_xray_headers)
return json.loads(res.content).get('jobId')
except Exception as e:
print('Bulk import xray issue failed {}'.format(e))
def check_bulk_issue_status(self, job_id):
"""
:param job_id: bulk issue status
:return:
{
"status": "successful",
"result": {
"errors": [],
"issues": [
{
"elementNumber": 0,
"id": "62372",
"key": "KC-6410",
"self": "https://olapio.atlassian.net/rest/api/2/issue/62372"
}
],
"warnings": []
}
}
"""
check_bulk_url = 'https://xray.cloud.xpand-it.com/api/v1/import/test/bulk/{}/status'.format(job_id)
try:
res = requests.get(check_bulk_url, headers=self.bulk_xray_headers)
except Exception:
self.xray_token = self.xray_auth()
res = requests.get(check_bulk_url, headers=self.bulk_xray_headers)
return json.loads(res.content)
def await_import_bulk_xray_issue(self, job_id):
finished_status = ['successful', 'failed', 'unsuccessful']
res = self.check_bulk_issue_status(job_id)
while res.get('status') not in finished_status:
print('Import status is {}, not finished, wait 20 second'.format(res.get('status')))
time.sleep(20)
res = self.check_bulk_issue_status(job_id)
print('Import finished, status is {}'.format(res.get('status')))
return res
def create_xray_issue(self, project_name_key, issue_name, importance, link_issue_key,
components=None, is_smoketest=False, is_need_quard=False, testcase_type='主流程用例'):
url = "https://olapio.atlassian.net/rest/api/2/issue"
importance_list = [0, 1, 2, 3]
if int(importance) not in importance_list:
importance = 3
issue_name = str(issue_name).replace('\r\n', '').replace('\r', '').replace('\n', '')
link_issue_scrum_team_id = self.get_issue_scrum_team_id(link_issue_key)
payload = {
"fields": {
"project": {"key": project_name_key},
"summary": issue_name,
"priority": {"name": "P" + str(importance)},
"description": "example of manual test",
"issuetype": {"name": "Test"},
'components': [],
'assignee': [],
'customfield_10137': {'value': self.is_smoketest[is_smoketest]},
'customfield_10139': {'value': self.testcase_type[testcase_type]},
self.srcum_team_jira_type: {'id': link_issue_scrum_team_id},
'customfield_10145': {'value': self.is_need_quard[is_need_quard]},
}
}
if project_name_key == "KC":
payload['fields']['components'].append({'name': components})
payload['fields']['assignee'].append({'id': '5ac2e1fc09ee392b905c0972'})
response = requests.request("POST", url, headers=self.jira_headers, data=json.dumps(payload))
if response.status_code >= 400:
print('创建issue 状态码为{}'.format(response.status_code))
print('create jira issue failed, {}'.format(response.content.decode(encoding='utf-8')))
print(response.json()['key'])
print('成功创建了xray issue https://olapio.atlassian.net/browse/' + response.json()['key'])
return response.json()['id'], response.json()['key']
# 2. Add a step to the issue; substitute the issue id into the url
def create_xray_issue_step(self, key, index, action, data, result):
create_step_url = 'https://xray.cloud.xpand-it.com/api/internal/test/' + key + '/step'
data = {"id": "-1", "index": index, "customFields": [], "action": action, "data": data, "result": result}
response = requests.post(create_step_url, headers=self.xray_headers, data=json.dumps(data))
if response.status_code == 500:
print(response.json()['error'])
exit(1)
# else:
# print('创建步骤成功')
def create_xray_full_issue(self, project_name_key, issue_name, test_case, link_issue_key,
components, is_smoketest, is_need_quard, testcase_type):
# test_case = TestCase(test_case)
(issue_id, issue_key) = self.create_xray_issue(project_name_key, issue_name, test_case.importance,
link_issue_key, components, is_smoketest,
is_need_quard, testcase_type)
self.link_issue(link_issue_key, issue_key)
# self.get_folder_id(project_name_key)
for i in range(len(test_case.steps)):
step = test_case.steps[i]
self.create_xray_issue_step(issue_id, i, step.action, '', step.expected)
# self.move_issue_to_folder(issue_id, project_name_key, components)
return issue_id
def move_issue_to_folder(self, issue_ids, project_name_key, components):
move_url = 'https://xray.cloud.xpand-it.com/api/internal/test-repository/move-tests-to-folder'
data = {
'folderId': self.folder_id[components],
'issueIds': issue_ids,
'skipTestValidation': False,
'projectId': self.project_id[project_name_key],
}
response = requests.post(move_url, headers=self.xray_headers, data=json.dumps(data))
print(response.status_code)
if response.status_code >= 400:
print(response.content)
def get_folder_id(self, project_name_key):
get_folder_url = 'https://xray.cloud.xpand-it.com/api/internal/test-repository'
data = {
'projectId': self.project_id[project_name_key],
}
response = requests.post(get_folder_url, headers=self.xray_headers, data=json.dumps(data))
print(response.status_code)
if response.status_code >= 400:
print(response.content)
for folder in json.loads(response.content).get('folders'):
self.folder_id[folder.get('name')] = folder.get('folderId')
def link_issue(self, origin_key, xray_key):
url = 'https://olapio.atlassian.net/rest/api/2/issueLink'
# payload = {"type": {"id": "10006"}, "inwardIssue": {"key": "KE-12706"}, "outwardIssue": {"key": "QUARD-263"}}
payload = {"type": {"id": "10006"}, "inwardIssue": {"key": origin_key}, "outwardIssue": {"key": xray_key}}
response = requests.request("POST", url, headers=self.jira_headers, data=json.dumps(payload))
# return response.json()['id']
def get_issue_scrum_team_id(self, issue_key):
res = self.get_issue_info(issue_key)
if self.srcum_team_jira_type not in res.get('fields').keys():
print('{} has no scrum team property, please add it'.format(issue_key))
raise ValueError('{} has no scrum team property'.format(issue_key))
return res.get('fields').get(self.srcum_team_jira_type).get('id')
def get_issue_info(self, issue_key):
url = "https://olapio.atlassian.net/rest/api/2/issue/{}".format(issue_key)
payload = {}
response = requests.request("GET", url, headers=self.jira_headers, data=payload)
return json.loads(response.content)
# create_xray_full_issue()
if __name__ == '__main__':
X_acpt = ''
xray_headers = {
'X-acpt': X_acpt,
'Content-Type': 'application/json;charset=UTF-8',
}
jira_token = ''
xray_issue = XrayIssue(X_acpt, jira_token,
'',
'')
res = xray_issue.xray_auth()
print(res)
# project_name_key = 'QUARD'
# issue_name = 'test_issue'
# test_case = ''
# link_issue_key = ''
# xray_issue.create_xray_full_issue(project_name_key, issue_name, test_case, link_issue_key)
|
qizhidong/xmind2testlink
|
main.py
|
"""
A tool to parse xmind file into testlink xml file, which will help
you generate a testlink recognized xml file, then you can import it
into testlink as test suites.
Usage:
xmind2testlink [path_to_xmind_file] [-json]
Example:
xmind2testlink C:\\tests\\testcase.xmind => output xml
xmind2testlink C:\\tests\\testcase.xmind -json => output json
"""
import json
import sys, argparse, time, os, traceback
from xmind2testlink.testlink_parser import to_testlink_xml_file
from xmind2testlink.xmind_parser import xmind_to_suite, xmind_to_flat_dict
from xmind2testlink.xray import XrayIssue
csv_title = {
'Name': [],
'Objective': [],
'Precondition': [],
'Folder': [],
'Status': [],
'Priority': [],
'Component': [],
'Owner': [],
'Estimated Time': [],
'Labels': [],
'Coverage (Issues)': [],
'Test Script (Step-by-Step) - Step': [],
'Test Script (Step-by-Step) - Test Data': [],
'Test Script (Step-by-Step) - Expected Result': [],
}
def xmind_to_testlink(xmind):
xml_out = xmind[:-5] + 'xml'
suite = xmind_to_suite(xmind)
to_testlink_xml_file(suite, xml_out)
return xml_out
def xmind_to_json(xmind):
json_out = xmind[:-5] + 'json'
with open(json_out, 'w', encoding='utf8') as f:
f.write(json.dumps(xmind_to_flat_dict(xmind), indent=2))
return json_out
def get_issue_key(test_case_name):
chn_index = str(test_case_name).find(':')
en_index = str(test_case_name).find(':')
if chn_index == -1 and en_index == -1:
issue_key_index = -1
elif chn_index == -1:
issue_key_index = en_index
elif en_index == -1:
issue_key_index = chn_index
else:
issue_key_index = min(chn_index, en_index)
if issue_key_index == -1:
link_issue_key = ''
else:
link_issue_key = str(test_case_name)[:issue_key_index]
return link_issue_key
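# Example: a case named "KE-12345:登录冒烟" (hypothetical) yields the link key
# "KE-12345"; a name without a ':' (full-width or ASCII) yields an empty key.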
def get_compenent(test_suite_name):
return str(test_suite_name).split('/')[-1]
def generate_csv_title(xmind):
index = str(xmind).find('.', len(xmind)-10)
csv_file = ''.join((str(xmind)[:index], '.csv'))
with open(csv_file, 'w+') as f:
for num, key in enumerate(csv_title.keys()):
f.write(key)
if num != len(csv_title) - 1:
f.write(',')
else:
f.write('\n')
return csv_file
def generate_tm4j_csv(csv_file, title_name, test_case, issue_key, component):
for key in csv_title.keys():
csv_title[key] = []
csv_title['Name'].append(title_name)
csv_title['Folder'].append(component)
csv_title['Status'].append('Draft')
if test_case.importance == 1:
csv_title['Priority'].append('High')
elif test_case.importance == 2:
csv_title['Priority'].append('Normal')
else:
csv_title['Priority'].append('Low')
csv_title['Component'].append(component)
csv_title['Coverage (Issues)'].append(issue_key)
for step in test_case.steps:
csv_title['Test Script (Step-by-Step) - Step'].append(''.join(('"', step.action, '"')))
csv_title['Test Script (Step-by-Step) - Expected Result'].append(''.join(('"', step.expected, '"')))
with open(csv_file, 'a+') as f:
for i in range(len(test_case.steps)):
for j, key in enumerate(csv_title.keys()):
if len(csv_title[key]) > i:
f.write(str(csv_title[key][i]))
if j != len(csv_title) - 1:
f.write(',')
else:
f.write('\n')
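# Sketch of the output for a hypothetical two-step case: the first row carries
# the case-level fields (Name, Folder, Status 'Draft', Priority, Component and
# the covered issue key) together with the first step/expected pair; every
# following row only fills its own step and expected-result columns, leaving
# the rest empty.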
def main(xacpt, jira_token, project_name_key, xmind, is_smoketest,
is_need_quard, testcase_type, folder_name=None, error_case_file=None,
xray_client_id=None, xray_client_key=None):
# xacpt = ''
# jira_token = '<PASSWORD>'
# project_name_key = 'QUARD'
# xmind = '/Users/wei.zhou/Documents/4x版本迭代/spirnt06/Kyligence Enterprise-sprint06.xmind'
suite = xmind_to_suite(xmind)
xray_issue = XrayIssue(xacpt, jira_token, xray_client_id, xray_client_key)
if xacpt != '':
xray_issue.get_folder_id(project_name_key)
# csv_file = generate_csv_title(xmind)
error_case_file = '/tmp/error_case_file.txt' if error_case_file is None else error_case_file
if os.path.isdir(error_case_file):
error_case_file = os.path.join(error_case_file, 'error_case_file.txt')
if os.path.exists(error_case_file):
os.remove(error_case_file)
for test_suit in suite.sub_suites:
if xray_client_id is None and xray_client_key is None and xacpt != '':
components = test_suit.name
for test_case in test_suit.testcase_list:
test_case_name = test_case.name
title_name = test_suit.name + ' > ' + test_case_name
# generate_tm4j_csv(csv_file, title_name, test_case, get_issue_key(test_case_name), sub_title)
try:
issue_id = xray_issue.create_xray_full_issue(project_name_key, title_name, test_case,
get_issue_key(test_case_name), components,
is_smoketest, is_need_quard, testcase_type)
except Exception:
issue_id = None
with open(error_case_file, 'a+') as f:
f.write('{} create failed\n'.format(test_case_name))
f.write(traceback.format_exc())
if issue_id:
forder_name = components if folder_name is None else folder_name
xray_issue.move_issue_to_folder([issue_id], project_name_key, forder_name)
elif xray_client_id is not None and xray_client_key is not None:
components = get_compenent(test_suit.name)
bulk_json_arr = []
for test_case in test_suit.testcase_list:
test_case_name = test_case.name
title_name = components + ' > ' + test_case_name
bulk_json = xray_issue.generate_bulk_json(project_name_key, title_name, test_case,
get_issue_key(test_case_name), components,
is_smoketest, is_need_quard, testcase_type, test_suit.name)
bulk_json_arr.append(bulk_json)
job_id = xray_issue.bulk_xray_issue(bulk_json_arr)
print(job_id)
result = xray_issue.await_import_bulk_xray_issue(job_id)
print(result)
if result.get('result').get('errors'):
with open(error_case_file, 'a+') as f:
for error in result.get('result').get('errors'):
f.write('{} create failed. '.format(bulk_json_arr[error.get('elementNumber')].
get('fields').get('summary')))
f.write('The reason is {}!\n'.format(error.get('errors')))
print()
def xmindtest(xmind):
xmind_to_suite(xmind)
def init_argument():
parser = argparse.ArgumentParser()
    parser.add_argument('--xacpt', required=False,
                        help="Visit https://olapio.atlassian.net/browse/QUARD-277, open the browser dev tools (F12 or "
                             "right-click -> Inspect), search for the request that contains `testStepFields`, and use "
                             "the value of the X-acpt field from its request headers")
    parser.add_argument('--token', required=True,
                        help="Defaults to the author's key; it is recommended to use your own. Request a token via "
                             "https://id.atlassian.com/manage-profile/security/api-tokens and base64-encode it with "
                             "https://www.blitter.se/utils/basic-authentication-header-generator")
    parser.add_argument('--project', required=True,
                        help="Defaults to KE; visit https://olapio.atlassian.net/projects to get the key of the target project")
    parser.add_argument('--xmind', required=True,
                        help="Full path of your xmind file, for example: /Users/wei.zhou/Documents/4x版本迭代/spirnt06/Kyligence "
                             "Enterprise-sprint06.xmind")
    parser.add_argument('--cid', required=False,
                        help="Ask the Jira administrator (永强) to create the corresponding xray client id and xray client key")
    parser.add_argument('--ckey', required=False,
                        help="Ask the Jira administrator (永强) to create the corresponding xray client id and xray client key")
args = parser.parse_args()
return args
if __name__ == '__main__':
# ARG = init_argument()
# xacpt = ARG.xacpt if ARG.xacpt else ''
# jira_token = ARG.token
# project_name_key = ARG.project
# xmind = ARG.xmind
# client_id = ARG.cid if ARG.cid else None
# client_key = ARG.ckey if ARG.ckey else None
xacpt = ''
jira_token = ''
client_id = None
client_key = None
# KC KE KI MDX
project_name_key = 'KE'
xmind = ''
# True False
is_smoketest = False
is_need_quard = False
    # Valid values: '主流程用例' (main-flow cases), '分支用例' (branch cases), 'UED用例' (UED cases), '波及功能用例' (affected-feature cases)
testcase_type = '主流程用例'
    # Name of a folder under the test repository
folder_name = None
error_case_file = None
main(xacpt, jira_token, project_name_key, xmind, is_smoketest,
is_need_quard, testcase_type, folder_name, error_case_file,
client_id, client_key)
# local_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
# print(local_time)
|
qizhidong/xmind2testlink
|
xmind2testlink/main.py
|
<filename>xmind2testlink/main.py
"""
A tool to parse xmind file into testlink xml file, which will help
you generate a testlink recognized xml file, then you can import it
into testlink as test suites.
Usage:
xmind2testlink [path_to_xmind_file] [-json]
Example:
xmind2testlink C:\\tests\\testcase.xmind => output xml
xmind2testlink C:\\tests\\testcase.xmind -json => output json
"""
import json
import sys, argparse
from xmind2testlink.testlink_parser import to_testlink_xml_file
from xmind2testlink.xmind_parser import xmind_to_suite, xmind_to_flat_dict
from xmind2testlink.xray import xray_issue
def xmind_to_testlink(xmind):
xml_out = xmind[:-5] + 'xml'
suite = xmind_to_suite(xmind)
to_testlink_xml_file(suite, xml_out)
return xml_out
def xmind_to_json(xmind):
json_out = xmind[:-5] + 'json'
with open(json_out, 'w', encoding='utf8') as f:
f.write(json.dumps(xmind_to_flat_dict(xmind), indent=2))
return json_out
def get_issue_key(test_case_name):
chn_index = str(test_case_name).find(':')
en_index = str(test_case_name).find(':')
if chn_index == -1 and en_index == -1:
issue_key_index = -1
elif chn_index == -1:
issue_key_index = en_index
elif en_index == -1:
issue_key_index = chn_index
else:
issue_key_index = min(chn_index, en_index)
if issue_key_index == -1:
link_issue_key = ''
else:
link_issue_key = str(test_case_name)[:issue_key_index]
return link_issue_key
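# Behaviour sketch (hypothetical test case names, not from the original code):
# get_issue_key("KE-1234: verify login") would return "KE-1234", taking everything
# before the first ':' / '：'; a name without either separator returns an empty
# string, so no issue gets linked.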
def main(xacpt, jira_token, project_name_key, xmind):
# xacpt = ''
# jira_token = '<PASSWORD>'
# project_name_key = 'QUARD'
# xmind = '/Users/wei.zhou/Documents/4x版本迭代/spirnt06/Kyligence Enterprise-sprint06.xmind'
suite = xmind_to_suite(xmind)
for test_suit in suite.sub_suites:
sub_title = test_suit.name
for test_case in test_suit.testcase_list:
test_case_name = test_case.name
title_name = sub_title + ' > ' + test_case_name
xray_issue.create_xray_full_issue(project_name_key, title_name,
test_case, get_issue_key(test_case_name), jira_token,
xacpt)
# for test_case in test_suit
print()
def init_argument():
parser = argparse.ArgumentParser()
    parser.add_argument('--xacpt', required=True,
                        help="Visit https://olapio.atlassian.net/browse/QUARD-277, open the browser dev tools (F12 or right-click -> Inspect), search for the request that contains `testStepFields`, and use the value of the X-acpt field from its request headers")
    parser.add_argument('--token', default="<KEY>",
                        help="Defaults to the author's key; it is recommended to use your own. Request a token via https://id.atlassian.com/manage-profile/security/api-tokens and base64-encode it with https://www.blitter.se/utils/basic-authentication-header-generator")
    parser.add_argument('--project', default='KE',
                        help="Defaults to KE; visit https://olapio.atlassian.net/projects to get the key of the target project")
    parser.add_argument('--xmind', required=True,
                        help="Full path of your xmind file, for example: /Users/wei.zhou/Documents/4x版本迭代/spirnt06/Kyligence Enterprise-sprint06.xmind")
args = parser.parse_args()
return args
if __name__ == '__main__':
# <KEY>
ARG = init_argument()
xacpt = ARG.xacpt
jira_token = ARG.token
project_name_key = ARG.project
xmind = ARG.xmind
main(xacpt, jira_token, project_name_key, xmind)
|
klmp200/Pandoc-Converter
|
PandocConverter.py
|
<reponame>klmp200/Pandoc-Converter<gh_stars>0
# -*- coding: utf-8 -*-
# @Author: klmp200
# @Date: 2016-03-17 19:08:11
# @Last Modified by: klmp200
# @Last Modified time: 2016-03-19 16:30:02
import sublime
import sublime_plugin
import subprocess
import re
import os
class PandocConverterCommand(sublime_plugin.TextCommand):
"""
Provide necessary in order to get the file converted
and opened
"""
# Convert the file and open it
def run(self, edit, output):
# Define some variables
self.select_pandoc()
self.detect_input_format(output)
# Set working directory to let add images
work_dir = os.path.dirname(self.view.file_name())
os.chdir(work_dir)
# Launch command line and open file
proc = subprocess.Popen(self.build_command_line(output),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
error = proc.communicate()[1]
if proc.returncode == 0:
self.open(output)
else:
self.show_errors(error)
# Show errors in sublime
def show_errors(self, error):
sublime.active_window().run_command("show_panel", {
"panel": "console", "toggle": True})
print(error)
# Build conversion command line
def build_command_line(self, output):
command_line = []
self.build_output_name(output)
base_command = [
self.pandoc,
"-f", self.input_format,
self.view.file_name(),
"-o", self.ofile
]
for command in base_command:
command_line.append(command)
for arg in output["pandoc-arguments"]:
command_line.append(arg)
return command_line
# Build output name
def build_output_name(self, output):
old_name = self.view.file_name()
new_name = re.sub(r'(?<=\.)([A-Za-z0-9]+)$',
output["output-format"], old_name)
self.ofile = new_name
# Open the file
def open(self, output):
oformat = output["output-format"]
if oformat in _s("pandoc-format-file"):
try:
if sublime.platform() == 'osx':
subprocess.call(['open', self.ofile])
                elif sublime.platform() == 'windows':
os.startfile(self.ofile)
elif sublime.platform() == 'linux':
subprocess.call(['xdg-open', self.ofile])
            except Exception:
sublime.message_dialog('Wrote to file ' + self.ofile)
else:
sublime.active_window().open_file(self.ofile).set_syntax_file(
output["syntax_file"])
# Define Pandoc path
def select_pandoc(self):
pandoc_path = _s("pandoc-path")[sublime.platform()]
if pandoc_path == '':
sublime.message_dialog('Warning : Pandoc path not defined')
pandoc_path = 'pandoc'
self.pandoc = pandoc_path
# Define format of the converted file
def detect_input_format(self, output):
        scopes = re.split(r'\s', self.view.scope_name(0))
self.input_format = output["scope"][scopes[0]]
class PandocConverterPanelCommand(sublime_plugin.WindowCommand):
"""
Display informations in quick panel and let the user
chose output settings
"""
# Main function
def run(self):
self.view = self.window.active_view()
self.window.show_quick_panel(self.get_list(), self.convert)
# Generate a list of available outputs
def get_list(self):
        available_outputs = []
        outputs = _s("outputs")
        for output in outputs:
            added = False
            for scope in outputs[output]["scope"]:
                if self.view.score_selector(0, scope) and not added:
                    available_outputs.append(output)
                    added = True
        self.outputs = available_outputs
        return available_outputs
# Launch conversion
def convert(self, i):
if i != -1:
choice = _s("outputs")[self.outputs[i]]
self.view.run_command('pandoc_converter', {
"output": choice,
})
# Allow to access easily to settings
def _s(key):
settings = sublime.load_settings("PandocConverter.sublime-settings")
return settings.get(key, {})
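# For reference, a minimal hypothetical settings entry consistent with the keys read
# above ("scope", "output-format", "pandoc-arguments", "syntax_file") might look like:
#   "outputs": {
#       "HTML": {
#           "scope": {"text.html.markdown": "markdown"},
#           "output-format": "html",
#           "pandoc-arguments": ["--standalone"],
#           "syntax_file": "Packages/HTML/HTML.sublime-syntax"
#       }
#   }
# "pandoc-path" and "pandoc-format-file" are looked up the same way via _s().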
|
msjz/ArabCoders-Workshop
|
rename files/rename_files.py
|
<gh_stars>0
import os
def rename_files():
    files_list = os.listdir(r"D:\Study\1MAC\test\prank")
    saved_path = os.getcwd()
    print("Current working directory is: " + saved_path)
    os.chdir(r"D:\Study\1MAC\test\prank")
    for file_name in files_list:
        # Strip all digit characters from the file name (Python 3 str.translate)
        os.rename(file_name, file_name.translate(str.maketrans('', '', "0123456789")))
os.chdir(saved_path)
rename_files()
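# Illustration of the intended effect (hypothetical file names): a directory
# containing "48athens.jpg" and "7berlin.png" would be renamed to "athens.jpg"
# and "berlin.png", since every digit character is removed from each name.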
|
dineshpanchal93/helpremove
|
account_report/config/account_report.py
|
<gh_stars>0
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Sanket"),
"items": [
{
"type": "doctype",
"name": "Test",
"description": _("For Test.")
},
{
"type": "doctype",
"name": "overtime",
"description": _("For Test.")
}
]
},
{
"label": _("Accounts Report"),
"items": [
{
"type": "report",
"name":"sanket",
"doctype": "Test",
"is_query_report": True,
},
]
},
{
"label": _("Help"),
"icon": "fa fa-facetime-video",
"items": [
{
"type": "help",
"label": _("Chart of Accounts"),
"youtube_id": "DyR-DST-PyA"
},
{
"type": "help",
"label": _("Opening Accounting Balance"),
"youtube_id": "kdgM20Q-q68"
},
{
"type": "help",
"label": _("Setting up Taxes"),
"youtube_id": "nQ1zZdPgdaQ"
}
]
}
]
|
dineshpanchal93/helpremove
|
account_report/config/hr.py
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Over Time"),
"items": [
{
"type": "doctype",
"name": "overtime",
"description": _("For Overtime.")
},
{
"type": "report",
"name": "Monthly Overtime",
"doctype": "overtime",
"is_query_report": True,
"label": _("Monthly Overtime Report"),
"color": "green",
"icon": "octicon octicon-file-directory"
},
{
"type": "doctype",
"name": "Overtime Approved",
"description": _("For Overtime Approved.")
}
]
}
]
|
dineshpanchal93/helpremove
|
account_report/config/desktop.py
|
<filename>account_report/config/desktop.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "HR",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "module",
"hidden": 1
}
]
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/data/word2vec_dataset.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import codecs
import random
import collections
import tensorflow as tf
import PlatformNlp.tokenization as tokenization
from PlatformNlp.data.base_dataset import BaseDataset
from PlatformNlp.data import register_dataset
class InputExample(object):
"""A single training/test example for word2vec model."""
def __init__(self, guid, tokens):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
          tokens: list of string tokens for the sequence.
"""
self.guid = guid
self.tokens = tokens
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
    batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
label_ids,
is_real_example=True):
self.input_ids = input_ids
self.label_ids = label_ids
self.is_real_example = is_real_example
@register_dataset("word2vec")
class Word2vecDataset(BaseDataset):
"""Loader for MultiClass dDataset"""
def __init__(self, args):
self.args = args
self.max_seq_length = 200 if not args.max_seq_length or args.max_seq_length <= 0 else args.max_seq_length
def build_dataset(self, args, tokenizer):
set_type = args.type
data_file = args.data_file
label_file = args.label_file
output_file = args.output_file
if not os.path.exists(data_file):
raise FileExistsError("{} does not exists!!!".format(data_file))
if os.path.exists(output_file):
os.remove(output_file)
all_lines = []
with codecs.open(data_file, "r", 'utf-8', errors='ignore') as f:
lines = []
for line in f:
line = line.strip('\n')
line = line.strip("\r")
line = tokenization.convert_to_unicode(line)
tokens = tokenizer.tokenize(line)
if set_type == "train":
if len(tokens) < (2 * args.skip_window + 1):
continue
if len(tokens) > args.max_seq_length:
tokens = tokens[:args.max_seq_length]
if len(tokens) <= (2 * args.skip_window + 1):
continue
lines.append(tokens)
shuffle_index = list(range(len(lines)))
random.shuffle(shuffle_index)
for i in range(len(lines)):
shuffle_i = shuffle_index[i]
line_i = lines[shuffle_i]
all_lines.append(line_i)
del lines
examples = []
for (i, line) in enumerate(all_lines):
# Only the test set has a header
guid = "%s-%s" % (set_type, i)
examples.append(
InputExample(guid=guid, tokens=line))
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = self.convert_single_example(ex_index, example, args, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_str_feature(value):
if isinstance(value, str):
value = bytes(value, encoding='utf-8')
f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["label_ids"] = create_int_feature(feature.label_ids)
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def builder(self, tfrecord_file, is_training, batch_size, drop_remainder, args):
name_to_features = {
"input_ids": tf.VarLenFeature(dtype=tf.int64),
"label_ids": tf.VarLenFeature(dtype=tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
args.name_to_features = name_to_features
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
# batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(tfrecord_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def convert_single_example(self, ex_index, example, args, tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
tokens = []
tokens_a = example.tokens
tokens.extend(tokens_a)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
if self.args.type != "train":
ids = input_ids
labels = input_ids
feature = InputFeatures(
input_ids=ids,
label_ids=labels,
is_real_example=True)
return feature
num_skips = args.num_skips
skip_window = args.skip_window
while skip_window > 1 and len(input_ids) <= (2 * skip_window + 1):
skip_window = int(skip_window / 2)
if skip_window <= 1:
return None
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
data_index = 0
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(input_ids[data_index])
data_index = (data_index + 1) % len(input_ids)
ids = []
labels = []
for i in range(len(input_ids) // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
ids.append(buffer[skip_window])
labels.append(buffer[target])
buffer.append(input_ids[data_index])
data_index = (data_index + 1) % len(input_ids)
feature = InputFeatures(
input_ids=ids,
label_ids=labels,
is_real_example=True)
return feature
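# A rough illustration of the skip-gram pairing above (hypothetical token ids, not
# from the original code): with input_ids = [10, 11, 12, 13, 14], skip_window = 1
# and num_skips = 2, the first buffer is [10, 11, 12]; the centre id 11 is appended
# to `ids` twice, each time paired in `labels` with a randomly chosen neighbour
# (10 or 12), and the window then slides one position to the right.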
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/Configure_file.py
|
<reponame>jd-aig/aves2_algorithm_components
import tensorflow as tf
def configure_lr(init_lr, decay_policy, decay_steps, decay_rate, global_steps, warm_lr=0.0001, warm_steps=0):
lr_decay_list = {
'exponential_decay': tf.train.exponential_decay(learning_rate=init_lr,
global_step=global_steps,
decay_steps=decay_steps,
decay_rate=decay_rate,
staircase=True
),
'natural_exp_decay': tf.train.natural_exp_decay(
learning_rate=init_lr,
global_step=global_steps,
decay_steps=decay_steps,
decay_rate=decay_rate,
staircase=True
),
'polynomial_decay': tf.train.polynomial_decay(
learning_rate=init_lr,
global_step=global_steps,
decay_steps=decay_steps,
end_learning_rate=1e-1 * init_lr,
power=2.0,
cycle=True,
name=None
),
'fixed':init_lr
}
def false_fn():
return lr_decay_list[decay_policy]
def true_fn():
return tf.train.polynomial_decay(learning_rate=warm_lr,
global_step=global_steps,
decay_steps=warm_steps,
end_learning_rate=init_lr,
power=1.0
)
pred_result = global_steps < warm_steps
learning_rate = tf.cond(pred_result, true_fn, false_fn)
return learning_rate
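# Minimal usage sketch (assumed values, not from the original code): with the default
# warm_steps=0 the warm-up branch is never taken and the returned tensor simply
# follows the chosen decay policy, e.g.
#   lr = configure_lr(0.001, 'exponential_decay', decay_steps=1000, decay_rate=0.9,
#                     global_steps=tf.train.get_or_create_global_step())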
def configure_optimizer(optimizer, learning_rate):
opt_gpu_list = {
'rmsp': tf.train.RMSPropOptimizer(learning_rate,epsilon=1),
'adam': tf.train.AdamOptimizer(learning_rate),
'sgd': tf.train.GradientDescentOptimizer(learning_rate),
'mometum': tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)
}
return opt_gpu_list[optimizer]
def model_optimizer(model_name,learning_rate):
opt_list = {
'inception_v4' : tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9),
'inception_resnet_v2': tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9),
'resnet_v2_101': tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9),
'mobilenet_v2_14': tf.train.RMSPropOptimizer(learning_rate,momentum=0.9,decay=0.9),
'nasnet_large': tf.train.RMSPropOptimizer(learning_rate,epsilon=1,decay=0.9),
'pnasnet_large': tf.train.RMSPropOptimizer(learning_rate),
'vgg_16': tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)
}
return opt_list[model_name]
def model_exclusions(model_name):
exclusions_list = {
'inception_v1': ['InceptionV1/Logits'],
'inception_v2': ['InceptionV2/Logits'],
'inception_v3': ['InceptionV3/Logits', 'InceptionV3/AuxLogits'],
'inception_v4': ['InceptionV4/Logits', 'InceptionV4/AuxLogits'],
'inception_resnet_v2': ['InceptionResnetV2/Logits', 'InceptionResnetV2/AuxLogits'],
'vgg_16': ['vgg_16/fc8'],
'vgg_19': ['vgg_19/fc8'],
'resnet_v1_50': ['resnet_v1_50/logits'],
'resnet_v1_101': ['resnet_v1_101/logits'],
'resnet_v1_152': ['resnet_v1_152/logits'],
'resnet_v2_50': ['resnet_v2_50/logits'],
'resnet_v2_101': ['resnet_v2_101/logits'],
'resnet_v2_152': ['resnet_v2_152/logits'],
'mobilenet_v1_025': ['MobilenetV1/Logits'],
'mobilenet_v1_050' : ['MobilenetV1/Logits'],
'mobilenet_v1_10':['MobilenetV1/Logits'],
'mobilenet_v2_10':['MobilenetV2/Logits'],
'mobilenet_v2_14':['MobilenetV2/Logits'],
'nasnet_large': ['final_layer','aux'],
'nasnet_mobile': ['final_layer','aux'],
'pnasnet_large': ['final_layer','aux'],
'pnasnet_mobile': ['final_layer','aux']
}
return exclusions_list[model_name]
def configure_image_size(model_name):
image_size_list = {
'inception_v4' : 299,
'inception_resnet_v2': 299,
'resnet_v2_101': 224,
'mobilenet_v2_14': 224,
'nasnet_large': 331,
'pnasnet_large': 331,
'vgg_16': 224
}
return image_size_list[model_name]
def configure_weight_decay(model_name):
weight_decay_list = {
'inception_v4' : 0.0004,
'inception_resnet_v2': 0.0004,
'resnet_v2_101': 0.0001,
'mobilenet_v2_14': 0.0004,
'nasnet_large': 0.00004,
'pnasnet_large': 0.00004,
'vgg_16': 0.0005
}
return weight_decay_list[model_name]
def configure_batch_size(model_name):
batch_size_list = {
'inception_v4' : 32,
'inception_resnet_v2': 32,
'resnet_v2_101': 64,
'mobilenet_v2_14': 96,
'nasnet_large': 8,
'pnasnet_large': 8,
'vgg_16': 128
}
return batch_size_list[model_name]
def configure_init_learning_rate(model_name):
init_learning_rate_list = {
'inception_v4' : 0.001,
'inception_resnet_v2': 0.001,
'resnet_v2_101': 0.001,
'mobilenet_v2_14': 0.0001,
'nasnet_large': 0.0001,
'pnasnet_large': 0.0001,
'vgg_16': 0.001
}
return init_learning_rate_list[model_name]
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/models/dssm.py
|
<filename>src/nlp/PlatformNlp/models/dssm.py
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ['DssmModel']
__author__ = 'xulu46'
__date__ = '2020.10.14'
"""The main dssm model and related functions."""
import copy
import tensorflow as tf
from PlatformNlp.modules.embedding_lookup import embedding_lookup
from PlatformNlp.models import register_model, register_model_architecture
from PlatformNlp.models.platform_model import PlatformModel
from PlatformNlp.modules.dssm_layer import dssm_layer
from PlatformNlp.modules.utils import get_activation
@register_model('dssm')
class DssmModel(PlatformModel):
"""
```python
# Already been converted into WordPiece token ids
...
```
"""
def __init__(self, is_training, features, vocab_size, act, embedding_size, hidden_sizes, max_seq_length,
dropout_prob, initializer_range):
query_ids = features["input_ids_1"]
doc_ids = features["input_ids_2"]
with tf.variable_scope("dssm"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.query_embedding_output, self.embedding_table) = embedding_lookup(
input_ids=query_ids,
vocab_size=vocab_size,
embedding_size=embedding_size,
initializer_range=initializer_range,
word_embedding_name="query_embeddings",
embedding_initializer=None)
(self.doc_embedding_output, self.embedding_table) = embedding_lookup(
input_ids=doc_ids,
vocab_size=vocab_size,
embedding_size=embedding_size,
initializer_range=initializer_range,
word_embedding_name="doc_embeddings",
embedding_initializer=None)
with tf.variable_scope("dssm"):
self.query_pred, self.doc_pred, self.cos_sim_prob = dssm_layer(self.query_embedding_output,
self.doc_embedding_output, hidden_sizes,
get_activation(act), is_training,
max_seq_length, embedding_size,
initializer_range, dropout_prob)
def get_embedding(self):
return self.embedding_table
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--vocab_size', type=int,
help='vocab size')
parser.add_argument('--neg', type=int,
help="neg sampling")
        parser.add_argument('--embedding_size', type=int,
                            help='embedding dimension')
        parser.add_argument('--act', type=str, choices=["tanh", "linear", "relu", "gelu"],
                            help='activation function for the dssm layers')
        parser.add_argument('--hidden_sizes', type=str,
                            help='comma-separated hidden layer sizes')
parser.add_argument('--l2_reg_lambda', type=float,
help='l2 reg')
parser.add_argument('--drop_prob', type=float,
help='dropout prob for textcnn output layer')
parser.add_argument('--initializer_range', type=float, default=0.1,
help='initializer range for embedding')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
hidden_sizes = "500, 200" if args.hidden_sizes is None else args.hidden_sizes
hidden_sizes = hidden_sizes.split(",")
hidden_sizes = [int(hidden_size) for hidden_size in hidden_sizes]
istraining = task.mode == tf.estimator.ModeKeys.TRAIN
return DssmModel(istraining, task.features, args.vocab_size, args.act, args.embedding_size, hidden_sizes,
args.max_seq_length, args.drop_prob, args.initializer_range)
def get_output(self):
return self.cos_sim_prob
@register_model_architecture('dssm', 'dssm')
def base_architecture(args):
args.vocab_size = 21128 if args.vocab_size is None else args.vocab_size
args.neg = 4 if args.neg is None else args.neg
args.act = "relu" if args.act is None else args.act
args.embedding_size = 128 if args.embedding_size is None else args.embedding_size
args.max_seq_length = 200 if args.max_seq_length is None else args.max_seq_length
args.l2_reg_lambda = 0.1 if args.l2_reg_lambda is None else args.l2_reg_lambda
args.hidden_sizes = getattr(args, 'hidden_sizes', "500,200")
args.drop_prob = 0.1 if args.drop_prob is None else args.drop_prob
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/models/bert.py
|
<filename>src/nlp/PlatformNlp/models/bert.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ['BertModel']
__author__ = 'xulu46'
__date__ = '2019.09.29'
"""The main bert model and related functions."""
import copy
import tensorflow as tf
from PlatformNlp.modules.embedding_lookup import embedding_postprocessor
from PlatformNlp.modules.embedding_lookup import embedding_lookup
from PlatformNlp.models import register_model, register_model_architecture
from PlatformNlp.models.platform_model import PlatformModel
from PlatformNlp.modules.utils import get_shape_list, get_activation, create_initializer
from PlatformNlp.modules.attention import create_attention_mask_from_input_mask
from PlatformNlp.modules.transformer import transformer_model
@register_model('bert')
class BertModel(PlatformModel):
"""
```python
# Already been converted into WordPiece token ids
...
```
"""
def __init__(self, features, sequence, is_training, vocab_size, hidden_size, initializer_range, type_vocab_size,
max_position_embeddings, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
hidden_dropout_prob, attention_probs_dropout_prob, use_one_hot_embeddings=True, scope=None):
if "input_ids" in features:
input_ids = features["input_ids"]
else:
input_ids_1 = features["input_ids_1"]
input_ids_2 = features["input_ids_2"]
input_ids = tf.concat([input_ids_1, input_ids_2], axis=1)
input_mask = features["input_mask"]
token_type_ids = features["segment_ids"]
self.sequence = sequence
if not is_training:
hidden_dropout_prob = 0.0
attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=vocab_size,
embedding_size=hidden_size,
initializer_range=initializer_range,
word_embedding_name="word_embeddings")
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=initializer_range,
max_position_embeddings=max_position_embeddings,
dropout_prob=hidden_dropout_prob)
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
intermediate_act_fn=get_activation(hidden_act),
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(initializer_range))
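        # Shape sketch (assumed sizes, for orientation only): with batch_size=8,
        # seq_length=128 and hidden_size=768, sequence_output is [8, 128, 768],
        # first_token_tensor is [8, 768], and pooled_output stays [8, 768] after
        # the dense + tanh projection.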
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
return self.sequence_output
def get_output(self):
if self.sequence == "sequence":
return self.get_sequence_output()
else:
return self.get_pooled_output()
def get_embedding(self):
return self.embedding_table
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--vocab_size', type=int,
help='vocab size')
parser.add_argument('--drop_prob', type=float,
help='drop out prob for output layer')
        parser.add_argument('--max_position_embeddings', type=int, default=512,
                            help='maximum number of position embeddings (longest supported sequence)')
parser.add_argument('--attention_probs_dropout_prob', type=float,
help='attention_probs_dropout_prob for each layer')
parser.add_argument('--hidden_act', type=str, default="gelu",
help='hidden act')
parser.add_argument('--hidden_dropout_prob', type=float, default=0.1,
help='hidden dropout prob for each layer')
parser.add_argument('--hidden_size', type=int, default=768,
help='hidden size for bert')
parser.add_argument('--initializer_range', type=float, default=0.02,
help='initializer_range for bert model')
parser.add_argument('--intermediate_size', type=int, default=3072,
help='intermediate_size for transformer model')
parser.add_argument('--num_attention_heads', type=int, default=12,
help='num_attention_heads for transformer model')
parser.add_argument('--num_hidden_layers', type=int, default=12,
help='num_hidden_layers for transformer model')
parser.add_argument('--type_vocab_size', type=int, default=2,
help='type_vocab_size for transformer model')
parser.add_argument('--l2_reg_lambda', type=float, default=0.1,
help='l2 reg')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
if args.task == "ner":
sequence = "sequence"
else:
sequence = "first"
is_training = (task.mode == tf.estimator.ModeKeys.TRAIN)
return BertModel(task.features, sequence, is_training, args.vocab_size, args.hidden_size,
args.initializer_range, args.type_vocab_size, args.max_position_embeddings,
args.num_hidden_layers, args.num_attention_heads, args.intermediate_size, args.hidden_act,
args.hidden_dropout_prob, args.attention_probs_dropout_prob)
@register_model_architecture('bert', 'bert')
def base_architecture(args):
    args.vocab_size = 21128 if args.vocab_size is None else args.vocab_size
args.drop_prob = 0.1 if args.drop_prob is None else args.drop_prob
args.max_position_embeddings = 512 if args.max_position_embeddings is None else args.max_position_embeddings
args.attention_probs_dropout_prob = 0.1 if args.attention_probs_dropout_prob is None else args.attention_probs_dropout_prob
args.hidden_act = "gelu" if args.hidden_act is None else args.hidden_act
args.hidden_dropout_prob = 0.1 if args.hidden_dropout_prob is None else args.hidden_dropout_prob
args.hidden_size = 768 if args.hidden_size is None else args.hidden_size
args.initializer_range = 0.02 if args.initializer_range is None else args.initializer_range
args.intermediate_size = 3072 if args.intermediate_size is None else args.intermediate_size
args.num_attention_heads = 12 if args.num_attention_heads is None else args.num_attention_heads
args.num_hidden_layers = 12 if args.num_hidden_layers is None else args.num_hidden_layers
args.type_vocab_size = 2 if args.type_vocab_size is None else args.type_vocab_size
args.l2_reg_lambda = 0.1 if args.l2_reg_lambda is None else args.l2_reg_lambda
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/models/cdssm.py
|
<gh_stars>1-10
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ['CdssmModel']
__author__ = 'xulu46'
__date__ = '2020.10.14'
"""The main cdssm model and related functions."""
import copy
import tensorflow as tf
from PlatformNlp.modules.embedding_lookup import embedding_lookup
from PlatformNlp.models import register_model, register_model_architecture
from PlatformNlp.models.platform_model import PlatformModel
from PlatformNlp.modules.dssm_layer import dssm_layer
from PlatformNlp.modules.utils import get_activation
from PlatformNlp.modules.conv_layer import conv_layer
@register_model('cdssm')
class CdssmModel(PlatformModel):
"""
```python
# Already been converted into WordPiece token ids
...
```
"""
def __init__(self, is_training, features, vocab_size, filter_sizes, num_filters, act, embedding_size, hidden_sizes, max_seq_length, dropout_prob, initializer_range):
query_ids = features["input_ids_1"]
doc_ids = features["input_ids_2"]
with tf.variable_scope("cdssm"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.query_embedding_output, self.embedding_table) = embedding_lookup(
input_ids=query_ids,
vocab_size=vocab_size,
embedding_size=embedding_size,
initializer_range=initializer_range,
word_embedding_name="query_embeddings",
embedding_initializer=None)
self.query_embedding_output = tf.expand_dims(self.query_embedding_output, axis=[-1])
(self.doc_embedding_output, self.embedding_table) = embedding_lookup(
input_ids=doc_ids,
vocab_size=vocab_size,
embedding_size=embedding_size,
initializer_range=initializer_range,
word_embedding_name="doc_embeddings",
embedding_initializer=None)
self.doc_embedding_output = tf.expand_dims(self.doc_embedding_output, axis=[-1])
with tf.variable_scope("conv"):
self.query_conv_output = conv_layer(
input_tensor=self.query_embedding_output,
filter_sizes=filter_sizes,
num_filters=num_filters,
initializer_range=initializer_range)
num_filters_total = num_filters * len(filter_sizes)
self.query_h_pool = tf.concat(self.query_conv_output, 3)
self.query_h_pool_flat = tf.reshape(self.query_h_pool, [-1, num_filters_total])
self.doc_conv_output = conv_layer(
input_tensor=self.doc_embedding_output,
filter_sizes=filter_sizes,
num_filters=num_filters,
initializer_range=initializer_range)
self.doc_h_pool = tf.concat(self.doc_conv_output, 3)
self.doc_h_pool_flat = tf.reshape(self.doc_h_pool, [-1, num_filters_total])
with tf.variable_scope("dssm"):
self.query_pred, self.doc_pred, self.cos_sim_prob = dssm_layer(self.query_h_pool_flat, self.doc_h_pool_flat, hidden_sizes, get_activation(act), is_training, max_seq_length, embedding_size, initializer_range, dropout_prob)
def get_embedding(self):
return self.embedding_table
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--vocab_size', type=int,
help='vocab size')
        parser.add_argument('--embedding_size', type=int,
                            help='embedding dimension')
        parser.add_argument('--act', type=str, choices=["tanh", "linear", "relu", "gelu"],
                            help='activation function for the dssm layers')
        parser.add_argument('--hidden_sizes', type=str,
                            help='comma-separated hidden layer sizes')
parser.add_argument('--filter_sizes', type=str,
help='filter size for conv layer')
parser.add_argument('--num_filters', type=int,
help='num filter for each filter')
parser.add_argument('--l2_reg_lambda', type=float,
help='l2 reg')
parser.add_argument('--drop_prob', type=float,
help='dropout prob for textcnn output layer')
parser.add_argument('--initializer_range', type=float,
help='initializer range for embedding')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
hidden_sizes = "500, 200" if args.hidden_sizes is None else args.hidden_sizes
hidden_sizes = hidden_sizes.split(",")
hidden_sizes = [int(hidden_size) for hidden_size in hidden_sizes]
filter_sizes = "2,3,4" if args.filter_sizes is None else args.filter_sizes
filter_sizes = filter_sizes.split(",")
filter_sizes = [int(filter_size) for filter_size in filter_sizes]
istraining = task.mode == tf.estimator.ModeKeys.TRAIN
return CdssmModel(istraining, task.features, args.vocab_size, filter_sizes, args.num_filters, args.act, args.embedding_size, hidden_sizes, args.max_seq_length, args.drop_prob, args.initializer_range)
def get_output(self):
return self.cos_sim_prob
@register_model_architecture('cdssm', 'cdssm')
def base_architecture(args):
args.vocab_size = 21128 if args.vocab_size is None else args.vocab_size
args.act = "relu" if args.act is None else args.act
args.embedding_size = 128 if args.embedding_size is None else args.embedding_size
args.max_seq_length = 200 if args.max_seq_length is None else args.max_seq_length
args.hidden_sizes = getattr(args, 'hidden_sizes', "500,200")
    args.filter_sizes = getattr(args, 'filter_sizes', "2,3,4")
    args.num_filters = 128 if args.num_filters is None else args.num_filters
args.l2_reg_lambda = 0.1 if args.l2_reg_lambda is None else args.l2_reg_lambda
args.drop_prob = 0.9 if args.drop_prob is None else args.drop_prob
args.initializer_range = 0.1 if args.initializer_range is None else args.initializer_range
|
jd-aig/aves2_algorithm_components
|
src/ml/classification/test_classification/run.py
|
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--model_dir", type=str, default="./model/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="virginica")
args = parser.parse_args()
test_dataset = None
if os.path.exists(os.path.join(args.data_dir,'test.csv')):
test_dataset = os.path.join(args.data_dir,'test.csv')
elif os.path.exists(os.path.join(args.data_dir,'val.csv')):
test_dataset = os.path.join(args.data_dir,'val.csv')
elif os.path.exists(os.path.join(args.data_dir,'train.csv')):
test_dataset = os.path.join(args.data_dir,'train.csv')
else:
print("ERROR:test file invalid!")
exit()
test_data = pd.read_csv(test_dataset)
lst = test_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_val = test_data.loc[:, args.target].values
x_val = test_data.loc[:, lst].values
save_path = os.path.join(args.model_dir,'model.m')
model = joblib.load(save_path)
predict = model.predict(x_val)
pred_csv = pd.concat([test_data,pd.DataFrame(columns=['PREDICT'],data=predict)],sort=False,axis=1)
pred_csv.to_csv(os.path.join(args.output_path,'result.csv'),float_format = '%.3f')
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/data/__init__.py
|
<reponame>jd-aig/aves2_algorithm_components<gh_stars>1-10
import argparse
import importlib
import os
from PlatformNlp.data.base_dataset import BaseDataset
DATA_REGISTRY = {}
def get_available_word_split_impl():
return ['word', 'char']
def get_available_type():
return ["train", "valid", "test"]
def get_dataset(args):
return DATA_REGISTRY[args.task](args)
def register_dataset(name):
"""
New dataset types can be added to platform with the :func:`register_data`
function decorator.
For example::
@register_dataset('multi_class')
class MultiClassFixLenDataset():
(...)
.. note:: All datasets must implement the :class:`BaseDataset` interface.
Args:
name (str): the name of the model
"""
def register_model_cls(cls):
if name in DATA_REGISTRY:
raise ValueError('Cannot register duplicate dataset ({})'.format(name))
if not issubclass(cls, BaseDataset):
raise ValueError('Dataset ({}: {}) must extend BaseDataset'.format(name, cls.__name__))
DATA_REGISTRY[name] = cls
return cls
return register_model_cls
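# Usage sketch (hypothetical task name): once a class has been decorated with
# @register_dataset('word2vec'), an argparse namespace whose args.task == 'word2vec'
# makes get_dataset(args) instantiate that registered class with the parsed args.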
# automatically import any Python files in the data/ directory
datasets_dir = os.path.dirname(__file__)
for file in os.listdir(datasets_dir):
path = os.path.join(datasets_dir, file)
if (
not file.startswith('_')
and not file.startswith('.')
and (file.endswith('.py') or os.path.isdir(path))
):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('PlatformNlp.data.' + model_name)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/criterions/multi_label_cross_entropy.py
|
<filename>src/nlp/PlatformNlp/criterions/multi_label_cross_entropy.py
import math
import tensorflow as tf
from PlatformNlp.criterions.platform_criterion import PlatformNlpCriterion
from PlatformNlp.criterions import register_criterion
@register_criterion('multi_label_cross_entropy')
class MultiLabelCrossEntropyCriterion(PlatformNlpCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.args = args
self.task = task
def get_loss(self):
"""Construct a criterion from command-line args."""
output_layer = self.task.model.get_output()
hidden_size = output_layer.shape[-1].value
l2_loss = tf.constant(0.0)
output_weights_layer = tf.get_variable(
"output_weights_layer", [self.args.num_labels, hidden_size],
initializer=tf.contrib.layers.xavier_initializer())
output_bias_layer = tf.get_variable(
"output_bias_layer", [self.args.num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
is_training = self.task.mode == tf.estimator.ModeKeys.TRAIN
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob= 1 - self.args.drop_prob)
logits = tf.matmul(output_layer, output_weights_layer, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias_layer)
l2_loss += tf.nn.l2_loss(output_weights_layer)
l2_loss += tf.nn.l2_loss(output_bias_layer)
probabilities = tf.nn.sigmoid(logits)
multi_labels = tf.cast(self.task.labels, dtype=tf.float32)
per_example_loss_multi_logits = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(labels=multi_labels, logits=logits), axis=-1)
per_example_loss = per_example_loss_multi_logits
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits, probabilities)
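# Worked sketch (hypothetical numbers, not from the original code): for one example
# with num_labels=3, logits [2.0, -1.0, 0.5] and multi-hot labels [1, 0, 1], the
# three per-label sigmoid cross-entropies are summed into per_example_loss, `loss`
# is the mean of those sums over the batch, and `probabilities` holds
# sigmoid(logits) for each label.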
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/pre.py
|
import tensorflow as tf
import math
def parse_example_proto(example_serialized):
# a dict mapping from feature keys to tensor and sparsetensor values
feature_map = {
'image/height': tf.VarLenFeature(dtype=tf.int64),
'image/width': tf.VarLenFeature(dtype=tf.int64),
'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
#'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
#'image/format': tf.FixedLenFeature((), tf.string, default_value='JPEG'),
'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
}
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
return features['image/encoded'], label, features['image/filename'] #, features['image/class/text']
def preprocess(image_buffer, is_training, model_name, height, width):
image = tf.image.decode_image(image_buffer, channels=3)
image = tf.cast(image, tf.float32)
#image = tf.image.decode_image(image_buffer, channels=3, dtype=tf.float32)
#print(image)
pred = tf.equal(is_training,'train')
from preprocessing import preprocessing_factory
def f1():
image_preprocessing_fn = preprocessing_factory.get_preprocessing(model_name,is_training=True)
images = image_preprocessing_fn(image, height, width)
#images = tf.Print(images,[model_name])
return images
def f2():
image_preprocessing_fn = preprocessing_factory.get_preprocessing(model_name,is_training=False)
images = image_preprocessing_fn(image, height, width)
#images = tf.Print(images,[model_name])
return images
imagess = tf.cond(pred, f1, f2)
#imagess = tf.Print(imagess,[imagess])
return imagess
#preprocessing_factory.get_preprocessing(model_name,is_training=True)
#preprocessing_factory.get_preprocessing(model_name,is_training=False)
|
jd-aig/aves2_algorithm_components
|
src/cv/face_detection/faceboxes/test_faceboxes/try_detector.py
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import numpy as np
from PIL import Image, ImageDraw
import cv2
import time
from face_detector import FaceDetector
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir",type=str,default='../data_dir/')
parser.add_argument("--model_dir",type=str,default='../model_dir/')
parser.add_argument("--output_path",type=str,default='../output_path/')
args = parser.parse_args()
MODEL_PATH = os.path.join(args.model_dir,'frozen_inference_graph.pb')
face_detector = FaceDetector(MODEL_PATH, gpu_memory_fraction=0.95, visible_device_list='0')
def draw_boxes_on_image(image, boxes, scores):
image_copy = image.copy()
draw = ImageDraw.Draw(image_copy, 'RGBA')
width, height = image.size
for b, s in zip(boxes, scores):
ymin, xmin, ymax, xmax = b
fill = (255, 0, 0, 45)
outline = 'red'
draw.rectangle(
[(xmin, ymin), (xmax, ymax)],
fill=fill, outline=outline
)
draw.text((xmin, ymin), text='{:.3f}'.format(s))
return image_copy
times = []
#print(os.listdir(args.data_dir))
for filename in os.listdir(os.path.join(args.data_dir,'images')):
path = os.path.join(args.data_dir,'images',filename)
image_array = cv2.imread(path)
image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
#image = Image.fromarray(image_array)
start = time.time()
boxes, scores = face_detector(image_array, score_threshold=0.3)
print("image cost time=%.2f ms" %(1000*(time.time()-start)))
image_out = draw_boxes_on_image(Image.fromarray(image_array), boxes, scores)
image_out.save(os.path.join(args.output_path,filename),quality=95)
#print(boxes)
#print(scores)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/metrics/__init__.py
|
<filename>src/nlp/PlatformNlp/metrics/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import importlib
import os
from .platform_metrics import PlatformMetrice
METRICES_REGISTRY = {}
METRICES_CLASS_NAMES = set()
def register_metrices(name):
"""
New metrices can be added to PlatformNlp with the
:func:`~PlatformNlp.metrics.register_metrices` function decorator.
"""
def register_metrices_cls(cls):
if name in METRICES_REGISTRY:
raise ValueError('Cannot register duplicate metrices ({})'.format(name))
if not issubclass(cls, PlatformMetrice):
            raise ValueError('metrices ({}: {}) must extend PlatformMetrice'.format(name, cls.__name__))
if cls.__name__ in METRICES_CLASS_NAMES:
raise ValueError('Cannot register metrices with duplicate class name ({})'.format(cls.__name__))
METRICES_REGISTRY[name] = cls
METRICES_CLASS_NAMES.add(cls.__name__)
return cls
return register_metrices_cls
def get_metrices(name):
return METRICES_REGISTRY[name]
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith('_')
and not file.startswith('.')
and (file.endswith('.py') or os.path.isdir(path))
):
task_name = file[:file.find('.py')] if file.endswith('.py') else file
importlib.import_module('PlatformNlp.metrics.' + task_name)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/attention.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from PlatformNlp.modules.utils import get_shape_list, create_initializer, reshape_to_matrix
from PlatformNlp.modules.drop_out import dropout
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
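# Shape sketch (hypothetical sizes): with a from_tensor of shape [2, 4, width] and a
# to_mask of shape [2, 5], to_mask is reshaped to [2, 1, 5], broadcast_ones is
# [2, 4, 1], and the returned mask is [2, 4, 5]: the same to-mask row repeated for
# every from-position.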
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
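# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical self-attention call, assuming `from_tensor` is a float Tensor
# of shape [batch_size, seq_length, hidden_size] produced elsewhere; all shapes
# and values below are invented for illustration only.
#
# from_tensor = tf.ones([8, 128, 768])
# attention_output = attention_layer(
#     from_tensor=from_tensor,
#     to_tensor=from_tensor,          # self-attention
#     num_attention_heads=12,
#     size_per_head=64)
# # attention_output has shape [8, 128, 12 * 64]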
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/options.py
|
import argparse
import sys
from typing import Callable, List, Optional
from PlatformNlp import utils
from PlatformNlp.data import get_available_word_split_impl, get_available_type
def get_preprocessing_parser(default_task="multi_class"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="multi_class"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser)
return parser
def csv_str_list(x):
return x.split(',')
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
def eval_str_dict(x, type=dict):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
return x
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from PlatformNlp.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(parser)
# Add *-specific args to parser.
from PlatformNlp.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
if hasattr(args, "task"):
from PlatformNlp.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if hasattr(args, "metrices"):
from PlatformNlp.metrics import METRICES_REGISTRY
METRICES_REGISTRY[args.metrices].add_args(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Apply architecture configuration.
# if hasattr(args, "arch"):
# ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
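# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical way the parser helpers above could be combined; the flag
# values are placeholders, not settings from the original repository.
#
# parser = get_training_parser(default_task="multi_class")
# args = parse_args_and_arch(parser, input_args=["--task", "multi_class",
#                                                "--output_dir", "/tmp/out"])
# print(args.task, args.output_dir)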
def get_parser(desc, default_task="multi_class"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--output_dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
# fmt: off
parser.add_argument('--output_dir', metavar='DIR', default='',
help='path to save models')
parser.add_argument('--model_dir', metavar='DIR', default='',
help='path to load models')
from PlatformNlp.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
'--' + registry_name.replace('_', '-'),
default=REGISTRY['default'],
choices=REGISTRY['registry'].keys(),
)
# Task definitions can be found under fairseq/tasks/
from PlatformNlp.tasks import TASK_REGISTRY
parser.add_argument('--task', metavar='TASK', default=default_task,
choices=TASK_REGISTRY.keys(),
help='task')
from PlatformNlp.metrics import METRICES_REGISTRY
parser.add_argument('--metrics', metavar='METRICS', default="multi_class_cross_entry_metrics",
choices=METRICES_REGISTRY.keys(),
help='metrics')
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("--data_file", metavar="DATA",
help='sources data file')
group.add_argument("--dict_file", metavar="DICT",
help="given dictionary or Generated object file")
group.add_argument("--max_seq_length", metavar="MAX_SEQ_LENGTH", default=200, type=int,
help="max_sent_length of a sentence")
group.add_argument("--word_format", metavar="WORD_FORMAT", choices=get_available_word_split_impl(),
help='choice word format to generate words')
group.add_argument("--type", metavar="TYPE", choices=get_available_type(),
help="generate type")
group.add_argument("--label_file", metavar="LABEL_FILE", help="source label file or dest label file")
group.add_argument("--output_file", metavar="OUTPUT_FILE", help="destinate label file")
return parser
def add_dataset_args(parser):
group = parser.add_argument_group("Dataset and data loading")
# fmt: off
group.add_argument("--type", metavar="TYPE", choices=get_available_type(),
help="generate type")
group.add_argument("--batch_size", metavar="BATCH_SIZE", type=int, help="generate type")
group.add_argument('--data_file', metavar="DATA_FILE", type=str,
help='data file of the input tfrecord')
group.add_argument('--label_file', metavar="LABEL_FILE", type=str,
help='label file of the input')
group.add_argument('--train_data_file', metavar="DATA_FILE", type=str,
help='data file of the input tfrecord')
group.add_argument('--eval_data_file', metavar="DATA_FILE", type=str,
help='data file of the input tfrecord')
group.add_argument('--test_data_file', metavar="DATA_FILE", type=str,
help='data file of the input tfrecord')
group.add_argument('--max_seq_length', metavar="MAX_SEQ_LENGTH", default=200, type=int,
help="max_sent_length of a sentence")
# fmt: on
return group
def add_optimization_args(parser):
group = parser.add_argument_group("Optimization")
# fmt: off
group.add_argument('--epoch', '--e', default=1, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--clip_norm', default=0.0, type=float, metavar='NORM',
help='clip threshold of gradients')
    group.add_argument('--learning_rate', default=0.001, type=float,
                       metavar='LR',
                       help='initial learning rate')
group.add_argument('--warmup_proportion', default=0.1, type=float, metavar='WARMUP',
help='warmup_proportion')
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("Checkpointing")
# fmt: off
group.add_argument('--init_checkpoint', metavar='DIR', type=str,
help='init checkpoint name')
group.add_argument('--device_map', metavar='DEVICE', type=str, default='-1',
                        help='device map')
group.add_argument("--save_checkpoints_steps", metavar='DEVICE', type=int, default=100,
help="How often to save the model checkpoint.")
# fmt: on
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
# fmt: off
group.add_argument('--batch_size', default=1, type=int, metavar='N',
help='predict n sentences each time')
# fmt: on
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from PlatformNlp.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
group.add_argument("--do_train", dest='do_train', action='store_true', help="if do train type")
group.add_argument("--do_eval", dest='do_eval', action='store_true', help="if do eval type")
group.add_argument("--inter_op_parallelism_threads", type=int, default=0,
help="the inter_op_parallelism_threads to set the gpu config")
group.add_argument("--intra_op_parallelism_threads", type=int, default=0,
help="Number of intra_op_parallelism_threads to use for CPU. ")
group.add_argument("--max_steps_without_decrease", type=int, default=100, help="max step without decrease")
# fmt: on
return group
|
jd-aig/aves2_algorithm_components
|
src/cv/ocr_end2end/ATTENTION_OCR/datasets/newtextdataset.py
|
from datasets import fsns
import os
import json
DEFAULT_DATASET_DIR = 'dataset_dir/'
def get_split(split_name, config_path, dataset_dir=None, config=None):
if not dataset_dir:
dataset_dir = DEFAULT_DATASET_DIR
if not config:
config = json.load(open(config_path,'r'))
return fsns.get_split(split_name, dataset_dir, config)
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/nets/nets_factory.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from nets import alexnet
from nets import cifarnet
from nets import inception
from nets import lenet
from nets import mobilenet_v1
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import vgg
from nets.mobilenet import mobilenet_v2
from nets.nasnet import nasnet
from nets.nasnet import pnasnet
slim = tf.contrib.slim
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
'mobilenet_v1_10': mobilenet_v1.mobilenet_v1,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
'mobilenet_v2_10': mobilenet_v2.mobilenet,
'mobilenet_v2_14': mobilenet_v2.mobilenet_v2_140,
'mobilenet_v2_035': mobilenet_v2.mobilenet_v2_035,
'nasnet_cifar': nasnet.build_nasnet_cifar,
'nasnet_mobile': nasnet.build_nasnet_mobile,
'nasnet_large': nasnet.build_nasnet_large,
'pnasnet_large': pnasnet.build_pnasnet_large,
'pnasnet_mobile': pnasnet.build_pnasnet_mobile
}
arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2':
inception.inception_resnet_v2_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
'mobilenet_v1_10': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v2_10': mobilenet_v2.training_scope,
'mobilenet_v2_035': mobilenet_v2.training_scope,
'mobilenet_v2_14': mobilenet_v2.training_scope,
'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
'nasnet_large': nasnet.nasnet_large_arg_scope,
'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
'pnasnet_mobile': pnasnet.pnasnet_mobile_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
"""Returns a network_fn such as `logits, end_points = network_fn(images)`.
Args:
name: The name of the network.
num_classes: The number of classes to use for classification. If 0 or None,
the logits layer is omitted and its input features are returned instead.
weight_decay: The l2 coefficient for the model weights.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
network_fn: A function that applies the model to a batch of images. It has
the following signature:
net, end_points = network_fn(images)
The `images` input is a tensor of shape [batch_size, height, width, 3]
with height = width = network_fn.default_image_size. (The permissibility
and treatment of other sizes depends on the network_fn.)
The returned `end_points` are a dictionary of intermediate activations.
The returned `net` is the topmost layer, depending on `num_classes`:
If `num_classes` was a non-zero integer, `net` is a logits tensor
of shape [batch_size, num_classes].
If `num_classes` was 0 or `None`, `net` is a tensor with the input
to the logits layer of shape [batch_size, 1, 1, num_features] or
[batch_size, num_features]. Dropout has not been applied to this
(even if the network's original classification does); it remains for
the caller to do this or not.
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in networks_map:
raise ValueError('Name of network unknown %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images, **kwargs):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes, is_training=is_training, **kwargs)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
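# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical example of building a classifier through the factory; the
# network name, class count, and placeholder shape are invented for illustration.
#
# network_fn = get_network_fn('resnet_v1_50', num_classes=10,
#                             weight_decay=1e-4, is_training=True)
# size = network_fn.default_image_size
# images = tf.placeholder(tf.float32, [None, size, size, 3])
# logits, end_points = network_fn(images)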
|
jd-aig/aves2_algorithm_components
|
src/nlp/Platform_cli/preprocess.py
|
import logging
import os
import sys
sys.path.append("../")
sys.path.append("../PlatformNlp/")
from PlatformNlp import options, tasks, utils
from PlatformNlp.data import get_dataset
from PlatformNlp.tokenization import CharTokenizer, WordTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('PlatformNlp.preprocess')
def main(args):
utils.import_user_module(args)
os.makedirs(args.output_dir, exist_ok=True)
logger.addHandler(logging.FileHandler(
filename=os.path.join(args.output_dir, 'preprocess.log'),
))
logger.info(args)
# first create task
task = tasks.get_task(args.task)
# create dict
if args.dict_file is not None and os.path.exists(args.dict_file):
dict = task.load_dictionary(args.dict_file)
else:
dict = None
# create tokenizer
if args.word_format == "char":
tokenizer = CharTokenizer(dict)
else:
tokenizer = WordTokenizer(dict)
    if dict is None:
# build dict and reload dict
dict = task.build_dictionary(args.data_file, tokenizer)
tokenizer.set_dict(dict)
d = get_dataset(args)
d.build_dataset(args, tokenizer)
def cli_main():
parser = options.get_preprocessing_parser()
args = options.parse_args_and_arch(parser, modify_parser=None)
main(args)
if __name__ == "__main__":
cli_main()
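# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical command line for this preprocessing entry point; all file
# paths and values below are placeholders, not paths from the repository.
#
# python preprocess.py \
#     --task multi_class \
#     --data_file data/train.csv \
#     --dict_file data/vocab.txt \
#     --word_format char \
#     --max_seq_length 128 \
#     --output_dir output/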
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/early_stop.py
|
def loss_early_stop(loss, param):
#print('Last Loss {}; Loss Count:{}'.format(param['last'],param['count']))
if loss >= param['last']:
param['count'] += 1
else:
param['count'] = 0
param['last'] = loss
return param
def top1_early_stop(top1, param):
if top1 > param['top1_max']:
param['top1_max'] = top1
param['count'] = 0
else:
param['count'] += 1
return param
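# --- Illustrative usage sketch (not part of the original file) ---
# A small self-contained demo of driving loss_early_stop from a training loop;
# the loss values and the patience threshold of 2 are invented for illustration.
if __name__ == "__main__":
    state = {'last': float('inf'), 'count': 0}
    for loss in [0.9, 0.7, 0.71, 0.72, 0.73]:
        state = loss_early_stop(loss, state)
        if state['count'] >= 2:
            print('early stop triggered at loss', loss)
            break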
|
jd-aig/aves2_algorithm_components
|
src/cv/object_detection/test_objd/test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import time
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'data_dir', None, 'Path to data directory '
'where event and checkpoint files will be written.')
tf.app.flags.DEFINE_string(
'output_path', None, 'Path to output data directory '
'where event and checkpoint files will be written.')
tf.app.flags.DEFINE_string(
'model_dir', None, 'Path to output model directory '
'where event and checkpoint files will be written.')
tf.app.flags.DEFINE_float(
'min_score_thresh', 0.5, 'min_score_thresh')
PATH_TO_FROZEN_GRAPH = os.path.join(FLAGS.model_dir,'frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(FLAGS.data_dir,'ImageSets/label_map.pbtxt')
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
PATH_TO_TEST_IMAGES_DIR = os.path.join(FLAGS.data_dir,'JPEGImages')
VAL_TXT_DIR = os.path.join(FLAGS.data_dir,'ImageSets')
#TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, '*.jpg')]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def run_inference_for_single_image(image, graph):
with graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
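# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical single-image call; `image_np` would be an HxWx3 uint8 array
# such as the one produced by load_image_into_numpy_array above.
#
# output_dict = run_inference_for_single_image(image_np, detection_graph)
# print(output_dict['num_detections'], output_dict['detection_scores'][:5])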
with detection_graph.as_default():
with tf.Session() as sess:
# Get handles to input and output tensors
ops = tf.get_default_graph().get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
tensor_dict = {}
for key in [
'num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks'
]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
tensor_name)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(
tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(
detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
# Run inference
VAL_TXT = os.path.join(VAL_TXT_DIR,'val.txt')
with open(VAL_TXT,'r') as f:
content = [line.strip() for line in f]
#print(content)
for image_name in content:
TEST_IMAGE_PATHS = None
ext = None
if os.path.exists(os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.jpg')):
TEST_IMAGE_PATHS = os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.jpg')
ext = '.jpg'
elif os.path.exists(os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.JPG')):
TEST_IMAGE_PATHS = os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.JPG')
ext = '.JPG'
elif os.path.exists(os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.jpeg')):
TEST_IMAGE_PATHS = os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.jpeg')
ext = '.jpeg'
elif os.path.exists(os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.JPEG')):
TEST_IMAGE_PATHS = os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.JPEG')
ext = '.JPEG'
elif os.path.exists(os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.png')):
TEST_IMAGE_PATHS = os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.png')
ext = '.png'
elif os.path.exists(os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.PNG')):
TEST_IMAGE_PATHS = os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name+'.PNG')
ext = '.PNG'
else:
            print(os.path.join(PATH_TO_TEST_IMAGES_DIR,image_name),' does not exist!')
continue
#print(image_name)
print(TEST_IMAGE_PATHS)
image = Image.open(TEST_IMAGE_PATHS)
#image = Image.open("/export/luozhuang/data_tf/tfserving/client_server/images/1.jpg")
# the array based representation of the image will be used later in order to prepare the
# result image with boxes and labels on it.
image_np = load_image_into_numpy_array(image)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
start = time.time()
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: image_np_expanded})
print("per image cost time=%.2f ms" %(1000*(time.time()-start)))
# all outputs are float32 numpy arrays, so convert types as appropriate
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
#print(output_dict['detection_classes'])
#break
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
output_dict['detection_boxes'],
output_dict['detection_classes'],
output_dict['detection_scores'],
category_index,
min_score_thresh=FLAGS.min_score_thresh,
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
im = Image.fromarray(image_np)
im.save(os.path.join(FLAGS.output_path,image_name+ext))
|
jd-aig/aves2_algorithm_components
|
src/cv/object_detection/ssd_mobilenetv2/cal_params.py
|
import os
import argparse
from object_detection.utils import label_map_util
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="./")
parser.add_argument("--epochs", type=int, default=30)
parser.add_argument("--batch_size", type=int, default=24)
args = parser.parse_args()
label_map_path = os.path.join(args.data_dir, 'ImageSets', 'label_map.pbtxt')
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
num_classes_val = len(label_map_dict)
train_txt = os.path.join(args.data_dir, 'ImageSets', 'train.txt')
val_txt = os.path.join(args.data_dir, 'ImageSets', 'val.txt')
count = 0
for index, line in enumerate(open(train_txt,'r')):
count += 1
num_examples_train = count
count = 0
for index, line in enumerate(open(val_txt,'r')):
count += 1
num_examples_val = count
num_train_steps_val = num_examples_train//args.batch_size
if num_train_steps_val == 0:
num_train_steps_val = 1
num_train_steps_val = num_train_steps_val*args.epochs
decay_steps_val = num_examples_train//args.batch_size
if decay_steps_val == 0:
decay_steps_val = 1
decay_factor_val = 0.9
print("num_classes_val = ",num_classes_val," ,decay_steps_val = ",decay_steps_val," ,decay_factor_val = ",decay_factor_val," ,num_examples_val = ",num_examples_val," ,num_train_steps_val = ",num_train_steps_val)
f = open(os.path.join("cal_params.txt"),"w")
f.write(str(num_classes_val)+'\n')
f.write(str(decay_steps_val)+'\n')
f.write(str(decay_factor_val)+'\n')
f.write(str(num_examples_val)+'\n')
f.write(str(num_train_steps_val)+'\n')
f.close()
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/metrics/platform_metrics.py
|
import logging
import os
import warnings
logger = logging.getLogger(__name__)
class PlatformMetrice(object):
"""
    Base class for metric computation: stores input ids, label ids and
    prediction scores, and provides helpers for computing evaluation metrics.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
def __init__(self, args, input_ids, label_ids, predict_scores, label_mapping):
self.args = args
self.input_ids = input_ids
self.label_ids = label_ids
self.predict_scores = predict_scores
self.label_mapping = label_mapping
|
jd-aig/aves2_algorithm_components
|
src/cv/ocr_detection/CTPN/main/eval.py
|
import datetime
import os
import sys
import time
import numpy
import tensorflow as tf
sys.path.append(os.getcwd())
for i in os.listdir(os.getcwd()):
    if '.' not in i:
sys.path.append(os.getcwd()+'/'+i)
from tensorflow.contrib import slim
from nets import model_train as model
from utils.dataset import data_provider as data_provider
#'input'
tf.app.flags.DEFINE_string('data_folder', "", '')
#'output'
tf.app.flags.DEFINE_string('export_checkpoint_path', '', '')
tf.app.flags.DEFINE_string('logs_path', '', '')
tf.app.flags.DEFINE_integer('num_readers', 4, '')
FLAGS = tf.app.flags.FLAGS
def main(argv=None):
status = os.system('sh setup.sh')
print(status)
now = datetime.datetime.now()
StyleTime = now.strftime("%Y-%m-%d-%H-%M-%S")
os.makedirs(FLAGS.logs_path + StyleTime)
if not os.path.exists(FLAGS.export_checkpoint_path):
os.makedirs(FLAGS.export_checkpoint_path)
input_image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
input_bbox = tf.placeholder(tf.float32, shape=[None, 5], name='input_bbox')
input_im_info = tf.placeholder(tf.float32, shape=[None, 3], name='input_im_info')
with tf.device('/gpu:%d' % 0):
with tf.name_scope('model_%d' % 0) as scope:
bbox_pred, cls_pred, cls_prob = model.model(input_image)
#bbox_pred = tf.Print(bbox_pred,[bbox_pred, cls_pred, cls_prob])
total_loss, model_loss, rpn_cross_entropy, rpn_loss_box = model.loss(bbox_pred, cls_pred, input_bbox,
input_im_info)
#total_loss = tf.Print(total_loss,[total_loss, model_loss, rpn_cross_entropy, rpn_loss_box])
with tf.control_dependencies([total_loss,model_loss, rpn_cross_entropy, rpn_loss_box]):
eval_op = tf.no_op(name='eval_op')
saver = tf.train.Saver(tf.global_variables())
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.95
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
ckpt = tf.train.latest_checkpoint(FLAGS.export_checkpoint_path)
saver.restore(sess, ckpt)
print('start')
data_generator = data_provider.get_batch(num_workers=FLAGS.num_readers,data_folder = FLAGS.data_folder, shuffle = False)
num_example = len(tf.gfile.ListDirectory(os.path.join(FLAGS.data_folder,'image')))
processed_examples = 0
model_loss_list = []
total_loss_list = []
start = time.time()
while processed_examples < num_example:
data = next(data_generator)
ml, tl, _ = sess.run([model_loss, total_loss, eval_op],
feed_dict={input_image: data[0],
input_bbox: data[1],
input_im_info: data[2]})
processed_examples += len(data[0])
if processed_examples % 10 ==0:
print('processed {} images'.format(processed_examples))
model_loss_list.append(ml)
total_loss_list.append(tl)
print('processed_examples:{}, validation_loss: model loss {:.4f}, total loss {:.4f}'.format(
processed_examples, numpy.mean(model_loss_list), numpy.mean(total_loss_list)))
if __name__ == '__main__':
tf.app.run()
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/metrics/word_embedding_metrics.py
|
import logging
from PlatformNlp.metrics import register_metrices
from PlatformNlp.metrics.platform_metrics import PlatformMetrice
from PlatformNlp.tokenization import load_vocab
import json
import numpy as np
logger = logging.getLogger(__name__)
@register_metrices('word_embedding_metrics')
class WordEmbeddingMetrics(PlatformMetrice):
"""
    Metrics for the word-embedding task: for each input token id, reports the
    top-k most similar vocabulary words according to the predicted similarity
    scores.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--topk', type=int, default=10, help='topk to show the similarity between words')
parser.add_argument('--vocab_file', type=str, help='vocab file')
def __init__(self, args, input_ids, label_ids, predict_scores, label_mapping):
super().__init__(args, input_ids, label_ids, predict_scores, label_mapping)
        self.args = args
self.input_ids = input_ids
self.label_ids = label_ids
self.predict_scores = predict_scores
self.label_mapping = label_mapping
def compute_metrices(self):
json_dict = dict()
vocab = load_vocab(self.args.vocab_file)
inv_vocab = {v: k for k, v in vocab.items()}
for (index, input_id) in enumerate(self.input_ids):
similarity = self.predict_scores[index]
similarity = np.array(similarity).reshape(-1, self.args.vocab_size)
for (i, id_word) in enumerate(input_id):
id_word = int(id_word)
similarity_i = list(similarity[i])
similarity_i_ = [-x for x in similarity_i]
similarity_i_ = np.array(similarity_i_)
char_words = inv_vocab.get(id_word, "UNK")
nearst_id = (similarity_i_).argsort()[1:self.args.topk + 1]
nearst_words = [inv_vocab.get(id_n, "UNK") for id_n in nearst_id]
json_dict[char_words] = nearst_words
json_data = json.dumps(json_dict)
return json_data
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/criterions/multi_class_cross_entropy.py
|
import math
import tensorflow as tf
from PlatformNlp.criterions.platform_criterion import PlatformNlpCriterion
from PlatformNlp.criterions import register_criterion
@register_criterion('multi_class_cross_entropy')
class MultiClassCrossEntropyCriterion(PlatformNlpCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.args = args
self.task = task
def get_loss(self):
"""Construct a criterion from command-line args."""
output_layer = self.task.model.get_output()
hidden_size = output_layer.shape[-1].value
l2_loss = tf.constant(0.0)
output_weights_layer = tf.get_variable(
"output_weights_layer", [self.args.num_classes, hidden_size],
initializer=tf.contrib.layers.xavier_initializer())
output_bias_layer = tf.get_variable(
"output_bias_layer", [self.args.num_classes], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
is_training = self.task.mode == tf.estimator.ModeKeys.TRAIN
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=self.args.drop_keep_prob)
logits = tf.matmul(output_layer, output_weights_layer, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias_layer)
l2_loss += tf.nn.l2_loss(output_weights_layer)
l2_loss += tf.nn.l2_loss(output_bias_layer)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(self.task.labels, depth=self.args.num_classes, dtype=tf.float32)
per_example_loss_logits = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
per_example_loss = per_example_loss_logits
loss = tf.reduce_mean(per_example_loss) + self.args.l2_reg_lambda * l2_loss
return (loss, per_example_loss, logits, probabilities)
|
jd-aig/aves2_algorithm_components
|
src/cv/face_detection/faceboxes/faceboxes_facedet/train.py
|
import tensorflow as tf
import json
import os
from model import model_fn
from src.input_pipeline import Pipeline
import argparse
tf.logging.set_verbosity('INFO')
parser = argparse.ArgumentParser()
parser.add_argument("--train_tfrecord", type=str, default="../data/")
parser.add_argument("--val_tfrecord", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--epochs", type=int, default=20)
args = parser.parse_args()
CONFIG = 'config.json'
GPU_TO_USE = '0'
params = json.load(open(CONFIG))
model_params = params['model_params']
input_params = params['input_pipeline_params']
def get_input_fn(is_training=True):
image_size = input_params['image_size'] if is_training else None
    # (for evaluation I use images of different sizes)
dataset_path = args.train_tfrecord if is_training else args.val_tfrecord
batch_size = args.batch_size if is_training else 1
# for evaluation it's important to set batch_size to 1
filenames = os.listdir(dataset_path)
filenames = [n for n in filenames if n.endswith('.tfrecords')]
filenames = [os.path.join(dataset_path, n) for n in sorted(filenames)]
def input_fn():
with tf.device('/cpu:0'), tf.name_scope('input_pipeline'):
pipeline = Pipeline(
filenames,
batch_size=batch_size, image_size=image_size,
repeat=is_training, shuffle=is_training,
augmentation=is_training
)
features, labels = pipeline.get_batch()
return features, labels
return input_fn
config = tf.ConfigProto()
config.gpu_options.visible_device_list = GPU_TO_USE
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(
model_dir=args.output_path,
session_config=config,
save_summary_steps=200,
save_checkpoints_secs=600,
log_step_count_steps=100,
keep_checkpoint_max=1
)
train_input_fn = get_input_fn(is_training=True)
val_input_fn = get_input_fn(is_training=False)
estimator = tf.estimator.Estimator(model_fn, params=model_params, config=run_config)
fid = open(os.path.join(args.train_tfrecord,'num_examples.txt'),'r')
num_examples = int(fid.read())
print('num_examples : ',num_examples)
num_steps = (num_examples*args.epochs)//args.batch_size
if num_steps == 0:
num_steps = 1
fid.close()
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=num_steps)
eval_spec = tf.estimator.EvalSpec(val_input_fn, steps=None, start_delay_secs=1800, throttle_secs=1800)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
|
jd-aig/aves2_algorithm_components
|
src/ml/cluster/kmeans/run.py
|
from sklearn.cluster import KMeans
from sklearn.metrics import calinski_harabaz_score
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--n_clusters", type=int, default=3)
parser.add_argument("--n_init", type=int, default=10)
parser.add_argument("--max_iter", type=int, default=300)
args = parser.parse_args()
dataset = os.path.join(args.data_dir,'data.csv')
data = pd.read_csv(dataset)
x = data.ix[:,:].values
model = KMeans(n_clusters=args.n_clusters,n_init=args.n_init,max_iter=args.max_iter)
model.fit(x)
y_pred = model.labels_
save_path = os.path.join(args.output_path,'model.m')
joblib.dump(model,save_path)
c_h_score = calinski_harabaz_score(x,y_pred)
json_dict = {}
json_dict["calinski_harabaz_score"] = c_h_score
json_data = json.dumps(json_dict)
f = open(os.path.join(args.output_path,"result.json"),"w")
f.write(str(json_data))
f.close()
pred_csv = pd.concat([data,pd.DataFrame(columns=['PREDICT'],data=y_pred.tolist())],sort=False,axis=1)
pred_csv.to_csv(os.path.join(args.output_path,'result.csv'),float_format = '%.5f')
print('calinski_harabaz_score : ', c_h_score)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/metrics/multi_label_cross_entry_metrics.py
|
import logging
from PlatformNlp.metrics import register_metrices
from PlatformNlp.metrics.platform_metrics import PlatformMetrice
from PlatformNlp.utils import calculate_multi_label
import json
logger = logging.getLogger(__name__)
@register_metrices('multi_label')
class MultiLabelMetrics(PlatformMetrice):
"""
    Metrics for multi-label classification: thresholds the predicted scores and
    reports exact-match accuracy and a lenient accuracy that tolerates missed
    labels but not spurious ones.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--label_file', type=str, help='label_file for mapping json')
parser.add_argument('--threshold', type=float, help='threshold for classify label')
def __init__(self, args, input_ids, label_ids, predict_scores, label_mapping):
super().__init__(args, input_ids, label_ids, predict_scores, label_mapping)
        self.args = args
self.input_ids = input_ids
self.label_ids = label_ids
self.predict_scores = predict_scores
self.label_mapping = label_mapping
def compute_metrices(self):
multi_label_pred = []
for i in range(len(self.predict_scores)):
            decision_score = self.predict_scores[i]
            multi_label_pred_line = [1 if j > self.args.threshold else 0 for j in decision_score]
multi_label_pred.append(multi_label_pred_line)
exact_accu, less_accu = calculate_multi_label(multi_label_pred, self.label_ids)
json_dict = dict()
json_dict["exact_accu"] = exact_accu
json_dict["less_accu"] = less_accu
json_data = json.dumps(json_dict)
return json_data
|
jd-aig/aves2_algorithm_components
|
src/cv/face_detection/faceboxes/val_faceboxes/save.py
|
import tensorflow as tf
import json
from model import model_fn
import argparse
import os
"""The purpose of this script is to export a savedmodel."""
parser = argparse.ArgumentParser()
parser.add_argument("--output_path", type=str, default="./output/")
args = parser.parse_args()
CONFIG = 'config.json'
OUTPUT_FOLDER = os.path.join(args.output_path,'tmp')
GPU_TO_USE = '0'
WIDTH, HEIGHT = None, None
# size of an input image,
# set (None, None) if you want inference
# for images of variable size
tf.logging.set_verbosity('INFO')
params = json.load(open(CONFIG))
model_params = params['model_params']
config = tf.ConfigProto()
config.gpu_options.visible_device_list = GPU_TO_USE
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(
model_dir=args.output_path,
session_config=config
)
estimator = tf.estimator.Estimator(model_fn, params=model_params, config=run_config)
def serving_input_receiver_fn():
images = tf.placeholder(dtype=tf.uint8, shape=[None, HEIGHT, WIDTH, 3], name='image_tensor')
features = {'images': tf.to_float(images)*(1.0/255.0)}
return tf.estimator.export.ServingInputReceiver(features, {'images': images})
estimator.export_savedmodel(
OUTPUT_FOLDER, serving_input_receiver_fn
)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/metrics/ner_metrics.py
|
from PlatformNlp.metrics import register_metrices
from PlatformNlp.metrics.platform_metrics import PlatformMetrice
from PlatformNlp.tokenization import load_vocab
from PlatformNlp import conlleval
import logging
import json
import os
logger = logging.getLogger(__name__)
@register_metrices('ner_metrics')
class NerMetrics(PlatformMetrice):
"""
    Metrics for the NER task: writes token / gold-label / predicted-label
    triples to a CoNLL-style file and reports the conlleval evaluation result.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--num_classes', type=int, default=11, help='ner classes')
def __init__(self, args, input_ids, label_ids, predict_scores, label_mapping):
super().__init__(args, input_ids, label_ids, predict_scores, label_mapping)
        self.args = args
self.input_ids = input_ids
self.label_ids = label_ids
self.predict_scores = predict_scores
self.label_mapping = label_mapping
def compute_metrices(self):
predict_ids = self.predict_scores
label_ids = self.label_ids
vocab = load_vocab(self.args.vocab_file)
output_predict_file = os.path.join(self.args.output_dir, "ner_eval.txt")
inv_vocab = {v: k for k, v in vocab.items()}
inv_label = {v: k for k, v in self.label_mapping.items()}
for (index, input_id) in enumerate(self.input_ids):
line = ""
label_id = list(label_ids[index])
pred_id = list(predict_ids[index])
for (i, id) in enumerate(input_id):
char_words = inv_vocab.get(id, "UNK")
label = inv_label.get(label_id[i])
pred = inv_label.get(pred_id[i])
if pred is None:
pred = "O"
line += char_words + ' ' + label + ' ' + pred + '\n'
line += "\n"
with open(output_predict_file, 'a', encoding='utf-8') as writer:
writer.write(line)
writer.flush()
eval_result = conlleval.return_report(output_predict_file)
json_data = json.dumps(eval_result)
return json_data
|
jd-aig/aves2_algorithm_components
|
src/cv/face_detection/faceboxes/val_faceboxes/val.py
|
import tensorflow as tf
import json
import os
from model import model_fn
from src.input_pipeline import Pipeline
import argparse
tf.logging.set_verbosity('INFO')
parser = argparse.ArgumentParser()
parser.add_argument("--val_tfrecord", type=str, default="../data/")
parser.add_argument("--model_dir", type=str, default="./output/")
parser.add_argument("--output_path", type=str, default="./output/")
args = parser.parse_args()
CONFIG = 'config.json'
GPU_TO_USE = '0'
params = json.load(open(CONFIG))
model_params = params['model_params']
input_params = params['input_pipeline_params']
def get_input_fn(is_training=True):
image_size = input_params['image_size'] if is_training else None
    # (for evaluation I use images of different sizes)
dataset_path = args.val_tfrecord
batch_size = 1
# for evaluation it's important to set batch_size to 1
filenames = os.listdir(dataset_path)
filenames = [n for n in filenames if n.endswith('.tfrecords')]
filenames = [os.path.join(dataset_path, n) for n in sorted(filenames)]
def input_fn():
with tf.device('/cpu:0'), tf.name_scope('input_pipeline'):
pipeline = Pipeline(
filenames,
batch_size=batch_size, image_size=image_size,
repeat=is_training, shuffle=is_training,
augmentation=is_training
)
features, labels = pipeline.get_batch()
return features, labels
return input_fn
config = tf.ConfigProto()
config.gpu_options.visible_device_list = GPU_TO_USE
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(
model_dir=args.model_dir,
session_config=config,
save_summary_steps=200,
save_checkpoints_secs=600,
log_step_count_steps=100,
keep_checkpoint_max=1
)
val_input_fn = get_input_fn(is_training=False)
estimator = tf.estimator.Estimator(model_fn, params=model_params, config=run_config)
eval_metrics = estimator.evaluate(input_fn=val_input_fn,checkpoint_path=os.path.join(args.model_dir,'model.ckpt'))
print(eval_metrics)
json_dict={}
json_dict["recall"] = str(eval_metrics['metrics/recall'])
json_dict["precision"] = str(eval_metrics['metrics/precision'])
json_dict["AP"] = str(eval_metrics['metrics/AP'])
json_data = json.dumps(json_dict)
f=open(os.path.join(args.output_path,"result.json"),"w")
f.write(str(json_data))
f.close()
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/preprocessing/new_vgg_preprocessing.py
|
# It is a reconstitution of vgg_preprocessing with OpenCV
# ==============================================================================
"""Provides utilities to preprocess images.
The preprocessing steps for VGG were introduced in the following technical
report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
<NAME> and <NAME>
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy
import cv2
slim = tf.contrib.slim
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def preprocess_for_train(image,
output_height,
output_width,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX):
"""Preprocesses the given image for training.
Note that the actual resizing scale is sampled from
[`resize_size_min`, `resize_size_max`].
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing.
Returns:
A preprocessed image.
"""
def opencv_preprocess_for_train(image,height,width):
h,w,_ = image.shape
resize_side = numpy.random.randint(low=resize_side_min, high=resize_side_max+1,size=1)
if h < w :
resize_h = resize_side
resize_w = w*resize_side/h
else:
resize_w = resize_side
resize_h = h*resize_side/w
resized_image = cv2.resize(image,(int(resize_w),int(resize_h)),interpolation = cv2.INTER_LINEAR)
# w,h
#print(resized_image.shape)
begin_h = numpy.random.randint(low=0, high=resize_h-height, size=1)
begin_w = numpy.random.randint(low=0, high=resize_w-width, size=1)
#croped_image = resized_image[0:100,100:200,:]
croped_image = resized_image[int(begin_h):int(begin_h+height),int(begin_w):int(begin_w+width),:]
        lr_flip = cv2.flip(croped_image, 1) if numpy.random.uniform()>0.5 else croped_image
        # cast to float32 before mean subtraction so the array dtype matches the
        # tf.float32 output type declared in the tf.py_func call below
        lr_flip = lr_flip.astype(numpy.float32)
        lr_flip[:,:,0] = lr_flip[:,:,0]-_R_MEAN #r
        lr_flip[:,:,1] = lr_flip[:,:,1]-_G_MEAN #g
        lr_flip[:,:,2] = lr_flip[:,:,2]-_B_MEAN #b
        return lr_flip
image = tf.py_func(opencv_preprocess_for_train, [image,output_height,output_width], tf.float32)
#image = tf.Print(image,['after preprocess',image],summarize=100)
return image
def preprocess_for_eval(image, output_height, output_width, resize_side_vgg):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side: The smallest side of the image for aspect-preserving resizing.
Returns:
A preprocessed image.
"""
def opencv_preprocess_for_eval(image,height,width):
h,w,_ = image.shape
resize_side = resize_side_vgg
if h < w :
resize_h = resize_side
resize_w = w*resize_side/h
else:
resize_w = resize_side
resize_h = h*resize_side/w
resized_image = cv2.resize(image,(int(resize_w),int(resize_h)),interpolation = cv2.INTER_LINEAR)
#print(resized_image.shape)
begin_h = int((resize_h-height)*0.5)
begin_w = int((resize_w-width)*0.5)
        croped_image = resized_image[begin_h:int(begin_h+height),begin_w:int(begin_w+width),:]
        # cast to float32 before mean subtraction so the array dtype matches the
        # tf.float32 output type declared in the tf.py_func call below
        croped_image = croped_image.astype(numpy.float32)
        croped_image[:,:,0] = croped_image[:,:,0]-_R_MEAN #r
        croped_image[:,:,1] = croped_image[:,:,1]-_G_MEAN #g
        croped_image[:,:,2] = croped_image[:,:,2]-_B_MEAN #b
#print(croped_image.shape)
return croped_image
output_height = tf.convert_to_tensor(output_height)
output_width = tf.convert_to_tensor(output_width)
image = tf.py_func(opencv_preprocess_for_eval, [image,output_height,output_width], tf.float32)
return image
def preprocess_image(image, output_height, output_width, is_training=False,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, then this value
is used for rescaling.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, this value is
ignored. Otherwise, the resize side is sampled from
[resize_size_min, resize_size_max].
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width,
resize_side_min, resize_side_max)
else:
return preprocess_for_eval(image, output_height, output_width,
resize_side_min)
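# --- Illustrative usage sketch (not part of the original file) ---
# A hypothetical call inside an input pipeline; `encoded_jpeg` is assumed to be
# a scalar string Tensor holding raw JPEG bytes, produced elsewhere.
#
# raw_image = tf.image.decode_jpeg(encoded_jpeg, channels=3)
# train_image = preprocess_image(raw_image, 224, 224, is_training=True)
# eval_image = preprocess_image(raw_image, 224, 224, is_training=False)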
|
jd-aig/aves2_algorithm_components
|
src/ml/regression/decisionTree/run.py
|
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="MEDV")
parser.add_argument("--criterion", type=str, default="mse")
parser.add_argument("--max_depth", type=str, default="None")
parser.add_argument("--min_samples_split", type=int, default=2)
args = parser.parse_args()
train_dataset = os.path.join(args.data_dir,'train.csv')
if not os.path.exists(train_dataset):
print("ERROR: train.csv is not exists!")
exit()
train_data = pd.read_csv(train_dataset)
lst = train_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_train = train_data.ix[:,args.target].values
x_train = train_data.ix[:,lst].values
if args.max_depth == "None":
max_depth = None
else:
max_depth = int(args.max_depth)
model = DecisionTreeRegressor(max_depth=max_depth,min_samples_split=args.min_samples_split,criterion=args.criterion)
model.fit(x_train,y_train)
save_path = os.path.join(args.output_path,'model.m')
joblib.dump(model,save_path)
print("DecisionTreeRegressor train finished.save model in model/output/model.m")
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/utils.py
|
import importlib.util
import logging
import os
import sys
import warnings
import tensorflow as tf
from typing import Callable, Dict, List, Optional
import sklearn.metrics
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
import numpy as np
from scipy import interp
logger = logging.getLogger(__name__)
MANIFOLD_PATH_SEP = "|"
def split_paths(paths: str) -> List[str]:
return paths.split(os.pathsep) if "://" not in paths else paths.split(MANIFOLD_PATH_SEP)
def import_user_module(args):
module_path = getattr(args, "user_dir", None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path):
fairseq_rel_path = os.path.join(
os.path.dirname(__file__), "..", args.user_dir
)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def total_sample(file_name):
sample_nums = 0
for _ in tf.python_io.tf_record_iterator(file_name):
sample_nums += 1
return sample_nums
# Read a tfrecords file
def decode_from_tfrecords(filename, name_to_features, seq_length=128, num_epoch=None):
if not isinstance(filename, list):
filename = [filename]
filename_queue = tf.train.string_input_producer(filename, num_epochs=num_epoch)
reader = tf.TFRecordReader()
_, serialized = reader.read(filename_queue)
example = tf.parse_single_example(serialized, features=name_to_features)
# tf.Example only supports tf.int64, but the CPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
if 'input_ids' in example:
input_ids = example['input_ids']
else:
input_ids = None
if isinstance(input_ids, tf.SparseTensor):
input_ids = tf.sparse_tensor_to_dense(input_ids)
label = example['label_ids']
return input_ids, label
def get_real_label(label, num):
labels = []
if label is None:
return labels
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(num):
label_i = sess.run(label)
labels.append(label_i)
return labels
# Compute AUC values #
def Auc_value(y_pred, y_real, classes):
classes = [int(c) for c in classes]
if len(classes) > 2:
y_real_label = label_binarize(y_real, classes=classes)
y_pred_label = label_binarize(y_pred, classes=classes)
else:
y_real_label = np.zeros((len(y_real), 2), dtype=np.float32)
y_pred_label = np.zeros((len(y_pred), 2), dtype=np.float32)
for i in range(len(y_real)):
if (y_real[i] == 0):
y_real_label[i] = np.array([1.0, 0.0])
else:
y_real_label[i] = np.array([0.0, 1.0])
for i in range(len(y_pred)):
if (y_pred[i] == 0):
y_pred_label[i] = np.array([1.0, 0.0])
else:
y_pred_label[i] = np.array([0.0, 1.0])
y_pred = np.array(y_pred_label)
n_classes = len(classes)
# Compute the ROC curve for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_real_label[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area (method 2)
fpr["micro"], tpr["micro"], _ = roc_curve(y_real_label.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Compute macro-average ROC curve and ROC area (method 1)
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
return roc_auc["macro"], roc_auc["micro"]
def calculate_label(y_pred, y_real, classes):
assert len(y_pred) == len(y_real), "pred num and real num should be equal"
precision = sklearn.metrics.precision_score(y_true=y_real, y_pred=y_pred, average='macro')
recall = sklearn.metrics.recall_score(y_true=y_real, y_pred=y_pred, average='macro')
accuracy = sklearn.metrics.accuracy_score(y_true=y_real, y_pred=y_pred)
f1_score = sklearn.metrics.f1_score(y_true=y_real, y_pred=y_pred, average='macro')
classify_report = sklearn.metrics.classification_report(y_true=y_real, y_pred=y_pred)
auc_macro, auc_micro = Auc_value(y_pred, y_real, classes)
return precision, recall, accuracy, f1_score, auc_micro, auc_macro, classify_report
def calculate_multi_label(y_pred, y_real):
assert len(y_pred) == len(y_real), "pred num and real num should be equal"
all_num = len(y_pred)
correct_num = 0
less_num = 0
for i in range(len(y_pred)):
real = y_real[i]
pred = y_pred[i]
same = True
for j in range(len(real)):
if real[j] != pred[j]:
same = False
break
if same:
correct_num += 1
less = True
for j in range(len(real)):
if real[j] != pred[j]:
if real[j] == 0 and pred[j] == 1:
less = False
break
if less:
less_num += 1
exact_accu = correct_num / all_num
less_accu = less_num / all_num
return exact_accu, less_accu
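# --- Hedged usage sketch (illustrative only) ---
# Toy call to calculate_label with made-up predictions, just to show the call
# signature and the order of the returned metrics.
if __name__ == "__main__":
    demo_pred = [0, 1, 1, 0, 1]
    demo_real = [0, 1, 0, 0, 1]
    p, r, acc, f1, auc_micro, auc_macro, report = calculate_label(demo_pred, demo_real, classes=[0, 1])
    print("precision=%.3f recall=%.3f accuracy=%.3f f1=%.3f" % (p, r, acc, f1))
    print(report)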
|
jd-aig/aves2_algorithm_components
|
src/ml/classification/randomForest/run.py
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix
from sklearn.externals import joblib
import numpy as np
import pandas as pd
import argparse
import os
import json
import time
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="virginica")
parser.add_argument("--n_estimators", type=int, default=10)
parser.add_argument("--n_jobs", type=int, default=1)
args = parser.parse_args()
train_dataset = os.path.join(args.data_dir,'train.csv')
train_data = pd.read_csv(train_dataset)
lst = train_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_train = train_data.ix[:,args.target].values
x_train = train_data.ix[:,lst].values
model = RandomForestClassifier(n_estimators=args.n_estimators,n_jobs=args.n_jobs)
model.fit(x_train,y_train)
save_path = os.path.join(args.output_path,'model.m')
joblib.dump(model,save_path)
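# --- Hedged extension sketch (not part of the original script) ---
# The fitted forest exposes feature_importances_; printing them next to the
# remaining feature columns is a quick sanity check on the training run.
for feature_name, importance in zip(lst, model.feature_importances_):
    print("%s: %.4f" % (feature_name, importance))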
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/cosine_score.py
|
import tensorflow as tf
def get_cosine_score(query_arr, doc_arr):
# query_norm = sqrt(sum(each x^2))
pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.square(query_arr), 1))
pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.square(doc_arr), 1))
pooled_mul_12 = tf.reduce_sum(tf.multiply(query_arr, doc_arr), 1)
cos_scores = tf.div(pooled_mul_12, pooled_len_1 * pooled_len_2 + 1e-8, name="cos_scores")
return cos_scores
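# --- Hedged usage sketch (illustrative only) ---
# Runs the score on two tiny constant batches in TF1 graph mode; inputs are
# [batch, dim], matching the axis-1 reductions above.
if __name__ == "__main__":
    q = tf.constant([[1.0, 0.0], [0.0, 1.0]])
    d = tf.constant([[1.0, 0.0], [1.0, 0.0]])
    with tf.Session() as sess:
        print(sess.run(get_cosine_score(q, d)))  # roughly [1.0, 0.0]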
|
jd-aig/aves2_algorithm_components
|
src/ml/cluster/gmm/run.py
|
from sklearn.mixture import GaussianMixture
from sklearn.metrics import calinski_harabaz_score
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--n_components", type=int, default=2)
parser.add_argument("--covariance_type", type=str, default='full')#full,tied,diag,spherical
parser.add_argument("--tol", type=float, default=0.001)
parser.add_argument("--max_iter", type=int, default=100)
args = parser.parse_args()
dataset = os.path.join(args.data_dir,'data.csv')
data = pd.read_csv(dataset)
x = data.ix[:,:].values
model = GaussianMixture(n_components=args.n_components, covariance_type=args.covariance_type, tol=args.tol, max_iter=args.max_iter)
model.fit(x)
y_pred = model.predict(x)
save_path = os.path.join(args.output_path,'model.m')
joblib.dump(model,save_path)
c_h_score = calinski_harabaz_score(x,y_pred)
json_dict = {}
json_dict["calinski_harabaz_score"] = c_h_score
json_data = json.dumps(json_dict)
f = open(os.path.join(args.output_path,"result.json"),"w")
f.write(str(json_data))
f.close()
pred_csv = pd.concat([data,pd.DataFrame(columns=['PREDICT'],data=y_pred.tolist())],sort=False,axis=1)
pred_csv.to_csv(os.path.join(args.output_path,'result.csv'),float_format = '%.5f')
print('calinski_harabaz_score : ', c_h_score)
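# --- Hedged extension sketch (not part of the original pipeline) ---
# GaussianMixture also exposes bic(); scanning a few component counts on the
# same data can hint whether the chosen n_components is reasonable.
for k in range(1, args.n_components + 2):
    gm_k = GaussianMixture(n_components=k, covariance_type=args.covariance_type,
                           tol=args.tol, max_iter=args.max_iter).fit(x)
    print('n_components=%d BIC=%.2f' % (k, gm_k.bic(x)))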
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/model_train.py
|
# from __future__ import print_function
import tensorflow as tf
# the same as neuhub
import os
import numpy
import time
from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.framework import function
from tensorflow.python.platform import gfile
import Configure_file
import performence
tf.app.flags.DEFINE_string('pre_trained_model_ckpt_path', '', '')
tf.app.flags.DEFINE_string('checkpoint_dir', '', '')
tf.app.flags.DEFINE_string('summary_dir', '', '')
tf.app.flags.DEFINE_string('result_dir', '', '')
tf.app.flags.DEFINE_string('train_data_dir', '', '')
tf.app.flags.DEFINE_string('validation_data_dir', '', '')
tf.app.flags.DEFINE_integer('num_class', 37, 'the number of training sample categories')
tf.app.flags.DEFINE_string('model_name', 'vgg_16', '')
tf.app.flags.DEFINE_integer('batch_size', 32, '')
tf.app.flags.DEFINE_float('epochs', 100, '')
tf.app.flags.DEFINE_float('decay_rate', 0.92, '')
tf.app.flags.DEFINE_float('decay_epochs', 3, '')
tf.app.flags.DEFINE_string('lr_decay', 'exponential_decay',
'exponential_decay, natural_exp_decay,polynomial_decay,fixed')
tf.app.flags.DEFINE_integer('display_every_steps', 50, '')
tf.app.flags.DEFINE_integer('eval_every_epochs', 5, '')
tf.app.flags.DEFINE_integer('fine_tune', 1, 'whether the model is trained from a pre-trained model')
tf.app.flags.DEFINE_integer('early_stop', 1, 'whether to stop training model early')
FLAGS = tf.app.flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
slim = tf.contrib.slim
height = Configure_file.configure_image_size(FLAGS.model_name)
width = height
display_every_steps = FLAGS.display_every_steps
batch_size = FLAGS.batch_size
def preprocess_fn(value, batch_position, is_training):
from pre import parse_example_proto
image_buffer, label, filename = parse_example_proto(value)
from pre import preprocess
images = preprocess(image_buffer, is_training, FLAGS.model_name, height, width)
return (images, label, filename)
def train_data_generator(batch_size):
with tf.name_scope('train_batch_processing'):
data_dir = FLAGS.train_data_dir
glob_pattern = os.path.join(data_dir, 'train-*-of-*')
file_names = gfile.Glob(glob_pattern)
import random
random.shuffle(file_names)
ds = tf.data.TFRecordDataset.list_files(file_names)
ds = ds.apply(interleave_ops.parallel_interleave(tf.data.TFRecordDataset, cycle_length=10))
counter = tf.data.Dataset.range(batch_size)
counter = counter.repeat()
flags = tf.data.Dataset.from_tensors(tf.constant('train'))
flags = flags.repeat()
ds = tf.data.Dataset.zip((ds, counter, flags))
ds = ds.prefetch(buffer_size=batch_size * 4)
ds = ds.shuffle(buffer_size=1000)
ds = ds.repeat()
ds = ds.apply(batching.map_and_batch(map_func=preprocess_fn, batch_size=batch_size, num_parallel_batches=10))
ds = ds.prefetch(buffer_size=10)
from tensorflow.contrib.data.python.ops import threadpool
ds = threadpool.override_threadpool(ds, threadpool.PrivateThreadPool(10,
display_name='input_pipeline_thread_pool'))
# ds_iterator = ds.make_initializable_iterator()
return ds
def validation_data_generator(batch_size):
with tf.name_scope('validation_batch_processing'):
data_dir = FLAGS.validation_data_dir
glob_pattern = os.path.join(data_dir, 'validation-*-of-*')
file_names = gfile.Glob(glob_pattern)
ds = tf.data.TFRecordDataset.list_files(file_names)
ds = ds.apply(interleave_ops.parallel_interleave(tf.data.TFRecordDataset, cycle_length=10))
counter = tf.data.Dataset.range(batch_size)
counter = counter.repeat()
flags = tf.data.Dataset.from_tensors(tf.constant('validation'))
flags = flags.repeat()
ds = tf.data.Dataset.zip((ds, counter, flags))
ds = ds.prefetch(buffer_size=batch_size * 4)
ds = ds.apply(batching.map_and_batch(map_func=preprocess_fn, batch_size=batch_size, num_parallel_batches=10))
ds = ds.prefetch(buffer_size=10)
from tensorflow.contrib.data.python.ops import threadpool
ds = threadpool.override_threadpool(ds, threadpool.PrivateThreadPool(10,
display_name='input_pipeline_thread_pool'))
# ds_iterator = ds.make_initializable_iterator()
return ds
def main(argv=None):
import json
train_num_examples_path = FLAGS.train_data_dir + 'num_examples.json'
validation_num_examples_path = FLAGS.validation_data_dir + 'num_examples.json'
with open(train_num_examples_path) as load_f:
load_dict = json.load(load_f)
train_num_examples = load_dict['the total number of available samples']
with open(validation_num_examples_path) as load_f:
load_dict = json.load(load_f)
validation_num_examples = load_dict['the total number of available samples']
num_classes = FLAGS.num_class
train_batch_size = batch_size
eval_batch_size = batch_size
init_learning_rate = Configure_file.configure_init_learning_rate(FLAGS.model_name)
eval_every_epochs = FLAGS.eval_every_epochs
eval_every_steps = int(eval_every_epochs * train_num_examples / train_batch_size)
while eval_every_steps == 0:
eval_every_epochs = eval_every_epochs + eval_every_epochs
eval_every_steps = int(eval_every_epochs * train_num_examples / train_batch_size)
if int(FLAGS.epochs * train_num_examples / train_batch_size) == 0:
train_steps = 1
else:
train_steps = int(FLAGS.epochs * train_num_examples / train_batch_size)
if int(validation_num_examples / eval_batch_size) == 0:
eval_steps = 1
eval_batch_size = validation_num_examples
else:
eval_steps = int(validation_num_examples / eval_batch_size)
print('get information')
fine_tune_flag = 'True' if FLAGS.fine_tune == 1 else 'False'
early_stop_flag = 'True' if FLAGS.early_stop == 1 else 'False'
print('---' * 20)
print('model for classification : %s' % FLAGS.model_name)
print('input height and width : %d' % height)
print('whether to fine tune : %s' % fine_tune_flag)
print('whether to early stop : %s' % early_stop_flag)
print('number of train samples : %d' % train_num_examples)
print('number of train classes : %d' % FLAGS.num_class)
print('eval every %d training epochs' % eval_every_epochs)
print('train steps : %d' % train_steps)
print('eval steps : %d' % eval_steps)
print('train batch size : %d' % train_batch_size)
print('eval batch size : %d' % eval_batch_size)
print('init_learning rate :%f' % init_learning_rate)
print('lr decay policy : %s' % FLAGS.lr_decay)
print('---' * 20)
is_training_flag = tf.placeholder(
tf.bool,
shape=None,
name='is_training_flag')
weight_decay = tf.placeholder_with_default(Configure_file.configure_weight_decay(FLAGS.model_name), [])
global_steps = tf.train.get_or_create_global_step()
decay_steps = FLAGS.decay_epochs * train_num_examples / train_batch_size
lr = Configure_file.configure_lr(init_learning_rate, FLAGS.lr_decay, decay_steps,
FLAGS.decay_rate, global_steps)
# (init_lr,decay_policy,decay_steps,decay_rate,global_steps,warm_lr=0.0001,warm_steps=0)
# tf.summary.scalar('learning_rate', lr)
# opt = Configure_file.configure_optimizer(FLAGS.optimizer, lr)
opt = Configure_file.model_optimizer(FLAGS.model_name, lr)
# (optimizer,learning_rate)
train_dataset = train_data_generator(batch_size=train_batch_size)
validation_dataset = validation_data_generator(batch_size=eval_batch_size)
iterator = tf.data.Iterator.from_structure(output_types=train_dataset.output_types,
output_shapes=train_dataset.output_shapes)
train_init_op = iterator.make_initializer(train_dataset)
validation_init_op = iterator.make_initializer(validation_dataset)
images, labels, filenames = iterator.get_next()
images = tf.reshape(images, shape=[-1, height, width, 3])
labels = tf.reshape(labels, [-1])
from nets import nets_factory
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=num_classes,
weight_decay=weight_decay,
is_training=is_training_flag)
logits, end_points = network_fn(images)
pred_soft = tf.nn.softmax(logits)
values, indices = tf.nn.top_k(pred_soft, 1)
if 'AuxLogits' in end_points:
aux_cross_entropy = 0.4 * tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=end_points['AuxLogits'], labels=labels,
name='aux_cross-entropy'))
else:
aux_cross_entropy = 0
cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='cross-entropy'))
loss = cross_entropy + aux_cross_entropy
with tf.name_scope('accuracy'):
top_1 = tf.reduce_mean(tf.cast(tf.nn.in_top_k(pred_soft, labels, 1), dtype=tf.float32), name='top_1')
top_5 = tf.reduce_mean(tf.cast(tf.nn.in_top_k(pred_soft, labels, 5), dtype=tf.float32), name='top_5')
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('aux_cross_entropy', aux_cross_entropy)
tf.summary.scalar('loss', loss)
tf.summary.scalar('top1', top_1)
tf.summary.scalar('top5', top_5)
for i in tf.global_variables():
tf.summary.histogram(i.name.replace(":", "_"), i)
# recommended
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_step = opt.minimize(loss, global_step=global_steps)
# ema = tf.train.ExponentialMovingAverage(decay=0.9999)
with tf.control_dependencies([train_step, loss, top_1, top_5]): # , variables_averages_op,batch_norm_updates_op]):
train_op = tf.no_op(name='train_op')
# train_op = ema.apply(tf.trainable_variables())
with tf.control_dependencies([loss, top_1, top_5]):
validation_op = tf.no_op(name='validation_op')
config = tf.ConfigProto()
config.allow_soft_placement = True
checkpoint_dir = FLAGS.checkpoint_dir
g_list = tf.global_variables()
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
# print(bn_moving_vars)
store_restore_var_list = tf.trainable_variables() + bn_moving_vars # + tf.moving_average_variables()
# print(store_restore_var_list)
saver = tf.train.Saver(store_restore_var_list, max_to_keep=1)
checkpoint_basename = FLAGS.model_name + '.ckpt'
if FLAGS.fine_tune == 1:
exclusions = Configure_file.model_exclusions(FLAGS.model_name)
print('variables to exclude :' + str(exclusions))
variables_to_restore = []
for var in store_restore_var_list:
flag_break = 0
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
flag_break = 1
break
if flag_break == 0:
variables_to_restore.append(var)
pre_train_saver = tf.train.Saver(variables_to_restore)
#print(tf.gfile.IsDirectory(FLAGS.pre_trained_model_ckpt_path))
if tf.gfile.IsDirectory(FLAGS.pre_trained_model_ckpt_path):
# print(tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*'))
if tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*.ckpt'):
ckpt_path = tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*.ckpt')[0]
print('there is one ckpt file ' + str(ckpt_path))
elif tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*.ckpt.*'):
ckpts = tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*.ckpt.*')[0]
ckpt_path = ckpts.rsplit('.', 1)[0]
print('there is one ckpt file ' + str(ckpt_path))
# imagenet pretrained model
elif tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*.ckpt-*'):
ckpts = tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*.ckpt-*')[0]
ckpt_path = ckpts.rsplit('.', 1)[0]
print('there is more than one ckpt files ' + str(ckpt_path))
# pipline pretrained model
else:
ckpt_path = FLAGS.pre_trained_model_ckpt_path
print(ckpt_path)
# ckpt_path = tf.gfile.Glob(FLAGS.pre_trained_model_ckpt_path + FLAGS.model_name + '*')
def load_pretrain(scaffold, sess):
pre_train_saver.restore(sess, ckpt_path)
scaffold = tf.train.Scaffold(init_fn=load_pretrain, saver=pre_train_saver)
else:
scaffold = None
hooks = [
tf.train.SummarySaverHook(save_steps=100, save_secs=None, output_dir=FLAGS.summary_dir, summary_writer=None,
scaffold=None, summary_op=tf.summary.merge_all())
]
early_stop_param = {}
early_stop_param['count'] = 0
early_stop_param['top1_max'] = 0
with tf.train.MonitoredTrainingSession(checkpoint_dir=None,
config=config,
scaffold=scaffold,
hooks=hooks,
stop_grace_period_secs=1200
) as mon_sess:
if FLAGS.early_stop == 1:
print('start training with early stop')
mon_sess._coordinated_creator.tf_sess.run(train_init_op)
time0 = time.time()
import early_stop
global_step = 0
while global_step < train_steps:
if (global_step) % eval_every_steps == 0 and global_step > 0:
print('start validating')
time_va = time.time()
mon_sess._coordinated_creator.tf_sess.run(validation_init_op)
loss_list = []
tt1 = []
lla_batch = []
ffile_batch = []
llo_batch = []
for i in range(eval_steps):
_, batch_loss, t1, la_batch, file_batch, lo_batch = mon_sess.run([validation_op, loss, top_1, labels, filenames, pred_soft],
feed_dict={is_training_flag: False, weight_decay: 0.0})
loss_list.append(batch_loss)
tt1.append(t1)
lla_batch.extend(la_batch)
ffile_batch.extend(file_batch)
llo_batch.extend(lo_batch.tolist())
validation_loss = numpy.mean(numpy.asarray(loss_list))
validation_top1 = numpy.mean(numpy.asarray(tt1))
mon_sess._coordinated_creator.tf_sess.run(train_init_op)
th = eval_steps * eval_batch_size / (time.time() - time_va)
print('done validating, validation loss is %f , top1 is %f , throughput is %f ' % (
validation_loss, validation_top1, th))
global_step = global_step + 1
# early_stop_param = early_stop.early_stop(validation_loss,early_stop_param)
if validation_top1 > early_stop_param['top1_max']:
print('Saving checkpoints')
saver.save(
mon_sess._coordinated_creator.tf_sess,
save_path=os.path.join(checkpoint_dir, checkpoint_basename),
global_step=global_step,
latest_filename=None,
meta_graph_suffix='meta',
write_meta_graph=True,
write_state=True,
strip_default_attrs=False
)
performence.per(lla_batch,llo_batch,ffile_batch,FLAGS.result_dir)
early_stop_param = early_stop.top1_early_stop(validation_top1, early_stop_param)
if early_stop_param['count'] >= 3:
print('train process should stop')
break
if (global_step + 1) % display_every_steps == 0 and global_step > 0:
global_step, _, batch_loss, top1, top5 = mon_sess.run([global_steps, train_op, loss, top_1, top_5],
feed_dict={is_training_flag: True})
th = display_every_steps * train_batch_size / (time.time() - time0)
time0 = time.time()
print('global_step: %d, train_loss: %f, top1: %f, top5: %f , throughput is %f' % (
global_step, batch_loss, top1, top5, th))
else:
global_step, _ = mon_sess.run([global_steps, train_op], feed_dict={is_training_flag: True})
else:
print('start training without early stop')
mon_sess._coordinated_creator.tf_sess.run(train_init_op)
time0 = time.time()
global_step = 0
while global_step < train_steps:
if (global_step + 1) % display_every_steps == 0 and global_step > 0:
global_step, _, batch_loss, top1, top5 = mon_sess.run([global_steps, train_op, loss, top_1, top_5],
feed_dict={is_training_flag: True})
th = display_every_steps * train_batch_size / (time.time() - time0)
time0 = time.time()
print('global_step: %d, train_loss: %f, top1: %f, top5: %f , throughput is %f' % (
global_step, batch_loss, top1, top5, th))
else:
global_step, _ = mon_sess.run([global_steps, train_op], feed_dict={is_training_flag: True})
print('start collecting data')
if __name__ == '__main__':
tf.app.run()
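# --- Hedged sketch of the interval arithmetic in main() (illustrative only) ---
# decay_steps and eval_every_steps above are both derived from epoch counts;
# the hypothetical numbers below just make that relationship concrete.
def _example_interval_math(num_examples=3700, train_batch=32, decay_epochs=3, eval_every_epochs=5):
    steps_per_epoch = num_examples / train_batch                 # ~115.6 steps
    decay_steps = decay_epochs * steps_per_epoch                  # ~346.9 steps between LR decays
    eval_every_steps = int(eval_every_epochs * steps_per_epoch)   # 578 steps between validations
    return decay_steps, eval_every_steps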
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/models/textcnn.py
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ['TextCNNModel']
__author__ = 'xulu46'
__date__ = '2019.09.29'
"""The main textcnn model and related functions."""
import copy
import tensorflow as tf
from PlatformNlp.modules.conv_layer import conv_layer
from PlatformNlp.modules.embedding_lookup import embedding_lookup
from PlatformNlp.models import register_model, register_model_architecture
from PlatformNlp.models.platform_model import PlatformModel
@register_model('textcnn')
class TextCNNModel(PlatformModel):
"""
```python
# Already been converted into WordPiece token ids
...
```
"""
def __init__(self, features, vocab_size, embedding_size, filter_sizes, num_filters, initializer_range):
super().__init__()
with tf.variable_scope("textcnn"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
input_ids = features["input_ids"]
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=vocab_size,
embedding_size=embedding_size,
initializer_range=initializer_range,
word_embedding_name="word_embeddings",
embedding_initializer=None)
self.embedding_output = tf.expand_dims(self.embedding_output, axis=[-1])
with tf.variable_scope("conv"):
self.pooled_output = conv_layer(
input_tensor=self.embedding_output,
filter_sizes=filter_sizes,
num_filters=num_filters,
initializer_range=initializer_range)
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(self.pooled_output, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
def get_embedding(self):
return self.embedding_table
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--vocab_size', type=int,
help='vocab size')
parser.add_argument('--embedding_size', type=int,
help='textcnn embedding dimension')
parser.add_argument('--filter_sizes', type=str,
help='filter size for conv layer')
parser.add_argument('--num_filters', type=int,
help='num filter for each filter')
parser.add_argument('--l2_reg_lambda', type=float,
help='l2 reg')
parser.add_argument('--drop_keep_prob', type=float,
help='dropout prob for textcnn output layer')
parser.add_argument('--initializer_range', type=float,
help='initializer range for embedding')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
filter_sizes = "2,3,4" if args.filter_sizes is None else args.filter_sizes
filter_sizes = filter_sizes.split(",")
filter_sizes = [int(filter_size) for filter_size in filter_sizes]
return TextCNNModel(task.features, args.vocab_size, args.embedding_size, filter_sizes, args.num_filters, args.initializer_range)
def get_output(self):
return self.h_pool_flat
@register_model_architecture('textcnn', 'textcnn')
def base_architecture(args):
args.embedding_size = 128 if args.embedding_size is None else args.embedding_size
args.max_seq_length = 200 if args.max_seq_length is None else args.max_seq_length
args.filter_sizes = getattr(args, 'filter_sizes', "2,3,4")
args.num_filters = 128 if args.num_filters is None else args.num_filters
args.l2_reg_lambda = 0.1 if args.l2_reg_lambda is None else args.l2_reg_lambda
args.drop_keep_prob = 0.9 if args.drop_keep_prob is None else args.drop_keep_prob
args.initializer_range = 0.1 if args.initializer_range is None else args.initializer_range
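# --- Hedged shape note (illustrative only) ---
# With the defaults above (filter_sizes "2,3,4", num_filters 128),
# num_filters_total = 128 * 3 = 384, so get_output() returns h_pool_flat with
# shape [batch_size, 384].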
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/data/base_dataset.py
|
class BaseDataset():
"""Loader for MultiClass dDataset"""
def __init__(self, args):
self.label_mapping = {}
|
jd-aig/aves2_algorithm_components
|
src/ml/regression/randomForest/run.py
|
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="MEDV")
parser.add_argument("--n_estimators", type=int, default=10)
parser.add_argument("--n_jobs", type=int, default=1)
args = parser.parse_args()
train_dataset = os.path.join(args.data_dir,'train.csv')
if not os.path.exists(train_dataset):
print("ERROR: train.csv is not exists!")
exit()
train_data = pd.read_csv(train_dataset)
lst = train_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_train = train_data.ix[:,args.target].values
x_train = train_data.ix[:,lst].values
model = RandomForestRegressor(n_estimators=args.n_estimators,n_jobs=args.n_jobs)
model.fit(x_train,y_train)
save_path = os.path.join(args.output_path,'model.m')
joblib.dump(model,save_path)
print("RandomForestRegressor train finished.save model in model/output/model.m")
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/__init__.py
|
import PlatformNlp.criterions # noqa
import PlatformNlp.models # noqa
import PlatformNlp.modules # noqa
import PlatformNlp.optim # noqa
import PlatformNlp.tasks # noqa
import PlatformNlp.metrics
|
jd-aig/aves2_algorithm_components
|
src/nlp/Platform_cli/train.py
|
import argparse
import logging
import time
import sys
import os
import shutil
sys.path.append("../")
sys.path.append("../PlatformNlp/")
from PlatformNlp.utils import total_sample
import tensorflow as tf
from PlatformNlp import (
checkpoint_util,
options,
tasks,
utils,
)
from PlatformNlp.optim.AdamWeightDecayOptimizer import create_optimizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger("Platform_cli.train")
def model_fn_builder(args, task, num_train_steps, num_warm_up_steps):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
task.features = features
label_id = features["label_ids"]
task.labels = label_id
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_id)[0], dtype=tf.float32)
from PlatformNlp import models
task.mode = mode
task.build_model()
print("build model done")
criterion = task.build_criterion(args)
(total_loss, per_example_loss, logits, probabilities) = criterion.get_loss()
tvars = tf.trainable_variables()
initialized_variable_names = {}
if args.init_checkpoint:
(assignment_map, initialized_variable_names
) = checkpoint_util.get_assignment_map_from_checkpoint(tvars, args.init_checkpoint)
tf.train.init_from_checkpoint(args.init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
predictions = {"output": probabilities}
export_outputs = {
tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: tf.estimator.export.PredictOutput(
predictions)}
if mode == tf.estimator.ModeKeys.TRAIN:
learning_rate = args.learning_rate
train_op = create_optimizer(total_loss, learning_rate, num_train_steps, num_warm_up_steps)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
export_outputs=export_outputs)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = metric_fn(per_example_loss, label_id, is_real_example)  # logits is closed over from the enclosing scope
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metrics)
else:
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
export_outputs=export_outputs)
return output_spec
return model_fn
def main(args):
utils.import_user_module(args)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
# Build model and criterion
tf.logging.set_verbosity(tf.logging.INFO)
start = time.time()
if args.model_dir is not None and args.init_checkpoint is not None:
args.init_checkpoint = os.path.join(args.model_dir, args.init_checkpoint)
else:
args.init_checkpoint = None
if args.output_dir is not None and os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir)
if args.output_dir is not None and not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
eval_dir = os.path.join(args.output_dir, "eval")
if eval_dir is not None and not os.path.exists(eval_dir):
os.mkdir(eval_dir)
if args.max_seq_length > 520:
raise ValueError(
"Cannot use sequence length %d because the textcnn model "
"was only trained up to sequence length %d" %
(args.max_seq_length, 520))
os.environ['CUDA_VISIBLE_DEVICES'] = args.device_map
session_config = tf.ConfigProto(
inter_op_parallelism_threads=args.inter_op_parallelism_threads,
intra_op_parallelism_threads=args.intra_op_parallelism_threads,
allow_soft_placement=True)
log_every_n_steps = 8
run_config = tf.estimator.RunConfig(
log_step_count_steps=log_every_n_steps,
save_checkpoints_steps=args.save_checkpoints_steps,
session_config=session_config,
model_dir=args.output_dir)
num_examples = total_sample(args.train_data_file)
num_train_steps = int(num_examples / int(args.batch_size) * int(args.epoch))
num_warmup_steps = int(num_train_steps * args.warmup_proportion)
model_fn = model_fn_builder(args, task, num_train_steps, num_warmup_steps)
estimator = None
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=args.output_dir,
params={
'batch_size': int(args.batch_size),
# 'embedding_initializer': embedding_matrix,
},
config=run_config)
args.data_file = args.train_data_file
train_input_fn = tasks.PlatformTask.load_dataset(args)
if args.do_eval:
args.data_file = args.eval_data_file
eval_input_fn = tasks.PlatformTask.load_dataset(args)
early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(
estimator=estimator,
metric_name='eval_loss',
max_steps_without_decrease=int(args.max_steps_without_decrease),
min_steps=10,
run_every_secs=None,
run_every_steps=int(args.save_checkpoints_steps))
tf.estimator.train_and_evaluate(
estimator,
train_spec=tf.estimator.TrainSpec(train_input_fn, max_steps=num_train_steps,
hooks=[early_stopping_hook]),
eval_spec=tf.estimator.EvalSpec(eval_input_fn, steps=100))
else:
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
print("whole time: {}s".format(time.time() - start))
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
main(args)
if __name__ == "__main__":
cli_main()
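# --- Hedged sketch of the schedule arithmetic in main() (illustrative only) ---
# num_train_steps and num_warmup_steps come from the TFRecord sample count; the
# hypothetical numbers below just make the relationship concrete.
def _example_schedule(num_examples=10000, batch_size=32, epoch=3, warmup_proportion=0.1):
    num_train_steps = int(num_examples / batch_size * epoch)     # 937
    num_warmup_steps = int(num_train_steps * warmup_proportion)  # 93
    return num_train_steps, num_warmup_steps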
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/conv_layer.py
|
import tensorflow as tf
from PlatformNlp.modules.utils import get_shape_list, create_initializer
def conv_layer(input_tensor,
filter_sizes=[2, 3],
num_filters=128,
initializer_range=0.1):
input_shape = get_shape_list(input_tensor, expected_rank=4)
sequence_length = input_shape[1]
input_width = input_shape[-2]
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.variable_scope("conv-maxpool-%s" % filter_size, default_name="conv-maxpool-0"):
# Convolution Layer
filter_shape = [filter_size, input_width, 1, num_filters]
W = tf.get_variable(name="W",
shape=filter_shape,
initializer=create_initializer(initializer_range))
b = tf.get_variable(name="b",
dtype = tf.float32,
initializer= tf.constant([0.1]*num_filters))
conv = tf.nn.conv2d(
input_tensor,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
return pooled_outputs
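# --- Hedged usage sketch (illustrative only) ---
# Each branch convolves with a [filter_size, emb_dim] kernel and max-pools over
# time, so every returned tensor is [batch, 1, 1, num_filters].
if __name__ == "__main__":
    demo = tf.ones([2, 10, 8, 1])  # [batch, seq_len, emb_dim, 1]
    outs = conv_layer(demo, filter_sizes=[2, 3], num_filters=4)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for out in sess.run(outs):
            print(out.shape)  # (2, 1, 1, 4) for each filter size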
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/models/platform_model.py
|
"""
Base classes for various platform models.
"""
import logging
logger = logging.getLogger(__name__)
class PlatformModel():
"""Base class for Platform models."""
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
pass
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
raise NotImplementedError("Model must implement the build_model method")
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/transformer.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from PlatformNlp.modules.gelu import gelu
from PlatformNlp.modules.utils import get_shape_list,reshape_from_matrix, reshape_to_matrix, create_initializer
from PlatformNlp.modules.drop_out import dropout
from PlatformNlp.modules.layer_norm import layer_norm
from PlatformNlp.modules.attention import attention_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
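# --- Hedged usage sketch (illustrative only) ---
# Exercises the encoder on a tiny random input; hidden_size must equal the
# input width and be divisible by num_attention_heads, as the checks above
# require. Relies on the sibling PlatformNlp modules behaving like their BERT
# counterparts.
if __name__ == "__main__":
    demo_input = tf.random_normal([2, 8, 64])  # [batch, seq_length, hidden_size]
    demo_output = transformer_model(demo_input,
                                    hidden_size=64,
                                    num_hidden_layers=2,
                                    num_attention_heads=4,
                                    intermediate_size=128)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(demo_output).shape)  # (2, 8, 64)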
|
jd-aig/aves2_algorithm_components
|
src/ml/regression/test_regression/run.py
|
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import numpy as np
import pandas as pd
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--model_dir", type=str, default="./model/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="MEDV")
args = parser.parse_args()
test_dataset = None
if os.path.exists(os.path.join(args.data_dir,'test.csv')):
test_dataset = os.path.join(args.data_dir,'test.csv')
elif os.path.exists(os.path.join(args.data_dir,'val.csv')):
test_dataset = os.path.join(args.data_dir,'val.csv')
elif os.path.exists(os.path.join(args.data_dir,'train.csv')):
test_dataset = os.path.join(args.data_dir,'train.csv')
else:
print("ERROR:test file invalid!")
exit()
test_data = pd.read_csv(test_dataset)
lst = test_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_val = test_data.ix[:,args.target].values
x_val = test_data.ix[:,lst].values
model_path = os.path.join(args.model_dir,'model.m')
if not os.path.exists(model_path):
print("ERROR: model.m is not exists")
exit()
model = joblib.load(model_path)
predict = model.predict(x_val)
pred_csv = pd.concat([test_data,pd.DataFrame(columns=['PREDICT'],data=predict)],sort=False,axis=1)
pred_csv.to_csv(os.path.join(args.output_path,'result.csv'),float_format = '%.3f')
print("Predict successful! results in data/output/result.csv")
|
jd-aig/aves2_algorithm_components
|
src/ml/classification/decisionTree/run.py
|
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix
from sklearn.externals import joblib
import numpy as np
import pandas as pd
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="virginica")
parser.add_argument("--criterion", type=str, default="entropy")  # options supported by sklearn: gini, entropy
parser.add_argument("--max_depth", type=str, default="None")
parser.add_argument("--min_samples_split", type=int, default=2)
args = parser.parse_args()
train_dataset = os.path.join(args.data_dir,'train.csv')
train_data = pd.read_csv(train_dataset)
lst = train_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_train = train_data.ix[:,args.target].values
x_train = train_data.ix[:,lst].values
if args.max_depth == "None":
max_depth = None
else:
max_depth = int(args.max_depth)
model = DecisionTreeClassifier(max_depth=max_depth,min_samples_split=args.min_samples_split,criterion=args.criterion)
model.fit(x_train,y_train)
save_path = os.path.join(args.output_path,'model.m')
joblib.dump(model,save_path)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/drop_out.py
|
import tensorflow as tf
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, keep_prob=1.0-dropout_prob)
return output
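# --- Hedged usage sketch (illustrative only) ---
# dropout_prob is the probability of dropping a unit, hence the
# keep_prob = 1 - dropout_prob conversion above.
if __name__ == "__main__":
    demo = tf.ones([4, 4])
    with tf.Session() as sess:
        print(sess.run(dropout(demo, dropout_prob=0.5)))  # survivors are scaled to 2.0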
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/data/text_semilarity_dataset.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import codecs
import random
import collections
import tensorflow as tf
import PlatformNlp.tokenization as tokenization
from PlatformNlp.data.base_dataset import BaseDataset
from PlatformNlp.data import register_dataset
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids_1,
input_ids_2,
label_id,
input_mask,
segment_ids,
is_real_example=True):
self.input_ids_1 = input_ids_1
self.input_ids_2 = input_ids_2
self.label_id = label_id
self.input_mask = input_mask
self.segment_ids = segment_ids
self.is_real_example = is_real_example
@register_dataset("text_similarity")
class TextSemilarityFixLenDataset(BaseDataset):
"""Loader for text semilarity Dataset"""
def __init__(self, args):
self.args = args
self.max_seq_length = 200 if not args.max_seq_length or args.max_seq_length <= 0 else args.max_seq_length
if int(self.max_seq_length) % 2 != 0:
self.max_seq_length = self.max_seq_length + 1
if args.label_file is not None and os.path.exists(args.label_file):
self.label_mapping = six.moves.cPickle.load(open(args.label_file, 'rb'))
else:
self.label_mapping = {}
def build_dataset(self, args, tokenizer):
set_type = args.type
data_file = args.data_file
label_file = args.label_file
output_file = args.output_file
if not os.path.exists(data_file):
raise FileExistsError("{} does not exists!!!".format(data_file))
if os.path.exists(output_file):
os.remove(output_file)
all_lines = []
with codecs.open(data_file, "r", 'utf-8', errors='ignore') as f:
lines = []
for line in f:
line = line.strip('\n')
line = line.strip("\r")
line = line.split(',')
if len(line) < 3:
continue
lines.append(line)
shuffle_index = list(range(len(lines)))
random.shuffle(shuffle_index)
for i in range(len(lines)):
shuffle_i = shuffle_index[i]
if len(lines[i]) != 3:
continue
line_i = [str(lines[shuffle_i][0]), str(lines[shuffle_i][1]), str(lines[shuffle_i][2])]
all_lines.append(line_i)
del lines
examples = []
for (i, line) in enumerate(all_lines):
# Only the test set has a header
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if set_type.lower() == "train":
if label not in self.label_mapping:
self.label_mapping[label] = len(self.label_mapping)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
if set_type.lower() != "train":
if not os.path.exists(label_file):
raise EnvironmentError("no labels exists !!!!!")
self.label_mapping = six.moves.cPickle.load(open(label_file, 'rb'))
else:
with open(label_file, 'wb') as f:
six.moves.cPickle.dump(self.label_mapping, f)
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = self.convert_single_example(ex_index, example, self.label_mapping, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_str_feature(value):
if isinstance(value, str):
value = bytes(value, encoding='utf-8')
f = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
return f
features = collections.OrderedDict()
features["input_ids_1"] = create_int_feature(feature.input_ids_1)
features["input_ids_2"] = create_int_feature(feature.input_ids_2)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def builder(self, tfrecord_file, is_training, batch_size, drop_remainder, args):
name_to_features = {
"input_ids_1": tf.FixedLenFeature([self.max_seq_length], tf.int64),
"input_ids_2": tf.FixedLenFeature([self.max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([self.max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([self.max_seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
args.name_to_features = name_to_features
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
# batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(tfrecord_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def convert_single_example(self, ex_index, example, label_mapping, tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids_1=[0] * (self.max_seq_length // 2),
input_ids_2=[0] * (self.max_seq_length // 2),
input_mask=[0] * self.max_seq_length,
segment_ids=[0] * self.max_seq_length,
label_id=0,
is_real_example=False)
tokens_a = []
tokens_b = []
segment_ids = []
tokens_a.append("[CLS]")
segment_ids.append(0)
for token in tokenizer.tokenize(example.text_a):  # text_a is an untokenized string; assumes a BERT-style tokenize() method
tokens_a.append(token)
segment_ids.append(0)
tokens_a.append("[SEP]")
segment_ids.append(0)
input_ids_1 = tokenizer.convert_tokens_to_ids(tokens_a)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(tokens_a)
while len(input_ids_1) < (self.max_seq_length / 2):
input_ids_1.append(0)
input_mask.append(0)
segment_ids.append(0)
if example.text_b:
for token in tokenizer.tokenize(example.text_b):  # same BERT-style tokenize() assumption as above
tokens_b.append(token)
input_mask.append(1)
segment_ids.append(1)
tokens_b.append("[SEP]")
segment_ids.append(1)
input_mask.append(1)
input_ids_2 = tokenizer.convert_tokens_to_ids(tokens_b)
while len(input_ids_2) < (self.max_seq_length / 2):
input_ids_2.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids_1) == (self.max_seq_length / 2)
assert len(input_ids_2) == (self.max_seq_length / 2)
assert len(input_mask) == self.max_seq_length
assert len(segment_ids) == self.max_seq_length
label_id = 0
if example.label != 'nan':
label_id = int(label_mapping.get(example.label, 0))
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens_a: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens_a]))
tf.logging.info("tokens_b: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens_b]))
tf.logging.info("input_ids_1: %s" % " ".join([str(x) for x in input_ids_1]))
tf.logging.info("input_ids_2: %s" % " ".join([str(x) for x in input_ids_2]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids_1=input_ids_1,
input_ids_2=input_ids_2,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
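# --- Hedged layout note (illustrative only) ---
# Each serialized example carries two halves: input_ids_1 and input_ids_2 are
# each padded to max_seq_length // 2, while input_mask and segment_ids span the
# full max_seq_length, and label_ids holds the mapped integer class id.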
|
jd-aig/aves2_algorithm_components
|
src/ml/classification/val_classification/run.py
|
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,confusion_matrix
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--model_dir", type=str, default="./model/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="virginica")
args = parser.parse_args()
val_dataset = os.path.join(args.data_dir,'val.csv')
val_data = pd.read_csv(val_dataset)
lst = val_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_val = val_data.ix[:,args.target].values
x_val = val_data.ix[:,lst].values
save_path = os.path.join(args.model_dir,'model.m')
model = joblib.load(save_path)
predict = model.predict(x_val)
c_m = confusion_matrix(y_val, predict)
c_m = str(c_m)
print('confusion_matrix : \n', c_m)
c_m = c_m.replace("[", "")
c_m = c_m.replace("]", "")
fcm = open(os.path.join(args.output_path,"confusion_matrix.txt"), "w")
cm_lines = c_m.split("\n")
for i in range(len(cm_lines)):
cm = str(cm_lines[i])
cm = cm.lstrip()
cm = cm.rstrip()
cm = cm.split(" ")
for j in range(len(cm)):
num = str(cm[j])
num = num.lstrip()
num = num.rstrip()
if not num.isspace() and num != '':
fcm.write(str(cm[j]))
if j < (len(cm)-1):
fcm.write("\t")
fcm.write("\n")
fcm.close()
accuracy = accuracy_score(y_val, predict)
p_score = precision_score(y_val, predict , average='macro')
r_score = recall_score(y_val, predict, average='macro')
f1 = f1_score(y_val, predict, average='macro')
json_dict = {}
json_dict["accuracy"] = accuracy
json_dict["p_score"] = p_score
json_dict["r_score"] = r_score
json_dict["f1_score"] = f1
json_data = json.dumps(json_dict)
f = open(os.path.join(args.output_path,"result.json"),"w")
f.write(str(json_data))
f.close()
print('accuracy : ', accuracy)
print('p_score : ', p_score)
print('r_score : ', r_score)
print('f1_score : ', f1)
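# --- Hedged alternative sketch (not part of the original script) ---
# numpy can write the same confusion matrix without the string munging above;
# the output file name here is hypothetical.
import numpy as np
np.savetxt(os.path.join(args.output_path, "confusion_matrix_alt.txt"),
           confusion_matrix(y_val, predict), fmt="%d", delimiter="\t")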
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/models/word2vec.py
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__all__ = ['Word2vecModel']
__author__ = 'xulu46'
__date__ = '2020.10.14'
"""The main dssm model and related functions."""
import copy
import tensorflow as tf
from PlatformNlp.modules.embedding_lookup import embedding_lookup
from PlatformNlp.models import register_model, register_model_architecture
from PlatformNlp.models.platform_model import PlatformModel
from PlatformNlp.modules.dssm_layer import dssm_layer
from PlatformNlp.modules.utils import get_activation
@register_model('word2vec')
class Word2vecModel(PlatformModel):
"""
```python
# Already been converted into WordPiece token ids
...
```
"""
def __init__(self, features, vocab_size, embedding_size, initializer_range):
super().__init__()
input_ids = features["input_ids"]
input_ids = tf.sparse_tensor_to_dense(input_ids)
with tf.variable_scope("word2vec"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=vocab_size,
embedding_size=embedding_size,
initializer_range=initializer_range,
word_embedding_name="embeddings",
embedding_initializer=None)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--vocab_size', type=int,
help='vocab size')
parser.add_argument('--embedding_size', type=int,
                            help='word2vec embedding dimension')
parser.add_argument('--num_sampled', type=int,
help='num sampled for negative sampling')
parser.add_argument('--min_count', type=int,
help='min count for counting')
parser.add_argument('--skip_window', type=int,
help='skip window for training')
parser.add_argument('--num_skips', type=int,
help='num_skips for training')
parser.add_argument('--initializer_range', type=float,
help='initializer range for embedding')
@classmethod
def build_model(cls, args, task):
base_architecture(args)
return Word2vecModel(task.features, args.vocab_size, args.embedding_size, args.initializer_range)
def get_output(self):
return self.embedding_output
def get_embedding(self):
return self.embedding_table
@register_model_architecture('word2vec', 'word2vec')
def base_architecture(args):
args.vocab_size = 21128 if args.vocab_size is None else args.vocab_size
args.embedding_size = 128 if args.embedding_size is None else args.embedding_size
args.num_sampled = 64 if args.num_sampled is None else args.num_sampled
args.min_count = 5 if args.min_count is None else args.min_count
args.skip_window = 2 if args.skip_window is None else args.skip_window
    args.num_skips = 2 if args.num_skips is None else args.num_skips
args.l2_reg_lambda = 0.9 if args.l2_reg_lambda is None else args.l2_reg_lambda
args.initializer_range = 0.1 if args.initializer_range is None else args.initializer_range
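# Illustrative sketch (not part of the component): base_architecture fills any
# unset hyper-parameters with the word2vec defaults before the model is built.
#   import argparse
#   args = argparse.Namespace(vocab_size=None, embedding_size=None, num_sampled=None,
#                             min_count=None, skip_window=None, num_skips=None,
#                             l2_reg_lambda=None, initializer_range=None)
#   base_architecture(args)
#   # now args.vocab_size == 21128, args.embedding_size == 128, args.num_sampled == 64, ...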
|
jd-aig/aves2_algorithm_components
|
src/ml/regression/val_regression/run.py
|
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import numpy as np
import pandas as pd
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--model_dir", type=str, default="./model/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="MEDV")
args = parser.parse_args()
val_dataset = os.path.join(args.data_dir,'val.csv')
if not os.path.exists(val_dataset):
print("ERROR: val.csv is not exists!")
exit()
val_data = pd.read_csv(val_dataset)
lst = val_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_val = val_data.loc[:,args.target].values
x_val = val_data.loc[:,lst].values
model_path = os.path.join(args.model_dir,'model.m')
if not os.path.exists(model_path):
print("ERROR: model.m is not exists")
exit()
model = joblib.load(model_path)
predict = model.predict(x_val)
rmse = (np.sqrt(mean_squared_error(y_val, predict)))
r2 = r2_score(y_val,predict)
json_dict = {}
json_dict["rmse"] = rmse
json_dict["r2_score"] = r2
json_data = json.dumps(json_dict)
f = open(os.path.join(args.output_path,"result.json"),"w")
f.write(str(json_data))
f.close()
print('rmse : ', rmse)
print('r2_score : ', r2)
print('val successful! results in data/output/result.json')
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/criterions/platform_criterion.py
|
class PlatformNlpCriterion():
def __init__(self, args, task):
self.args = args
self.task = task
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
pass
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
return cls(args, task)
|
jd-aig/aves2_algorithm_components
|
src/cv/ocr_end2end/ATTENTION_OCR/tfrecord.py
|
# -*- coding: UTF-8 -*-
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import threading
lock = threading.Lock()
# counters shared by the worker threads (guarded by `lock`)
num_example = 0
num_corrupted = 0
import random
import cv2
import codecs
import json
import sys
if sys.version_info[0] == 2:
    reload(sys)
    sys.setdefaultencoding('utf-8')
tf.app.flags.DEFINE_string('data_dir', '/mnt/shared/easydl/train_data_dir', '''''')
tf.app.flags.DEFINE_integer('num_shard', 10, '''''')
tf.app.flags.DEFINE_integer('num_thread', 10, '''''')
tf.app.flags.DEFINE_string('output_dir', '/mnt/shared/easydl/train_data_dir_tfreocrd', '''''')
tf.app.flags.DEFINE_string('config_dir', '/mnt/shared/easydl/train_data_dir_tfreocrd', '''''')
FLAGS = tf.app.flags.FLAGS
new_config = {
"null_code": 84,
"name": "MYDATASET",
"items_to_descriptions": {
"text": "A unicode string.",
"image": "A [150 x 150 x 3] colorimage.",
"num_of_views": "A number of different views stored within the image.",
"length": "A length of the encodedtext.",
"label": "Characters codes."
},
"image_shape": [150, 150, 3],
"charset_filename": "charset_size.txt",
"max_sequence_length": 275,
"num_of_views": 1,
"splits": {
"test": {
"pattern": "test*",
"size": 7
},
"train": {
"pattern": "train*",
"size": 1000
}
}
}
def build_char(data_dir):
config_file = os.path.join(FLAGS.config_dir,'newdataset_config_json.json')
#f = open(config_file,'r')
#new_config = json.load(f)
#f.close()
char_dic = []
label_dic = {}
length = 0
files = tf.gfile.Glob(data_dir + '/*.txt')
for f in files:
text_in_image = ''
import codecs
for line in codecs.open(f,'r',encoding='utf-8').readlines():
text = ''.join(line.split(',')[8:])
text = text.replace('\r','')
text = text.replace('\n','')
if '#' in text:
continue
else:
text_in_image += text
label_dic[os.path.abspath(f)] = text_in_image
length = len(text_in_image) if len(text_in_image) > length else length
char_dic.extend(list(text_in_image))
char_dic = list(set(char_dic))
if 'train' in data_dir:
key = range(len(char_dic))
char_set = dict(zip(char_dic,key))
char_set['length'] = length
import codecs
with codecs.open(os.path.join(FLAGS.output_dir,"charset.json"),'w',encoding='utf-8') as json_file:
json.dump(char_set, json_file, sort_keys=True, indent=4, separators=(',', ': '))
new_config['null_code'] = len(char_set.keys())-1
new_config['max_sequence_length'] = length
new_config['splits']['train']['size'] = len(files)
#f = open(config_file,'w')
#json.dump(new_config,f)
#f.close()
else:
import codecs
with codecs.open(os.path.join(FLAGS.output_dir,"charset.json"),'r',encoding='utf-8') as json_file:
char_set=json.load(json_file)
length = char_set['length']
new_config['splits']['test']['size'] = len(files)
f = open(config_file,'w')
json.dump(new_config,f,indent=4)
f.close()
char_set.pop('length')
null_char_id = len(char_set.keys())
if 'train' in data_dir:
char_path = os.path.join(FLAGS.output_dir,'charset_size.txt')
import codecs
fw = codecs.open(char_path,'w+',encoding='utf-8')
for i in char_set:
tt = str(char_set[i]) + '\t' + i + '\n'
fw.write(tt)
tt = str(null_char_id) + '\t' + '<nul>'
fw.write(tt)
fw.close()
return char_set, length, null_char_id, label_dic
def encode_utf8_string(text, length, charset, null_char_id):
char_ids_padded = [null_char_id]*length
char_ids_unpadded = [null_char_id]*len(text)
for i in range(len(text)):
hash_id = charset[text[i]]
char_ids_padded[i] = hash_id
char_ids_unpadded[i] = hash_id
return char_ids_padded, char_ids_unpadded
def _bytes_feature(value):
    if isinstance(value, type(u'')):
        value = value.encode('utf-8')  # text must be bytes for BytesList
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def process_image(filename):
img = cv2.imread(filename)
img = cv2.resize(img,(150,150))
_,jpgVector = cv2.imencode('.jpg',img)
image_data = jpgVector.tostring()
#image_data = tf.gfile.GFile(filename,'r').read()
"""
sess = tf.Session()
try:
image = tf.image.decode_image(image_data, channels = 3).eval(session=sess)
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
assert len(image.shape) == 3
except:
pass
"""
width = 150
height =150
return image_data, width, height
def convert_to_example(image_file, image_buffer, text, char_ids_padded, char_ids_unpadded, width, height):
example = tf.train.Example(features=tf.train.Features(feature={
'image/format': _bytes_feature("JPG"),
'image/encoded': _bytes_feature(image_buffer),
'image/class':_int64_feature(char_ids_padded),
'image/unpadded_class': _int64_feature(char_ids_unpadded),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'orig_width': _int64_feature(width),
'image/text': _bytes_feature(text),
'image/image_file':_bytes_feature(image_file)
}
))
return example
def process_image_files_batch(thread_index, ranges, name, filename_label_dic, charset, length, null_char_id, num_shards):
    global num_example, num_corrupted
    num_threads = len(ranges)
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        filenames = list(filename_label_dic.keys())
for i in files_in_shard:
text = filename_label_dic[filenames[i]]
image_filename = filenames[i].replace('.txt','.jpg')
try:
char_ids_padded, char_ids_unpadded = encode_utf8_string(
text, length, charset, null_char_id)
image_buffer, width, height = process_image(image_filename)
except Exception as e:
print(e)
print(image_filename + ' is abandoned')
lock.acquire()
try:
num_example = num_example - 1
num_corrupted = num_corrupted + 1
finally:
lock.release()
continue
text = str(text)
example = convert_to_example(image_filename, image_buffer, text, char_ids_padded, char_ids_unpadded, width, height)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
writer.close()
def process_image_files(subset, filename_label_dic,charset,length,null_char_id):
num_shards = FLAGS.num_shard
num_thread = FLAGS.num_thread
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
num_images = len(filename_label_dic.keys())
spacing = np.linspace(0, num_images, num_thread + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
coord = tf.train.Coordinator()
threads = []
for thread_index in range(len(ranges)):
args = (thread_index, ranges, subset, filename_label_dic, charset, length, null_char_id, num_shards)
t = threading.Thread(target=process_image_files_batch, args=args)
t.start()
threads.append(t)
coord.join(threads)
def main(unused_argv):
if not tf.gfile.Exists(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
if not tf.gfile.Exists(FLAGS.config_dir):
tf.gfile.MakeDirs(FLAGS.config_dir)
file_list = tf.gfile.ListDirectory(FLAGS.data_dir)
i = file_list[0] if 'train' in file_list[0] else file_list[1]
charset,length,null_char_id,train_filename_label_dic = build_char(os.path.join(FLAGS.data_dir,i))
i = file_list[0] if 'test' in file_list[0] else file_list[1]
charset,length,null_char_id,test_filename_label_dic = build_char(os.path.join(FLAGS.data_dir,i))
process_image_files('train',train_filename_label_dic,charset,length,null_char_id)
process_image_files('test',test_filename_label_dic,charset,length,null_char_id)
print('----' * 15)
print('finished')
print('----' * 15)
if __name__ == '__main__':
tf.app.run()
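# Example invocation (hypothetical directories). --data_dir must contain a
# 'train*' and a 'test*' sub-directory holding paired .jpg/.txt files:
#   python tfrecord.py --data_dir /data/ocr --output_dir /data/ocr_tfrecord \
#       --config_dir /data/ocr_tfrecord --num_shard 10 --num_thread 10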
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/metrics/multi_class_cross_entry_metrics.py
|
import logging
from PlatformNlp.metrics import register_metrices
from PlatformNlp.metrics.platform_metrics import PlatformMetrice
from PlatformNlp.utils import calculate_label
import json
logger = logging.getLogger(__name__)
@register_metrices('multi_class')
class MultiClassMetrics(PlatformMetrice):
"""
Sentence (or sentence pair) prediction (classification or regression) task.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--label_file', type=str, help='label_file for mapping json')
def __init__(self, args, input_ids, label_ids, predict_scores, label_mapping):
super().__init__(args, input_ids, label_ids, predict_scores, label_mapping)
self.args = args
self.input_ids = input_ids
self.label_ids = label_ids
self.predict_scores = predict_scores
self.label_mapping = label_mapping
def compute_metrices(self):
predict_labels = []
for score in self.predict_scores:
predict_labels.append(list(score).index(max(list(score))))
precision, recall, accuracy, f1_score, auc_micro, auc_macro, classify_report = calculate_label(predict_labels,
self.label_ids,
list(self.label_mapping.values()))
classification_report_dict = {}
classify_report = str(classify_report).split('\n')
for i in range(len(classify_report)):
x = classify_report[i]
x = str(x).split(' ')
xx = []
            for j in x:
                if len(j) > 0:
                    xx.append(j)
if len(xx) == 4:
classification_report_dict['evaluation_index'] = xx
elif len(xx) == 7:
classification_report_dict['avg_all'] = xx[3:]
elif len(xx) > 0:
classification_report_dict[xx[0]] = xx[1:]
json_dict = dict()
json_dict["precision"] = precision
json_dict["recall_score"] = recall
json_dict["accuracy"] = accuracy
json_dict["f1_score"] = f1_score
json_dict["auc_micro"] = auc_micro
json_dict["auc_macro"] = auc_macro
json_dict["classification_report"] = classification_report_dict
json_data = json.dumps(json_dict)
return json_data
|
jd-aig/aves2_algorithm_components
|
src/cv/object_detection/fasterrcnn_resnet50/cal_params.py
|
import os
import argparse
from object_detection.utils import label_map_util
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="./")
parser.add_argument("--epochs", type=int, default=30)
parser.add_argument("--batch_size", type=int, default=24)
parser.add_argument("--learning_rate", type=float, default=0.0001)
args = parser.parse_args()
label_map_path = os.path.join(args.data_dir, 'ImageSets', 'label_map.pbtxt')
label_map_dict = label_map_util.get_label_map_dict(label_map_path)
num_classes_val = len(label_map_dict)
train_txt = os.path.join(args.data_dir, 'ImageSets', 'train.txt')
val_txt = os.path.join(args.data_dir, 'ImageSets', 'val.txt')
count = 0
for index, line in enumerate(open(train_txt,'r')):
count += 1
num_examples_train = count
count = 0
for index, line in enumerate(open(val_txt,'r')):
count += 1
num_examples_val = count
num_train_steps_val = num_examples_train//args.batch_size
if num_train_steps_val == 0:
num_train_steps_val = 1
num_train_steps_val = num_train_steps_val*args.epochs
decay_steps_val_1 = num_train_steps_val//3
if decay_steps_val_1 == 0:
decay_steps_val_1 = 1
decay_steps_val_2 = num_train_steps_val//3*2
if decay_steps_val_2 == 0:
decay_steps_val_2 = 1
if decay_steps_val_2 == decay_steps_val_1:
decay_steps_val_2 = decay_steps_val_1+1
learning_rate_val_1 = args.learning_rate*0.1
learning_rate_val_2 = args.learning_rate*0.01
print("num_classes_val = ",num_classes_val," ,decay_steps_val_1 = ",decay_steps_val_1," ,decay_steps_val_2 = ",decay_steps_val_2," ,num_examples_val = ",num_examples_val," ,num_train_steps_val = ",num_train_steps_val," ,learning_rate_val_1 = ",learning_rate_val_1," ,learning_rate_val_2 = ",learning_rate_val_2)
f = open(os.path.join("cal_params.txt"),"w")
f.write(str(num_classes_val)+'\n')
f.write(str(decay_steps_val_1)+'\n')
f.write(str(decay_steps_val_2)+'\n')
f.write(str(num_examples_val)+'\n')
f.write(str(num_train_steps_val)+'\n')
f.write(str(learning_rate_val_1)+'\n')
f.write(str(learning_rate_val_2)+'\n')
f.close()
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/preprocessing/fake_new_inception_preprocessing.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import cv2
import tensorflow as tf
import time
def opencv_preprocess_for_train(image,height,width,begin,size):
    print('opencv start')
croped_image = image[begin[0]:begin[0]+size[0],begin[1]:begin[1]+size[1],:]
resized_image = cv2.resize(croped_image,(height,width),interpolation = cv2.INTER_LINEAR)
lr_flip = cv2.flip(resized_image, 1) if numpy.random.uniform()>0.5 else resized_image
ud_flip = cv2.flip(lr_flip, 0) if numpy.random.uniform()>0.5 else lr_flip
#distorted_image = distort_image(ud_flip)
alpha = numpy.random.uniform(low= -32., high= 32., size= 1 )
blank = numpy.ones_like(ud_flip)
adjust_brightness_image = cv2.addWeighted(ud_flip,1,blank,alpha,0)
adjust_brightness_image[adjust_brightness_image[:,:,:]>255]=255
adjust_brightness_image[adjust_brightness_image[:,:,:]<0]=0
#image = cv2.inRange(image, numpy.array([0, 0, 0]), numpy.array([255, 255, 255]))
# adjust saturation
hsv = cv2.cvtColor(adjust_brightness_image,cv2.COLOR_RGB2HSV)
alpha = numpy.random.uniform(low= 0.5, high= 1.5, size= 1 )
hsv[:,:,1] = alpha * hsv[:,:,1]
#hsv[hsv[:,:,1]>180]=180 ???
adjust_saturation_image = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
distorted_image = adjust_saturation_image * 1./255.
distorted_image = (distorted_image - 0.5)*2
    print('opencv done')
print(time.time())
print(distorted_image)
    # cast to float32 so the dtype matches the one declared for tf.py_func below
    return distorted_image.astype(numpy.float32)
def preprocess_for_train(image,height,width):
with tf.name_scope(None, 'distort_image', [image, height, width]):
#image = tf.Print(image,[tf.shape(image)])
image = tf.Print(image,['in new.preprocess_for_train'])#
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75,1.33),
area_range=(0.05,1.0),
max_attempts=100,
use_image_if_no_bounding_boxes=True)
begin,size,_ = sample_distorted_bounding_box
height = tf.convert_to_tensor(height)
width = tf.convert_to_tensor(width)
#print(tf.py_func(opencv_preprocess_for_train, [image,height,width,begin,size], tf.float32))
preprocessed_image = tf.py_func(opencv_preprocess_for_train, [image,height,width,begin,size], tf.float32,stateful=True)
preprocessed_image = tf.Print(preprocessed_image,['in new.preprocess_for_train done'])
#print(preprocessed_image)
preprocessed_image = tf.Print(preprocessed_image,[preprocessed_image])
return preprocessed_image
def preprocess_for_eval(image,height,width):
def opencv_preprocess_for_eval(image,height,width):
h,w,_ = image.shape
croped_image = image[int(0.0625*h):int(0.9375*h),int(0.0625*w):int(0.9375*w),:]
resized_image = cv2.resize(croped_image,(height,width),interpolation = cv2.INTER_LINEAR)
resized_image = resized_image * 1./255.
resized_image = (resized_image - 0.5)*2
        # cast to float32 so the dtype matches the one declared for tf.py_func below
        return resized_image.astype(numpy.float32)
image = tf.py_func(opencv_preprocess_for_eval, [image,height,width], tf.float32)
return image
def preprocess_image(image,height,width,is_training=False):
if is_training:
#sample_distorted_bounding_box = ([10,10,0],[900,900,-1],1)
#image = tf.Print(image,[is_training])
return preprocess_for_train(image,height,width)
else:
return preprocess_for_eval(image,height,width)
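# Minimal usage sketch (assumes TF1 and an existing tf.data dataset of decoded
# (image, label) pairs; the names below are illustrative only):
#   dataset = dataset.map(
#       lambda img, label: (preprocess_image(img, 224, 224, is_training=True), label))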
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/tasks/platform_task.py
|
import logging
import os
import warnings
from PlatformNlp.data.dictionary import Dictionary
logger = logging.getLogger(__name__)
class PlatformTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
def __init__(self, args):
self.args = args
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(cls, filenames, tokenizer):
d = Dictionary()
for filename in filenames:
counter = Dictionary.add_file_to_dictionary(filename, tokenizer)
for w, c in sorted(counter.items()):
d.add_symbol(w, c)
return d
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
@classmethod
def load_dataset(cls, args):
"""Load a given dataset split (e.g., train, valid, test)."""
from PlatformNlp import data
dataset = data.get_dataset(args)
if args.type == "train":
is_training = True
else:
is_training = False
input_fn = dataset.builder(args.data_file, is_training, args.batch_size, is_training, args)
return input_fn
def build_criterion(self, args):
"""
Build the :class:`~PlatformNlp.criterions.PlatformNlpCriterion` instance for
this task.
"""
from PlatformNlp import criterions
return criterions.build_criterion(args, self)
def build_model(self, args):
"""
        Build the :class:`~PlatformNlp.models.platform_model.PlatformModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
            a :class:`~PlatformNlp.models.platform_model.PlatformModel` instance
"""
from PlatformNlp import models
model = models.build_model(args, self)
self.model = model
return model
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~PlatformNlp.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
|
jd-aig/aves2_algorithm_components
|
src/cv/ocr_recognition/CRNN/data_provider/__init__.py
|
# -*- coding: utf-8 -*-
# @Time : 17-9-22 下午1:39
# @Author : <NAME>
# @Site : http://github.com/TJCVRS
# @File : __init__.py
# @IDE: PyCharm Community Edition
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/data/dictionary.py
|
import os
from collections import Counter
from multiprocessing import Pool
from PlatformNlp import utils
class Dictionary(object):
"""A mapping from symbols to consecutive integers"""
def __init__(
self,
*, # begin keyword-only arguments
begin="[CLS]",
pad="[PAD]",
sep="[SEP]",
unk="[UNK]",
mask="[MASK]",
extra_special_symbols=None,
):
self.unk_word, self.pad_word, self.sep_word, self.begin_word, self.mark_word = unk, pad, sep, begin, mask
self.symbols = []
self.count = []
self.indices = {}
self.begin_index = self.add_symbol(begin)
self.pad_index = self.add_symbol(pad)
self.sep_index = self.add_symbol(sep)
self.unk_index = self.add_symbol(unk)
self.mask_index = self.add_symbol(mask)
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s)
self.nspecial = len(self.symbols)
def index(self, sym):
"""Returns the index of the specified symbol"""
assert isinstance(sym, str)
if sym in self.indices:
return self.indices[sym]
return self.unk_index
def add_symbol(self, word, n=1, overwrite=False):
"""Adds a word to the dictionary"""
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols)
self.indices[word] = idx
self.symbols.append(word)
self.count.append(n)
return idx
def finalize(self, threshold=-1, nwords=-1, padding_factor=4):
"""Sort symbols by frequency in descending order, ignoring special ones.
Args:
- threshold defines the minimum word count
- nwords defines the total number of words in the final dictionary,
including special symbols
- padding_factor can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
if nwords <= 0:
nwords = len(self)
new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial)))
new_symbols = self.symbols[: self.nspecial]
new_count = self.count[: self.nspecial]
c = Counter(
dict(
sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :]))
)
)
for symbol, count in c.most_common(nwords - self.nspecial):
if count >= threshold:
new_indices[symbol] = len(new_symbols)
new_symbols.append(symbol)
new_count.append(count)
else:
break
assert len(new_symbols) == len(new_indices)
self.count = list(new_count)
self.symbols = list(new_symbols)
self.indices = new_indices
@classmethod
def load(cls, f):
"""Loads the dictionary from a text file with the format:
```
<symbol0> <count0>
<symbol1> <count1>
...
```
"""
d = cls()
d.add_from_file(f)
return d
def add_from_file(self, f):
"""
Loads a pre-existing dictionary from a text file and adds its symbols
to this instance.
"""
if isinstance(f, str):
try:
with open(f, "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(
"Incorrect encoding detected in {}, please "
"rebuild the dataset".format(f)
)
return
for line in f:
try:
line = line.strip("\n")
line = line.strip("\r")
line = line.strip(" ")
word = line
self.add_symbol(word, n=1, overwrite=True)
except ValueError:
raise ValueError(
"Incorrect dictionary format, expected '<token> <cnt> [flags]'"
)
@staticmethod
def _add_file_to_dictionary(filename, dict, tokenize):
counter = Counter()
with open(filename, "r", encoding="utf-8") as f:
for line in f:
line = line.strip("\n")
line = line.strip("\r")
for word in tokenize(line):
counter.update([word])
for w, c in sorted(counter.items()):
dict.add_symbol(w, c)
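# Usage sketch (illustrative): build a small dictionary and map symbols to ids;
# unknown symbols fall back to the [UNK] index.
#   d = Dictionary()
#   for tok in "hello world hello".split():
#       d.add_symbol(tok)
#   d.index("hello")                      # id assigned to "hello"
#   d.index("missing") == d.unk_index     # True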
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/criterions/nce_loss.py
|
import math
import tensorflow as tf
from PlatformNlp.criterions.platform_criterion import PlatformNlpCriterion
from PlatformNlp.criterions import register_criterion
@register_criterion('nce')
class NceLossCriterion(PlatformNlpCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.args = args
self.task = task
def get_loss(self):
"""Construct a criterion from command-line args."""
embedding_output = self.task.model.get_output()
embedding_output = tf.reshape(embedding_output, [-1, self.args.embedding_size])
embedding_table = self.task.model.get_embedding()
labels = tf.sparse_tensor_to_dense(self.task.labels)
labels = tf.reshape(labels, [-1])
labels = tf.expand_dims(labels, axis=[-1])
with tf.variable_scope("nce", reuse=tf.AUTO_REUSE):
self.nce_weight = tf.get_variable("nce_weight", initializer=tf.truncated_normal(
[self.args.vocab_size, self.args.embedding_size], stddev=1.0 / math.sqrt(self.args.embedding_size)))
self.nce_biases = tf.get_variable("nce_biases", initializer=tf.zeros([self.args.vocab_size]))
per_example_loss = tf.nn.nce_loss(weights=self.nce_weight,
biases=self.nce_biases,
labels=labels,
inputs=embedding_output,
num_sampled=self.args.num_sampled,
num_classes=self.args.vocab_size)
loss = tf.reduce_mean(per_example_loss)
logits = tf.matmul(embedding_output, self.nce_weight, transpose_b=True)
logits = tf.nn.bias_add(logits, self.nce_biases)
        vec_l2_model = tf.sqrt(  # L2 norm of each word embedding vector
tf.reduce_sum(tf.square(embedding_table), 1, keep_dims=True)
)
normed_embedding = embedding_table / vec_l2_model
input_ids = self.task.features["input_ids"]
input_ids = tf.sparse_tensor_to_dense(input_ids)
normed_embedding_output = tf.nn.embedding_lookup(normed_embedding, input_ids)
normed_embedding_output = tf.reshape(normed_embedding_output, [-1, self.args.embedding_size])
similarity = tf.matmul(normed_embedding_output, normed_embedding, transpose_b=True)
similarity = tf.reshape(similarity, [self.args.batch_size, -1, self.args.vocab_size])
probabilities = similarity
return (loss, per_example_loss, logits, probabilities)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/batch_norm.py
|
import tensorflow as tf
def batch_normalization(x, phase_train, gamma, beta):
"""
Batch normalization on convolutional maps.
Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
Args:
x: Tensor, 4D BHWD input maps
        phase_train: boolean tf.Variable, true indicates training phase
gamma: Variable of this bn
beta: Variable of this bn
Return:
normed: batch-normalized maps
"""
with tf.variable_scope('bn'):
batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
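# Usage sketch (TF1, illustrative shapes): gamma/beta are created by the caller
# and phase_train switches between batch statistics and their moving averages.
#   x = tf.placeholder(tf.float32, [None, 10])
#   gamma = tf.Variable(tf.ones([10]))
#   beta = tf.Variable(tf.zeros([10]))
#   phase_train = tf.placeholder(tf.bool)
#   y = batch_normalization(x, phase_train, gamma, beta)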
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/dssm_layer.py
|
import tensorflow as tf
from PlatformNlp.modules.utils import get_shape_list, create_initializer
from PlatformNlp.modules.batch_norm import batch_normalization
from PlatformNlp.modules.drop_out import dropout
from PlatformNlp.modules.cosine_score import get_cosine_score
def dssm_layer(query_ids, doc_ids, hidden_sizes, act_fn, is_training, max_seq_length, embedding_size, initializer_range, dropout_prob):
shape = get_shape_list(query_ids, expected_rank=[2, 3])
if len(shape) == 3:
query_ids = tf.reshape(query_ids, [-1, shape[1] * shape[2]])
doc_ids = tf.reshape(doc_ids, [-1, shape[1] * shape[2]])
for i in range(0, len(hidden_sizes) - 1):
query_ids = tf.layers.dense(query_ids, hidden_sizes[i], activation=act_fn,
name="query_{}".format(str(i)),
kernel_initializer=create_initializer(initializer_range))
doc_ids = tf.layers.dense(doc_ids, hidden_sizes[i], activation=act_fn,
name="doc_{}".format(str(i)),
kernel_initializer=create_initializer(initializer_range))
if is_training:
query_ids = dropout(query_ids, dropout_prob)
doc_ids = dropout(doc_ids, dropout_prob)
query_pred = act_fn(query_ids)
doc_pred = act_fn(doc_ids)
cos_sim = get_cosine_score(query_pred, doc_pred)
cos_sim_prob = tf.clip_by_value(cos_sim, 1e-8, 1.0)
prob = tf.concat([query_pred, doc_pred], axis=1)
return query_pred, doc_pred, prob
|
jd-aig/aves2_algorithm_components
|
src/cv/ocr_detection/EAST/pro.py
|
txt_fn = im_fn.replace(os.path.basename(im_fn).split('.')[1], 'txt')
if not os.path.exists(txt_fn):
print('text file {} does not exists'.format(txt_fn))
continue
text_polys, text_tags = load_annoataion(txt_fn)
text_polys, text_tags = check_and_validate_polys(text_polys, text_tags, (h, w))
text_polys[:, :, 0] *= resize_ratio_3_x
text_polys[:, :, 1] *= resize_ratio_3_y
ratio_y_h, ratio_x_w
score_map, geo_map, training_mask = generate_rbox((new_h, new_w), text_polys, text_tags)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/layer_norm_and_drop_out.py
|
from PlatformNlp.modules.layer_norm import layer_norm
from PlatformNlp.modules.drop_out import dropout
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import tensorflow as tf
from PlatformNlp.modules.gelu import gelu
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
def create_initializer(initializer_range=0.1):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
  # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "tanh":
return tf.tanh
elif act == "gelu":
return gelu
else:
raise ValueError("Unsupported activation: %s" % act)
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
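# Illustrative use of get_shape_list (TF1 graph mode): static dimensions come
# back as Python ints, unknown dimensions as dynamic tf.Tensor scalars.
#   x = tf.placeholder(tf.float32, shape=[None, 128])
#   get_shape_list(x, expected_rank=2)   # -> [<dynamic batch-size tensor>, 128]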
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/criterions/ner_loss.py
|
import math
import tensorflow as tf
from PlatformNlp.criterions.platform_criterion import PlatformNlpCriterion
from PlatformNlp.criterions import register_criterion
from tensorflow.contrib import crf
@register_criterion('ner')
class NerLossCriterion(PlatformNlpCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.args = args
self.task = task
def get_loss(self):
"""Construct a criterion from command-line args."""
sequence_output = self.task.model.get_output()
input_ids = self.task.features["input_ids"]
used = tf.sign(tf.abs(input_ids))
        lengths = tf.reduce_sum(used, reduction_indices=1)  # [batch_size] vector holding the sequence length of each example in the batch
sequence_output = tf.reshape(sequence_output, shape=[-1, self.args.hidden_size])
with tf.variable_scope("logits", reuse=tf.AUTO_REUSE):
w = tf.get_variable("logits_w", shape=[self.args.hidden_size, self.args.num_classes],
dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable("logits_b", shape=[self.args.num_classes], dtype=tf.float32,
initializer=tf.zeros_initializer())
pred = tf.nn.xw_plus_b(sequence_output, w, b)
pred = tf.reshape(pred, [-1, self.args.max_seq_length, self.args.num_classes])
with tf.variable_scope("crf", reuse=tf.AUTO_REUSE):
trans = tf.get_variable(
"transitions",
shape=[self.args.num_classes, self.args.num_classes],
initializer=tf.contrib.layers.xavier_initializer())
if self.task.labels is None:
loss = None
log_likelihood = None
else:
log_likelihood, trans = tf.contrib.crf.crf_log_likelihood(
inputs=pred,
tag_indices=self.task.labels,
transition_params=trans,
sequence_lengths=lengths)
loss = tf.reduce_mean(-log_likelihood)
        # CRF decode: pred_ids is the highest-probability tag path
pred_ids, _ = crf.crf_decode(potentials=pred, transition_params=trans, sequence_length=lengths)
return (loss, None, pred, pred_ids)
|
jd-aig/aves2_algorithm_components
|
src/ml/regression/linearReg/run.py
|
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error,r2_score
from sklearn.externals import joblib
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="MEDV")
parser.add_argument("--fit_intercept", type=bool, default=True)
parser.add_argument("--normalize", type=bool, default=False)
args = parser.parse_args()
train_dataset = os.path.join(args.data_dir,'train.csv')
if not os.path.exists(train_dataset):
print("ERROR: train.csv is not exists!")
exit()
train_data = pd.read_csv(train_dataset)
lst = train_data.columns.values.tolist()
idx = lst.index(args.target)
del lst[idx]
y_train = train_data.loc[:,args.target].values
x_train = train_data.loc[:,lst].values
model = LinearRegression(fit_intercept=args.fit_intercept,normalize=args.normalize,copy_X=True,n_jobs=1)
model.fit(x_train,y_train,sample_weight=None)
save_path = os.path.join(args.output_path,'model.m')
joblib.dump(model,save_path)
print("LinearRegression train finished.save model in model/output/model.m")
|
jd-aig/aves2_algorithm_components
|
src/cv/ocr_detection/CTPN/utils/prepare/test.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
import shapely
from shapely.geometry import Polygon,MultiPoint
from utils import pickTopLeft
line1=[1,0,30,1,5,55,5,20,40,3,6,40,10,4]
p=np.array(line1).reshape(7, 2)
points = Polygon(p).convex_hull
print(points)
points = np.array(points.exterior.coords)
print(points)
points = points[::-1]
print(points)
points = pickTopLeft(points)
print(points)
points = np.array(points).reshape([4, 2])
print(points)
poly = points
x_min = int(np.min(poly[:, 0]))
x_max = int(np.max(poly[:, 0]))
k1 = (poly[1][1] - poly[0][1]) / (poly[1][0] - poly[0][0])
b1 = poly[0][1] - k1 * poly[0][0]
k2 = (poly[2][1] - poly[3][1]) / (poly[2][0] - poly[3][0])
b2 = poly[3][1] - k2 * poly[3][0]
print(k1,b1,k2,b2)
res = []
r = 16
start = int((x_min // 16 + 1) * 16)
end = int((x_max // 16) * 16)
print(start)
print(end)
p = x_min
res.append([p, int(k1 * p + b1),
start - 1, int(k1 * (p + 15) + b1),
start - 1, int(k2 * (p + 15) + b2),
p, int(k2 * p + b2)])
print(res)
for p in range(start, end + 1, r):
res.append([p, int(k1 * p + b1),
(p + 15), int(k1 * (p + 15) + b1),
(p + 15), int(k2 * (p + 15) + b2),
p, int(k2 * p + b2)])
print(res)
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/performence.py
|
import sklearn
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import OneHotEncoder
import json
import argparse
import numpy
def per(labels,logits,files,output_dir):
preds = [i.index(max(i)) for i in logits]
num_class = len(logits[0])
enc = OneHotEncoder(dtype=numpy.int)
X = [[i] for i in range(num_class)]
labelsll = [[i] for i in labels]
enc.fit(X)
labels_one_hot = enc.transform(labelsll).toarray()
classif_per = {}
accuracy_score = metrics.accuracy_score(labels,preds)
classif_per['accuracy'] = accuracy_score
precision_score = metrics.precision_score(labels,preds,average='macro')
classif_per['precision'] = precision_score
f1_score = metrics.f1_score(labels,preds,average='macro')
classif_per['f1_score'] = f1_score
recall_score = metrics.recall_score(labels,preds,average='macro')
classif_per['recall_score'] = recall_score
roc_auc_score_micro = metrics.roc_auc_score(labels_one_hot, logits, average='micro')
classif_per['auc_micro'] = roc_auc_score_micro
classification_report_dict = {}
classification_report = metrics.classification_report(labels,preds,labels=range(num_class))
classification_report = str(classification_report).split('\n')
for i in range(len(classification_report)):
x = classification_report[i]
x = str(x).split(' ')
xx =[]
        for j in x:
            if len(j) > 0:
                xx.append(j)
if len(xx) == 4:
classification_report_dict['evaluation_index'] = xx
elif len(xx) == 7:
classification_report_dict['avg_all'] = xx[3:]
elif len(xx)>0:
classification_report_dict[xx[0]]=xx[1:]
classif_per['classification_report'] = classification_report_dict
confusion_matrix = metrics.confusion_matrix(labels,preds)
confusion_matrix_str = ''
for i in confusion_matrix:
for j in i:
confusion_matrix_str = confusion_matrix_str + str(j) + '\t'
confusion_matrix_str = confusion_matrix_str + '\n'
classif_per_path = output_dir + 'result.json'
jsObj = json.dumps(classif_per)
fileObject = open(classif_per_path, 'w')
fileObject.write(jsObj)
fileObject.close()
confusion_matrix_path = output_dir + 'confusion_matrix.txt'
confusion_matrix_file = open(confusion_matrix_path,'w')
confusion_matrix_file.write(confusion_matrix_str)
confusion_matrix_file.close()
correct_path = output_dir + 'correct.txt'
error_path = output_dir + 'error.txt'
right = open(correct_path,'w')
error = open(error_path,'w')
for i in range(len(files)):
f = files[i]
l = labels[i]
p = preds[i]
if l == p:
right.write(f + '\t' + str(l) + '\t' + str(p) + '\n')
else:
error.write(f + '\t' + str(l) + '\t' + str(p) + '\n')
right.close()
error.close()
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/checkpoint_util.py
|
import re
import operator
import collections
import tensorflow as tf
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
shape_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var_shape) = (x[0], x[1])
if name not in name_to_variable:
continue
elif not operator.eq(name_to_variable[name].shape.as_list(), list(var_shape)):
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
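# Typical usage sketch (TF1; init_checkpoint is a caller-supplied path): map the
# variables of the current graph onto a pre-trained checkpoint before training.
#   tvars = tf.trainable_variables()
#   assignment_map, initialized_names = get_assignment_map_from_checkpoint(tvars, init_checkpoint)
#   tf.train.init_from_checkpoint(init_checkpoint, assignment_map)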
|
jd-aig/aves2_algorithm_components
|
src/cv/image_classification/preprocessing/new_inception_preprocessing.py
|
# it is a reconsitution of inception_preprocessing with opencv tool
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy
from tensorflow.python.ops import control_flow_ops
import cv2
def preprocess_for_train(image, height, width, bbox,
fast_mode=True,
scope=None,
add_image_summaries=True):
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75,1.33),
area_range=(0.05,1.0),
max_attempts=100,
use_image_if_no_bounding_boxes=True)
begin,size,_ = sample_distorted_bounding_box
if image.dtype != tf.float32:
image = tf.cast(image, tf.float32)
#image = tf.image.convert_image_dtype(image, dtype=tf.float32)
#image = tf.Print(image,['before preprocess',image],summarize=100)
def opencv_preprocess_for_train(image,height,width,begin,size):
#return(image)
croped_image = image[begin[0]:begin[0]+size[0],begin[1]:begin[1]+size[1],:]
resized_image = cv2.resize(croped_image,(height,width),interpolation = cv2.INTER_LINEAR)
lr_flip = cv2.flip(resized_image, 1) if numpy.random.uniform()>0.5 else resized_image
ud_flip = cv2.flip(lr_flip, 0) if numpy.random.uniform()>0.5 else lr_flip
#distorted_image = distort_image(ud_flip)
alpha = numpy.random.uniform(low= -32., high= 32., size= 1 )
blank = numpy.ones_like(ud_flip)
adjust_brightness_image = cv2.addWeighted(ud_flip,1,blank,alpha,0)
adjust_brightness_image[adjust_brightness_image[:,:,:]>255]=255
adjust_brightness_image[adjust_brightness_image[:,:,:]<0]=0
#image = cv2.inRange(image, numpy.array([0, 0, 0]), numpy.array([255, 255, 255]))
# adjust saturation
hsv = cv2.cvtColor(adjust_brightness_image,cv2.COLOR_RGB2HSV)
alpha = numpy.random.uniform(low= 0.5, high= 1.5, size= 1 )
hsv[:,:,1] = alpha * hsv[:,:,1]
#hsv[hsv[:,:,1]>180]=180 ???
adjust_saturation_image = cv2.cvtColor(hsv,cv2.COLOR_HSV2RGB)
distorted_image = adjust_saturation_image * 1./255.
distorted_image = (distorted_image - 0.5)*2
return distorted_image
image = tf.py_func(opencv_preprocess_for_train, [image,height,width,begin,size], tf.float32)
#image = tf.Print(image,['after preprocess',image],summarize=100)
return image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.cast(image, tf.float32)
#image = tf.image.convert_image_dtype(image, dtype=tf.float32)
#image = tf.Print(image,[image])
# Crop the central region of the image with an area containing 87.5% of
# the original image.
def opencv_preprocess_for_eval(image,height,width):
h,w,_ = image.shape
#croped_image = image[int(0.0625*w):int(int(0.0625*w)+0.875*w),int(0.0625*h):int(int(0.0625*h)+0.875*h),:]
croped_image = image[int(0.0625*h):int(int(0.0625*h)+0.875*h),int(0.0625*w):int(int(0.0625*w)+0.875*w),:]
#croped_image = image[int(0.0625*h):int(0.9375*h),int(0.0625*w):int(0.9375*w),:]
resized_image = cv2.resize(croped_image,(width,height),interpolation = cv2.INTER_LINEAR)
resized_image = resized_image * 1./255.
resized_image = (resized_image - 0.5)*2
return resized_image
height = tf.convert_to_tensor(height)
width = tf.convert_to_tensor(width)
image = tf.py_func(opencv_preprocess_for_eval, [image,height,width], tf.float32)
return image
def preprocess_image(image, height, width,
is_training=False,
bbox=None,
fast_mode=True,
add_image_summaries=True):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it would be converted
to tf.float32 assuming that the range is [0, MAX], where MAX is largest
positive representable number for int(8/16/32) data type (see
`tf.image.convert_image_dtype` for details).
height: integer, image expected height.
width: integer, image expected width.
is_training: Boolean. If true it would transform an image for train,
otherwise it would transform it for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
return preprocess_for_train(image, height, width, bbox, fast_mode,
add_image_summaries=add_image_summaries)
else:
return preprocess_for_eval(image, height, width)
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/modules/layer_norm.py
|
import tensorflow as tf
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
|
jd-aig/aves2_algorithm_components
|
src/cv/ocr_end2end/ATTENTION_OCR/demo_inference.py
|
# -*- coding: utf-8 -*-
"""A script to run inference on a set of image files.
NOTE #1: The Attention OCR model was trained only using FSNS train dataset and
it will work only for images which look more or less similar to french street
names. In order to apply it to images from a different distribution you need
to retrain (or at least fine-tune) it using images from that distribution.
NOTE #2: This script exists for demo purposes only. It is highly recommended
to use tools and mechanisms provided by the TensorFlow Serving system to run
inference on TensorFlow models in production:
https://www.tensorflow.org/serving/serving_basic
Usage:
python demo_inference.py --batch_size=32 \
--checkpoint=model.ckpt-399731\
--image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png
"""
#from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.python.platform import flags
from tensorflow.python.training import monitored_session
import common_flags
import datasets
import data_provider
import json
import os
import json
FLAGS = flags.FLAGS
common_flags.define()
# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png
flags.DEFINE_string('image_path', '',
                    'Directory (or path prefix) containing the .jpg images to run inference on.')
flags.DEFINE_string('result_path', '',
                    'Directory where predict_result.json will be written.')
def get_dataset_image_size(dataset_name):
# Ideally this info should be exposed through the dataset interface itself.
# But currently it is not available by other means.
ds_module = getattr(datasets, dataset_name)
config_path = os.path.join(FLAGS.config_dir,'newdataset_config_json.json')
config = json.load(open(config_path,'r'))
height, width, _ = config['image_shape']
return width, height
def load_images(path,dataset_name):
filenames = []
width, height = get_dataset_image_size(dataset_name)
images_actual_data = []
#images_actual_data = np.ndarray(shape=(1, height, width, 3),
# dtype='uint8')
for i in tf.gfile.Glob(path+'*.jpg'):
print("Reading %s" % i)
pil_image = PIL.Image.open(i)
pil_image = pil_image.resize((width, height),PIL.Image.ANTIALIAS)
images_actual_data.append(np.asarray(pil_image))
filenames.append(i)
return images_actual_data,filenames,len(images_actual_data)
def create_model(batch_size, dataset_name):
width, height = get_dataset_image_size(dataset_name)
dataset,_ = common_flags.create_dataset('train')
model = common_flags.create_model(
num_char_classes=dataset.num_char_classes,
seq_length=dataset.max_sequence_length,
num_views=dataset.num_of_views,
null_code=dataset.null_code,
charset=dataset.charset)
raw_images = tf.placeholder(tf.uint8, shape=[batch_size, height, width, 3])
images = tf.map_fn(data_provider.preprocess_image, raw_images,
dtype=tf.float32)
endpoints = model.create_base(images, labels_one_hot=None)
return raw_images, endpoints
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
result = {}
images_data, image_files, num_images = load_images(image_path_pattern,dataset_name)
images_placeholder, endpoints = create_model(1,
dataset_name)
session_creator = monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint)
with monitored_session.MonitoredSession(
session_creator=session_creator) as sess:
for i in range(num_images):
image = images_data[i]
image = image[np.newaxis,:,:,:]
predictions = sess.run(endpoints.predicted_text,feed_dict={images_placeholder: image})
print("image {} is predicted as {}".format(image_files[i],predictions[0]))
result[image_files[i]] = predictions[0]
result_json = os.path.join(FLAGS.result_path,'predict_result.json')
json.dump(result,open(result_json,'w'),indent=4)
return predictions
def main(_):
print("Predicted strings:")
checkpoint = tf.train.latest_checkpoint(
FLAGS.train_log_dir,
latest_filename=None)
print(checkpoint)
if not os.path.exists(FLAGS.result_path):
os.mkdir(FLAGS.result_path)
predictions = run(checkpoint, FLAGS.batch_size, FLAGS.dataset_name,
FLAGS.image_path)
if __name__ == '__main__':
tf.app.run()
|
jd-aig/aves2_algorithm_components
|
src/nlp/PlatformNlp/tasks/multi_class.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import codecs
import random
from PlatformNlp import utils
from PlatformNlp.data.dictionary import Dictionary
from PlatformNlp.tasks import PlatformTask, register_task
import tensorflow as tf
logger = logging.getLogger(__name__)
@register_task('multi_class')
class MultiClassTask(PlatformTask):
"""
Sentence (or sentence pair) prediction (classification or regression) task.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('--num_classes', type=int, default=-1,
help='number of classes or regression targets')
def __init__(self, args):
super().__init__(args)
self.args = args
self.model = None
@classmethod
def setup_task(cls, args, **kwargs):
assert args.num_classes > 0, 'Must set --num_classes'
return MultiClassTask(args)
def build_model(self):
from PlatformNlp import models
model = models.build_model(self.args, self)
self.model = model
return model
    def max_seq_length(self):
        return self.args.max_seq_length
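# Sketch (illustrative): the task is registered as 'multi_class' and built from
# parsed args; setup_task asserts that --num_classes is positive.
#   import argparse
#   args = argparse.Namespace(num_classes=3, max_seq_length=128)
#   task = MultiClassTask.setup_task(args)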
|