content stringlengths 5 1.05M |
|---|
from statsmodels.regression.linear_model import OLS
import numpy as np
def _calc_nodewise_row(exog, idx, alpha):
    """Estimate the nodewise-regression coefficients for variable ``idx``.

    Regresses ``exog[:, idx]`` on the remaining columns with an L1
    penalty; the result is one row of the ingredients for the
    approximate inverse covariance.

    Parameters
    ----------
    exog : array_like
        The weighted design matrix for the current partition.
    idx : scalar
        Index of the current variable.
    alpha : scalar or array_like
        The penalty weight. A scalar applies uniformly; a vector is
        aligned with the coefficients, so its ``idx`` entry is dropped.

    Returns
    -------
    array_like of length p - 1 with the penalized coefficients.

    Notes
    -----
    nodewise_row_i = arg min 1/(2n) ||exog_i - exog_-i gamma||_2^2
                     + alpha ||gamma||_1
    """
    n_vars = exog.shape[1]
    others = [j for j in range(n_vars) if j != idx]
    # a vector penalty is indexed per remaining coefficient
    penalty = alpha if np.isscalar(alpha) else alpha[others]
    model = OLS(exog[:, idx], exog[:, others])
    return model.fit_regularized(alpha=penalty).params
def _calc_nodewise_weight(exog, nodewise_row, idx, alpha):
"""calculates the nodewise_weightvalue for the idxth variable, used to
estimate approx_inv_cov.
Parameters
----------
exog : array_like
The weighted design matrix for the current partition.
nodewise_row : array_like
The nodewise_row values for the current variable.
idx : scalar
Index of the current variable
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
Returns
-------
A scalar
Notes
-----
nodewise_weight_i = sqrt(1/n ||exog,i - exog_-i nodewise_row||_2^2
+ alpha ||nodewise_row||_1)
"""
n, p = exog.shape
ind = list(range(p))
ind.pop(idx)
# handle array alphas
if not np.isscalar(alpha):
alpha = alpha[ind]
d = np.linalg.norm(exog[:, idx] - exog[:, ind].dot(nodewise_row))**2
d = np.sqrt(d / n + alpha * np.linalg.norm(nodewise_row, 1))
return d
def _calc_approx_inv_cov(nodewise_row_l, nodewise_weight_l):
"""calculates the approximate inverse covariance matrix
Parameters
----------
nodewise_row_l : list
A list of array-like object where each object corresponds to
the nodewise_row values for the corresponding variable, should
be length p.
nodewise_weight_l : list
A list of scalars where each scalar corresponds to the nodewise_weight
value for the corresponding variable, should be length p.
Returns
------
An array-like object, p x p matrix
Notes
-----
nwr = nodewise_row
nww = nodewise_weight
approx_inv_cov_j = - 1 / nww_j [nwr_j,1,...,1,...nwr_j,p]
"""
p = len(nodewise_weight_l)
approx_inv_cov = -np.eye(p)
for idx in range(p):
ind = list(range(p))
ind.pop(idx)
approx_inv_cov[idx, ind] = nodewise_row_l[idx]
approx_inv_cov *= -1 / nodewise_weight_l[:, None]**2
return approx_inv_cov
class RegularizedInvCovariance(object):
    """Estimate a regularized inverse covariance with nodewise regression.

    Parameters
    ----------
    exog : array_like
        A weighted design matrix for covariance.

    Attributes
    ----------
    exog : array_like
        A weighted design matrix for covariance.
    alpha : scalar
        Regularizing constant.
    """

    def __init__(self, exog):
        self.exog = exog

    def fit(self, alpha=0):
        """Run one nodewise regression per column and store the assembled
        approximate inverse covariance on the instance.

        Parameters
        ----------
        alpha : scalar
            Regularizing constant.
        """
        n_obs, n_vars = self.exog.shape
        rows = []
        weights = []
        for col in range(n_vars):
            row = _calc_nodewise_row(self.exog, col, alpha)
            rows.append(row)
            weights.append(_calc_nodewise_weight(self.exog, row, col, alpha))
        self._approx_inv_cov = _calc_approx_inv_cov(np.array(rows),
                                                    np.array(weights))

    def approx_inv_cov(self):
        """Return the matrix computed by the most recent ``fit`` call."""
        return self._approx_inv_cov
|
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.knowledge_interfaces.relation import Relation
from pycatia.system_interfaces.any_object import AnyObject
class Parameter(AnyObject):
    """Wrapper for the CATIA V5 ``Parameter`` COM object.

    A parameter can be computed from a relation: formula, program, or
    check.  It is an abstract object which is not intended to be created
    as such; the integer, boolean, real, and string parameters derive
    from it.

    See also: IntParam, BoolParam, RealParam, StrParam, Formula, Rule,
    Check.
    """

    def __init__(self, com_object):
        super().__init__(com_object)
        # keep a direct handle on the underlying COM object
        self.parameter = com_object

    @property
    def comment(self) -> str:
        """str: the parameter object comment (read/write)."""
        return self.parameter.Comment

    @comment.setter
    def comment(self, value: str):
        self.parameter.Comment = value

    @property
    def context(self) -> AnyObject:
        """AnyObject: the context of the parameter (read-only) -- a part,
        a product, a drafting, or a process, depending on where the
        parameter lives."""
        return AnyObject(self.parameter.Context)

    @property
    def hidden(self) -> bool:
        """bool: whether the parameter is hidden (read/write)."""
        return self.parameter.Hidden

    @hidden.setter
    def hidden(self, value: bool):
        self.parameter.Hidden = value

    @property
    def is_true_parameter(self) -> bool:
        """bool: True for a true parameter (real, dimension, string, ...)
        as opposed to a geometrical one (isolated points, curves,
        surfaces).  Read-only."""
        return self.parameter.IsTrueParameter

    @property
    def optional_relation(self) -> Relation:
        """Relation: the relation usable to compute the parameter
        (read-only).  The underlying COM property may be NULL when no
        relation exists, so callers should test the result."""
        return Relation(self.parameter.OptionalRelation)

    @property
    def read_only(self) -> bool:
        """bool: whether the parameter cannot be modified (read-only)."""
        return self.parameter.ReadOnly

    @property
    def renamed(self) -> bool:
        """bool: whether the parameter has been renamed (read-only)."""
        return self.parameter.Renamed

    @property
    def user_access_mode(self) -> int:
        """int: the user access mode of the parameter (read-only).

        0 -- read-only parameter (cannot be destroyed);
        1 -- read/write parameter (cannot be destroyed);
        2 -- user parameter (can be read, written and destroyed).
        """
        return self.parameter.UserAccessMode

    def rename(self, i_name: str) -> None:
        """Rename the parameter.

        If ``i_name`` contains the "Local:" prefix the local name is
        changed; otherwise the global name is.

        :param str i_name: the new name of the parameter.
        :return: None
        """
        return self.parameter.Rename(i_name)

    def valuate_from_string(self, i_value: str) -> None:
        """Valuate the parameter from a string.

        The expected string depends on the parameter nature: "True" or
        "False" for Boolean, a numerical value for Integer or Real, a
        numerical value with or without a unit for Dimension.

        :param str i_value: the value to assign.
        :return: None
        """
        return self.parameter.ValuateFromString(i_value)

    def value_as_string(self) -> str:
        """Return the value of the parameter as a string."""
        return self.parameter.ValueAsString()

    def __repr__(self):
        return f'Parameter(name="{self.name}")'
|
from setuptools import setup
# Package metadata for abberivator_flow; installed via `pip install .`.
setup(name='abberivator_flow',
      version='0.0.1',
      description='nan',  # NOTE(review): looks like a NaN export artifact, not a real description -- confirm intended text
      url='#',  # NOTE(review): placeholder URL
      author='dinhanhx',
      author_email='dinhanhx@gmail.com',
      license='The Unlicensed',  # NOTE(review): the public-domain license is usually spelled "The Unlicense" -- confirm
      packages=['abberivator_flow'],
      zip_safe=False)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from enum import Enum, unique
from math import sqrt
from typing import Optional, List, Dict
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops import PoolingMode
@unique
class PoolingType(Enum):
    """How embeddings for a bag of ids are reduced (or not, for NONE)."""
    SUM = "SUM"
    MEAN = "MEAN"
    NONE = "NONE"
@unique
class DataType(Enum):
    """
    Our fusion implementation supports only certain types of data
    so it makes sense to restrict in a non-fused version as well.
    """
    FP32 = "FP32"
    FP16 = "FP16"
    INT8 = "INT8"
    INT4 = "INT4"
    INT2 = "INT2"
# Bit width of a single embedding element for each supported DataType.
DATA_TYPE_NUM_BITS: Dict[DataType, int] = {
    DataType.FP32: 32,
    DataType.FP16: 16,
    DataType.INT8: 8,
    DataType.INT4: 4,
    DataType.INT2: 2,
}
def dtype_to_data_type(dtype: torch.dtype) -> DataType:
    """Map a ``torch.dtype`` to the corresponding :class:`DataType`.

    Raises:
        Exception: if ``dtype`` has no DataType equivalent.
    """
    if dtype == torch.float or dtype == torch.float32:
        return DataType.FP32
    elif dtype == torch.half or dtype == torch.float16:
        return DataType.FP16
    elif dtype in (torch.quint8, torch.qint8):
        return DataType.INT8
    # Bug fix: torch has no ``quint4``/``qint4``/``quint2``/``qint2``
    # attributes, so the original branches raised AttributeError when
    # reached.  The sub-byte quantized dtypes are ``quint4x2`` (two
    # 4-bit values per byte) and ``quint2x4`` (four 2-bit values).
    elif dtype == torch.quint4x2:
        return DataType.INT4
    elif dtype == torch.quint2x4:
        return DataType.INT2
    else:
        raise Exception(f"Invalid data type {dtype}")
def pooling_type_to_pooling_mode(pooling_type: PoolingType) -> PoolingMode:
    """Translate a ``PoolingType`` into an fbgemm ``PoolingMode``.

    Raises an ``Exception`` for pooling types with no fbgemm mapping
    here (e.g. ``PoolingType.NONE``).
    """
    if pooling_type == PoolingType.SUM:
        return PoolingMode.SUM
    if pooling_type == PoolingType.MEAN:
        return PoolingMode.MEAN
    raise Exception(f"Invalid pooling type {pooling_type}")
def pooling_type_to_str(pooling_type: PoolingType) -> str:
    """Return the lower-case string name for SUM/MEAN pooling.

    Raises ``ValueError`` for any other pooling type.
    """
    if pooling_type == PoolingType.SUM:
        return "sum"
    if pooling_type == PoolingType.MEAN:
        return "mean"
    raise ValueError(f"Unsupported pooling type {pooling_type}")
def data_type_to_sparse_type(data_type: DataType) -> SparseType:
    """Translate a ``DataType`` into the matching fbgemm ``SparseType``."""
    try:
        return {
            DataType.FP32: SparseType.FP32,
            DataType.FP16: SparseType.FP16,
            DataType.INT8: SparseType.INT8,
            DataType.INT4: SparseType.INT4,
            DataType.INT2: SparseType.INT2,
        }[data_type]
    except KeyError:
        raise ValueError(f"Invalid DataType {data_type}") from None
@dataclass
class BaseEmbeddingConfig:
    """Shared configuration for a single embedding table."""

    num_embeddings: int
    embedding_dim: int
    name: str = ""
    data_type: DataType = DataType.FP32
    feature_names: List[str] = field(default_factory=list)
    weight_init_max: Optional[float] = None
    weight_init_min: Optional[float] = None

    def get_weight_init_max(self) -> float:
        """Upper bound for weight init; defaults to sqrt(1/num_embeddings)."""
        default = sqrt(1 / self.num_embeddings)
        return default if self.weight_init_max is None else self.weight_init_max

    def get_weight_init_min(self) -> float:
        """Lower bound for weight init; defaults to -sqrt(1/num_embeddings)."""
        default = -sqrt(1 / self.num_embeddings)
        return default if self.weight_init_min is None else self.weight_init_min

    def num_features(self) -> int:
        """Number of features mapped to this table."""
        return len(self.feature_names)
@dataclass
class EmbeddingTableConfig(BaseEmbeddingConfig):
    """Table-level config: adds pooling, weighting, feature-processor and
    per-embedding naming on top of :class:`BaseEmbeddingConfig`."""
    pooling: PoolingType = PoolingType.SUM
    is_weighted: bool = False
    has_feature_processor: bool = False
    embedding_names: List[str] = field(default_factory=list)
@dataclass
class EmbeddingBagConfig(BaseEmbeddingConfig):
    """Config for a pooled embedding table; adds only the pooling choice."""
    pooling: PoolingType = PoolingType.SUM
@dataclass
class EmbeddingConfig(BaseEmbeddingConfig):
    """Embedding table config with no extra fields beyond the base."""
    pass
|
from .abstract import Serializable
class Conflict(Serializable):
    """Serializable record describing a conflict entry."""

    def __init__(self, **kwargs):
        """Populate fields from keyword arguments, defaulting any missing ones."""
        defaults = {
            'name': '',
            'date': None,
            'start_date': None,
            'id': '',
            'source': '',
        }
        for attr, default in defaults.items():
            setattr(self, attr, kwargs.get(attr, default))
|
#!/usr/bin/python
from tkinter import *
from PIL import Image,ImageTk
import os,re,threading,pyfiglet
import validate, dengine
#beta version and will be added soon
class GifLabel(Label): #only gif player
    """Tkinter Label that plays an animated GIF by cycling pre-rendered frames."""
    global GifLabel  # NOTE(review): a ``global`` at class scope is a no-op -- confirm intent
    def __init__(self, master, filename,frame):
        # Decode every frame of the GIF up front.
        gif_image = Image.open(filename)
        seq = []
        try:
            while 1:
                seq.append(gif_image.copy())
                gif_image.seek(len(seq)) # skip to next frame
        except EOFError:
            pass # we're done: seeking past the last frame raises EOFError
        try:
            # delay between frames in ms; the ``frame`` argument overrides
            # the GIF's own duration metadata
            self.delay = frame #gif_image.info['duration']
        except KeyError:
            self.delay = 100
        first = seq[0].convert('RGBA')
        self.frames = [ImageTk.PhotoImage(first)]
        Label.__init__(self, master, image=self.frames[0],bg='#000000',border=0)
        # Composite each later frame over the previous one (GIF frames may
        # be partial deltas), then freeze it as a PhotoImage.
        temp = seq[0]
        for image in seq[1:]:
            temp.paste(image)
            frame = temp.convert('RGBA')
            self.frames.append(ImageTk.PhotoImage(frame))
        self.idx = 0
        self.cancel = self.after(self.delay, self.play)
    def play(self):
        """Show the next frame and reschedule itself on the Tk event loop."""
        self.config(image=self.frames[self.idx])
        self.idx += 1
        if self.idx == len(self.frames):
            self.idx = 0
        self.cancel = self.after(self.delay, self.play)
#Move Window
def move_window(event):
    """Drag handler: reposition the (borderless) window under the mouse."""
    root.geometry('+{0}+{1}'.format(event.x_root, event.y_root))
def on_closing(*arg):
    """Destroy the Tk root and hard-exit so background threads die too."""
    #if messagebox.askokcancel("Quit", "Do you want to quit?"):
    root.destroy()
    os._exit(0)
#Change Page
def changepage():
    """Toggle between the two UI pages (tuby <-> tuby_plus)."""
    global page_Num, root,app
    app.pack_forget()
    if page_Num == 1:
        tuby_plus(root)
        page_Num = 2
    else:
        tuby(root)
        page_Num = 1
def validation(event):
    """Download-button click handler: show the loading UI, then read and
    validate the URL from the entry widget."""
    global url_link
    OnPressed_Download()
    url_link = url.get()
    validate.redirect_link(url_link)
def net_check(*args):
    """Background loop updating the online/offline indicator in the UI.

    NOTE(review): the ``while True`` loop has no sleep, so it polls
    continuously on its thread -- confirm whether a delay is intended.
    """
    global status_text,status
    status_text = Label(app, font = ('Courier 15 bold'),bg='#2c2c2c',fg='white')
    status_text.place(x = 27,y=267)
    status = Label(app,bg='#2c2c2c')
    status.place(x=10,y=270)
    while True:
        # validate.check returns one of the two status images
        status_img = validate.check(online_img,offline_img)
        status.config(image = status_img)
        off_or_on = StringVar()
        off_or_on.set('online') if status_img==online_img else off_or_on.set('offline')
        status_text.config(textvariable = off_or_on,)
def loading(chunk,file_size,remaining):
    """Download progress callback: update the percentage label.

    Computes percent done from the engine's total size and the bytes
    remaining, then pushes it into the ``per`` StringVar.
    """
    file_downloaded = int(dengine.file_size-remaining)
    pers = str((file_downloaded/dengine.file_size)*100)
    per.set(pers)
    print(pers)
    #if per == 100:
# multi-tasking with internet connection check and downloading video
def downThread(url_link,loading_label):
    """Run the download on a background thread so the UI stays responsive.

    Parameters
    ----------
    url_link : str
        URL handed to the download engine.
    loading_label : tkinter.Label
        Progress label (currently unused here).
    """
    print('Hello World!!')
    # Bug fix: the original passed ``dengine.ytdownloader(url_link)`` as
    # ``target``, which CALLS the downloader synchronously on this thread
    # and gives Thread its return value.  Pass the callable and its
    # argument separately so the download actually runs on the new thread.
    download_thread = threading.Thread(target=dengine.ytdownloader, args=(url_link,))
    download_thread.start()
def tuby_plus(root):
    """Build the second ("plus") page; currently just an empty dark frame.

    NOTE(review): ``app`` here is a local, shadowing the global ``app``
    that other handlers reconfigure -- confirm this is intended.
    """
    app = Frame(root,bg='#2c2c2c')
    app.pack()
def clear_entry(*args):
    """Clear the URL entry (bound to the entry's first click)."""
    url.delete(0, END)
def OnPressed_Download(*args):
    """Swap the main page out for the loading GIF and progress label."""
    print('OnPressed_Download')
    app.pack_forget()
    loading_gif.place(x=180,y=50)
    loading_label.pack(side="bottom",fill=X)
def OnHover_Download(*args):
    """Darken the download caption while the cursor is over it."""
    #print('OnHover_Download')
    download_text.config(fg='#2c2c2c')
def OnLeave_Download(*args):
    """Restore the download caption colour when the cursor leaves."""
    #print('OnLeave_Download')
    download_text.config(fg='#f3f3f3')
#Minimal Ui
def tuby(root):
    """Build the main (minimal) downloader page and start the
    connectivity-check thread.  All widgets are published as globals so
    the event handlers and mode_switch can reconfigure them."""
    global app,url_img,url_label,Download_label,downloader_label,download_text,downloader_text,url,thread
    app = Frame(root,bg='#2c2c2c')
    app.pack(fill='both', expand=True)
    downloader_text = Label(app,text = 'uby Downloader',font=('Calibri',15,'bold'),bg='#2c2c2c',fg='white')
    downloader_text.place(x=65,y=45)
    downloader_label = Label(app,image = downloader_img,bg='#2c2c2c' )
    downloader_label.place(x=15,y=25)
    #Internet Check
    thread = threading.Thread(target= net_check)
    thread.start()
    url_label = Label(app,image = url_img,bg='#2c2c2c')
    url_label.place(x=30,y=140)
    url = Entry(app, width = 35,border=1, relief= SUNKEN , font = ('verdana',15))
    url.place(x=90,y=140)
    url.bind("<Button-1>", clear_entry)
    url.insert(0, 'Enter a Url')
    Download_label = Label(app,image = download_img,bg='#2c2c2c')
    Download_label.place(x=160,y=180)
    Download_label.bind('<Button-1>', validation)
    Download_label.bind('<Enter>', OnHover_Download)
    Download_label.bind('<Leave>', OnLeave_Download)
    download_text = Label(app,text = 'Download',font=('Helvetica',20,'bold','italic'),bg='#df0024',fg='white')
    download_text.place(x=200,y=218)
    download_text.bind('<Button-1>', validation)
    download_text.bind('<Enter>', OnHover_Download)
def mode_switch():
    """Toggle the whole UI between dark and light themes.

    Recolours the root, title bar, window buttons and the main page's
    widgets, flipping ``btnState`` to remember the current theme.
    """
    global btnState
    if btnState: #Dark Mode
        mode.config(image=dark_img, bg='#202020',activebackground='#090909',bd=0,highlightcolor="#202020", highlightbackground="#202020",)
        root['bg'] = ('#202020')
        title_bar.config(bg='#212121')
        close_button.config(bg='#090909',fg='#888',highlightcolor="#090909", highlightbackground="#090909")
        add_button.config(bg='#090909',fg='#888',highlightcolor="#090909", highlightbackground="#090909")
        minus_button.config(bg='#090909',fg='#888',highlightcolor="#090909", highlightbackground="#090909")
        app.config(bg='#2c2c2c')
        downloader_text.config(bg='#2c2c2c',fg='white')
        downloader_label.config(bg='#2c2c2c')
        url_label.config(bg='#2c2c2c')
        Download_label.config(bg='#2c2c2c')
        status_text.config(bg='#2c2c2c',fg='#f3f3f3')
        status.config(bg='#2c2c2c',fg='#f3f3f3')
        btnState = False
    else: #Light Mode
        mode.config(image=light_img,bg='#cccccc',activebackground='#cccccc',bd=0,highlightcolor="#cccccc", highlightbackground="#cccccc", )
        root['bg']=('white')
        title_bar.config(bg='#cccccc')
        close_button.config(bg='#74777a',fg='black',activebackground='#bb0000', highlightcolor="#74777a", highlightbackground="#74777a")
        add_button.config(bg='#74777a', fg='black',activebackground='#e9730c', highlightcolor="#74777a", highlightbackground="#74777a")
        minus_button.config(bg='#74777a',fg='black',activebackground="#107e3e", highlightcolor="#74777a", highlightbackground="#74777a")
        app.config(bg='#f3f3f3')
        downloader_text.config(bg='#f3f3f3',fg='black')
        downloader_label.config(bg='#f3f3f3')
        url_label.config(bg='#f3f3f3')
        Download_label.config(bg='#f3f3f3')
        status_text.config(bg='#f3f3f3',fg='#2c2c2c')
        status.config(bg='#f3f3f3',fg='#2c2c2c')
        # NOTE(review): these rebind local names only -- the module-level
        # ``loading_gif`` is untouched, so the light-theme GIF may never
        # actually be shown; confirm intent.
        loading_img = ('light-loading.gif')
        loading_gif = GifLabel(root,srcPath+'/assets/'+loading_img,100)
        btnState = True
if __name__ == "__main__":
    # NOTE(review): ``global`` at module level is a no-op -- confirm intent.
    global loading_label,per,loading_img
    print(pyfiglet.figlet_format("Tuby", font = "slant" ) )
    page_Num = 1  # which of the two pages is showing (1 = tuby, 2 = tuby_plus)
    srcPath = os.path.dirname(os.path.abspath(__file__))
    root = Tk()
    root.protocol("WM_DELETE_WINDOW", on_closing)
    #root.overrideredirect(True)
    #root.wm_attributes('-type', 'splash')
    #root.update_idletasks()
    root['bg'] = ('black') #202020
    # centre a fixed 600x350 window on the screen
    screen_width = root.winfo_screenwidth()
    screen_height = root.winfo_screenheight()
    x_coordinate = (screen_width/2) - (600/2)
    y_coordinate = (screen_height/2) - (350/2)
    root.geometry("{}x{}+{}+{}".format(600, 350, int(x_coordinate), int(y_coordinate)))
    #Title Bar
    title_bar = Frame(root, bg='#212121', relief='raised', bd=0, height=20, width=600)
    close_button = Button(title_bar ,text='X', command=on_closing, width=1, bg="#090909", fg="#888",activebackground='#ff453a',activeforeground='white', bd=0,highlightcolor="#090909", highlightbackground="#090909",)
    add_button = Button(title_bar ,text='+', command=changepage, width=1, bg="#090909", fg="#888",activebackground='#ff9f0a',activeforeground='black', bd=0,highlightcolor="#090909", highlightbackground="#090909",)
    minus_button = Button(title_bar ,text='_', command=root.destroy, width=1, bg="#090909", fg="#888",activebackground='#32d74b',activeforeground='white', bd=0,highlightcolor="#090909", highlightbackground="#090909",)
    btnState = False  # theme flag toggled by mode_switch
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS) -- confirm the pinned Pillow version.
    dark_img = Image.open(srcPath + '/assets/dark-mode.png')
    dark_img = dark_img.resize((37,20),Image.ANTIALIAS)
    dark_img = ImageTk.PhotoImage(dark_img)
    light_img = Image.open(srcPath + '/assets/light-mode.png')
    light_img = light_img.resize((37,20),Image.ANTIALIAS)
    light_img = ImageTk.PhotoImage(light_img)
    mode = Button(title_bar,image= dark_img,command=mode_switch,bg='#202020',activebackground='#090909',bd=0,highlightcolor="#202020", highlightbackground="#202020",)
    mode.pack(side='left')
    title_bar.pack(side='top', fill=X)
    close_button.pack(side='right')
    add_button.pack(side='right')
    minus_button.pack(side='right')
    title_bar.bind('<B1-Motion>', move_window)
    # status indicator images (online/offline dots)
    offline_img = Image.open(srcPath + '/assets/red.png')
    offline_img = offline_img.resize((10,10),Image.ANTIALIAS)
    offline_img = ImageTk.PhotoImage(offline_img)
    online_img = Image.open(srcPath + '/assets/green.png')
    online_img = online_img.resize((10,10),Image.ANTIALIAS)
    online_img = ImageTk.PhotoImage(online_img)
    downloader_img = Image.open(srcPath + '/assets/downloader.png')
    downloader_img = downloader_img.resize((50,50),Image.ANTIALIAS)
    downloader_img = ImageTk.PhotoImage(downloader_img)
    url_img = Image.open(srcPath + '/assets/url.png')
    url_img = url_img.resize((30,30),Image.ANTIALIAS)
    url_img = ImageTk.PhotoImage(url_img)
    download_img = Image.open(srcPath + '/assets/download.png')
    download_img = download_img.resize((260,100),Image.ANTIALIAS)
    download_img = ImageTk.PhotoImage(download_img)
    tuby(root)  # build the first page
    loading_img = ('light-loading.gif')
    loading_gif = GifLabel(root,srcPath+'/assets/'+loading_img,100)
    per = StringVar()
    per.set('Please wait...')
    loading_label= Label(root,textvariable = per,bg='white',fg='black')
    copyright = Label(root, text="Cup ,\xa9 2020", bg= "red",fg="white" )
    copyright.pack(side="bottom",fill=X)
    root.mainloop()
from .base_estimator import BaseEstimator
from .base_module import BaseModule
from .base_learning_rate import BaseLearningRate
__all__ = ["BaseEstimator", "BaseModule", "BaseLearningRate"]
|
# Generated by Django 2.1.5 on 2019-01-23 11:05
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: joins two divergent 'articles' migration branches.

    Carries no schema operations -- it exists only to unify the
    dependency graph.
    """
    dependencies = [
        ('articles', '0014_auto_20190122_1817'),
        ('articles', '0016_merge_20190123_0814'),
    ]
    operations = [
    ]
|
#!/usr/bin/env python3
"""
tensorflow
"""
import tensorflow as tf
def create_Adam_op(loss, alpha, beta1, beta2, epsilon):
    """Create the Adam training operation (TF1 graph mode).

    Args:
        loss: tensor to minimize.
        alpha: learning rate.
        beta1: decay rate for the first-moment estimates.
        beta2: decay rate for the second-moment estimates.
        epsilon: small constant for numerical stability.

    Returns:
        The op that performs one Adam optimization step.
    """
    return tf.train.AdamOptimizer(alpha, beta1, beta2, epsilon).minimize(loss)
|
import os
import sys
import tempfile
from datetime import datetime
from pprint import pprint
import ray
from ray import tune
from ray.rllib.agents import Trainer
from ray.tune.logger import UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from command_line_tools.run_tools import setup_run
from scenario.trajectory_tracking.experiment.experiment_common import setup_environment
from trainer.coordinated_dps_trainer import CoordinatedDPSTrainer
from trainer.es_actual import ESActualTrainer
from trainer.es_co_trainer import ESCOTrainer
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def train(rllib_config, reporter):
    """Tune trainable: build the environment/trainer pair, then iterate
    training up to ``max_iters``, reporting every result and saving a
    checkpoint at the configured frequency.

    NOTE(review): ``make_environment_and_controller`` is not among this
    file's visible imports -- confirm where it is defined.
    """
    ego_starting_distance = 600.0  # retained for the commented-out worker setup below
    environment, trainer = make_environment_and_controller(None, rllib_config)
    # trainer = make_trainer(config)
    checkpoint_frequency = 1
    max_iters = int(100e3)
    # def set_starting_distance(ego_starting_distance):
    #     trainer.workers.foreach_worker(
    #         lambda ev: ev.foreach_env(
    #             lambda env: env.process.set_starting_distance(ego_starting_distance)))
    #
    # set_starting_distance(ego_starting_distance)
    for i in range(max_iters):
        result = trainer.train()
        reporter(**result)
        if i % checkpoint_frequency == 0:
            # checkpoints go to the trainer's default logdir
            checkpoint_path = trainer.save()
            print('saved to checkpoint ', checkpoint_path)
def on_episode_end(info):
    """RLlib episode-end callback; currently only unpacks the info dict.

    The custom-metric line is commented out, so this is effectively a
    stub kept for future metric collection.
    """
    # print(info)
    episode = info['episode']
    # trainer = info['trainer']
    base_env = info['env']
    # episode.custom_metrics['ego_starting_distance'] = base_env.get_unwrapped()[0].process.ego_starting_distance
print('begin trainer')
# NOTE(review): ``common_default_config``, ``make_rllib_config`` and the
# functions used inside train() are not visible in this file's imports --
# confirm their origin.
default_config = common_default_config
ray_num_cpus = None
# Cluster mode: invoked as ``script ... ray <redis_password> <num_cpus>``.
if len(sys.argv) >= 4 and sys.argv[-3] == 'ray':
    redis_password = sys.argv[-2]
    ray_num_cpus = int(sys.argv[-1])
    ray.init(address=os.environ["ip_head"], _redis_password=redis_password)
    sys.argv = sys.argv[0:-3]  # strip the ray arguments before config parsing
    print('ray configuration: ', redis_password, ray_num_cpus, 'argv: ', sys.argv)
else:
    # Local mode: start a fresh Ray instance if none is running.
    if not ray.is_initialized():
        ray.init()
print('setup config')
config, run_prefix = setup_run(default_config)
# config, this_env = setup_environment_config(config)
print("Nodes in the Ray cluster:")
pprint(ray.nodes())
pprint(ray.cluster_resources())
if ray_num_cpus is not None:
    # reserve one CPU for the driver
    config['rllib']['num_workers'] = ray_num_cpus - 1
rllib_config = make_rllib_config(config)
print('running tune')
tune.run(
    train,
    name=config['name'],
    trial_name_creator=lambda trial: config['name'],
    config=rllib_config,
    # local_dir='~/ray_results'
    # resources_per_trial={'gpu':1},
)
print('shutting down')
ray.shutdown()
print('done')
# ch3/example5.py
import queue
import threading
import time
class MyThread(threading.Thread):
    """Worker thread that announces itself, drains the shared queue, and exits."""

    def __init__(self, name):
        """Store the display name used in the start/exit messages."""
        super().__init__()
        self.name = name

    def run(self):
        """Print a banner, process the queue until empty, print a farewell."""
        print('Starting thread %s.' % self.name)
        process_queue()
        print('Exiting thread %s.' % self.name)
def process_queue():
    """Drain ``my_queue``, printing the factors of each item.

    Uses a non-blocking get so the calling thread returns as soon as the
    queue is empty instead of waiting for more work.
    """
    while True:
        try:
            x = my_queue.get(block=False)
        except queue.Empty:
            return
        else:
            print_factors(x)
            time.sleep(1)  # simulate slow per-item work
def print_factors(x):
    """Print every positive factor of ``x`` followed by a separator rule."""
    factors = [i for i in range(1, x + 1) if x % i == 0]
    body = ''.join('%d ' % f for f in factors)
    print('Positive factors of %i are: ' % x + body + '\n' + '_' * 20)
# setting up variables
input_ = [1, 10, 4, 3]
# filling the queue with the numbers to factor
my_queue = queue.Queue()
for x in input_:
    my_queue.put(x)
# initializing and starting 3 threads that all pull from the same queue
thread1 = MyThread('A')
thread2 = MyThread('B')
thread3 = MyThread('C')
thread1.start()
thread2.start()
thread3.start()
# joining all 3 threads before declaring the run finished
thread1.join()
thread2.join()
thread3.join()
print('Done.')
|
'''
@Author: WANG Maonan, Yanhui Wu
@Date: 2020-12-26 13:23:34
@Description: 对 session 中所有 packet 的匿名化处理
@LastEditTime: 2021-02-05 21:20:14
'''
import os
from scapy.all import sniff, wrpcap
from TrafficFlowClassification.TrafficLog.setLog import logger
def customAction(pcap):
    """Anonymize a single packet of a session in place.

    Overwrites MAC, IPv4/IPv6 addresses, TCP/UDP ports and ARP fields
    with fixed zero values so the capture carries no identifying data.

    Args:
        pcap: one packet of the session.
    """
    src_ip = "0.0.0.0"
    src_ipv6 = "0:0:0:0:0:0:0:0"
    src_port = 0
    src_mac = "00:00:00:00:00:00"
    dst_ip = "0.0.0.0"
    dst_ipv6 = "0:0:0:0:0:0:0:0"
    dst_port = 0
    dst_mac = "00:00:00:00:00:00"
    if 'Ether' in pcap:
        pcap.src = src_mac  # zero out the source MAC address
        pcap.dst = dst_mac  # zero out the destination MAC address
    if 'IP' in pcap:
        pcap["IP"].src = src_ip
        pcap["IP"].dst = dst_ip
    if 'IPv6' in pcap:
        pcap["IPv6"].src = src_ipv6
        pcap["IPv6"].dst = dst_ipv6
    if 'TCP' in pcap:
        pcap['TCP'].sport = src_port
        pcap['TCP'].dport = dst_port
    if 'UDP' in pcap:
        pcap['UDP'].sport = src_port
        pcap['UDP'].dport = dst_port
    if 'ARP' in pcap:
        pcap["ARP"].psrc = src_ip
        pcap["ARP"].pdst = dst_ip
        pcap["ARP"].hwsrc = src_mac
        pcap["ARP"].hwdst = dst_mac
def session_anonymize(session_path):
    """Anonymize one session pcap, packet by packet.

    A large pcap can raise MemoryError, so ``count`` caps how many
    packets are read; the session is truncated later in the pipeline
    anyway, so anonymizing only the first packets is sufficient.

    Args:
        session_path (str): path of the session pcap.

    Returns:
        The anonymized packets (a scapy.plist.PacketList).
    """
    packets = sniff(offline=session_path, prn=customAction, store=True, count=10000)
    return packets
def anonymize(folder_path):
    """Anonymize every session pcap under ``folder_path``.

    Each pcap is anonymized, the original file removed, and the result
    written back only when it has enough packets.

    NOTE(review): the guard ``len(packets) > 3`` keeps sessions of 4+
    packets, while the original comment said sessions with fewer than 3
    packets are dropped -- confirm the intended threshold.

    Args:
        folder_path (str): root folder to walk.
    """
    for (root, _, files) in os.walk(folder_path):
        logger.info('正在匿名化 {} 下的 pcap 文件'.format(root))
        for Ufile in files:
            pcapPath = os.path.join(root, Ufile)  # full path of the pcap to convert
            packets = session_anonymize(pcapPath)  # anonymize the session
            os.remove(pcapPath)  # delete the original pcap file
            if len(packets)>3:  # drop sessions with too few packets
                wrpcap(pcapPath, packets)  # write the new pcap file
    logger.info('匿名化处理完成!')
    logger.info('==========\n')
|
# Static vessel list for the pilotage ("praticagem") area: each entry
# records the ship name, type, and MMSI identifier.
pratic = {
    'praticagem': [{
        "Navio": 'COSCO SHIPPING THAMES',
        'Tipo': 'Container Ship',
        'MMSI': 477333900,
    },
    {
        "Navio": 'EVER LOADING',
        'Tipo': 'Container Ship',
        'MMSI': 235102681,
    },
    {
        "Navio": 'SAN ANTONIO EXPRESS',
        'Tipo': 'Container Ship',
        'MMSI': 725001534,
    },
    {
        "Navio": 'LOG-IN PANTANAL',
        'Tipo': 'Container Ship',
        'MMSI': 710003840,
    },
    {
        "Navio": 'DUBLIN EXPRESS',
        'Tipo': 'Container Ship',
        'MMSI': 310825000,
    },
    {
        "Navio": 'MARIANETTA',
        'Tipo': 'Container Ship',
        'MMSI': 636014968,
    },
    {
        "Navio": 'CASTILLO DE MACEDA',
        'Tipo': 'Chemical/Products Tanker',
        'MMSI': 710003610,
    },
    {
        "Navio": 'BBC MARYLAND',
        'Tipo': 'General Cargo Ship',
        'MMSI': 305459000,
    },
    {
        "Navio": 'GSL MELINA',
        'Tipo': 'Container Ship',
        'MMSI': 636017803,
    },
    {
        "Navio": 'POLAR CHILE',
        'Tipo': 'Container Ship',
        'MMSI': 636018346,
    },
    {
        "Navio": 'ANTHEA Y',
        'Tipo': 'Container Ship',
        'MMSI': 636016986,
    },
    {
        "Navio": 'YM TRUST',
        'Tipo': 'Container Ship',
        'MMSI': 5631141200,  # NOTE(review): 10 digits; MMSI numbers are normally 9 -- confirm
    },
    ]}
|
from json import loads
from crawler import Crawler
import urllib.request as req
from time import perf_counter, sleep
from strformat import StrFormat
class NativeCrawler(Crawler):
    """Crawler that resolves illustrator names and picture URLs straight
    from the vilipix mirror site (no API key required)."""

    def __init__(self, runtime: dict) -> None:
        super().__init__(runtime)
        # Base URLs for illustrator pages and original-size pictures,
        # both taken from the runtime configuration.
        self.illust_url_base = self.config["format"]["mirror"]["illustrator"]
        self.pic_url_base = self.config["format"]["mirror"]["origin"]

    def map(self, id: str):
        """Resolve an illustrator id to its display name via the page <title>.

        Stores the name in self.illusts; warns and stores nothing when the
        id does not exist on the mirror.
        """
        response = req.urlopen(self.illust_url_base + id)
        page = bytes.decode(response.read())
        start = page.find("<title>") + 7
        end = page.find(" - p站")
        # A missing illustrator page titles itself starting with "p站".
        if page[start: start + 2] == "p站":
            StrFormat.severe_warning(f"Illustrator ID {id} not found.")
            return
        self.illusts[id] = page[start:end]

    def expand(self, key: str, value: str, num: int):
        """Queue jpg and png candidate URLs for pages 0..num-1 of one illust.

        Each entry is a (url, downloaded_flag) pair; the flag starts False.
        """
        res = []
        for i in range(num):
            res.append((f"{self.pic_url_base}{value}_p{i}.jpg", False))
            res.append((f"{self.pic_url_base}{value}_p{i}.png", False))
        if key not in self.pictures:
            self.pictures[key] = res
        else:
            self.pictures[key].extend(res)

    def get(self, id: str, name: str):
        """Page through the mirror API and queue all pictures of one user.

        Stops when the API returns no more rows or when self.cap pictures
        have been collected.
        """
        limit = 30
        offset = 0
        total = 0
        while True:
            webp = req.urlopen(f"https://www.vilipix.com/api/illust?user_id={id}&limit={limit}&offset={offset}")
            webp_json = loads(webp.read().decode('utf-8'))['rows']
            if webp_json == []:
                break
            for row in webp_json:
                num = row['page_count']
                regular_url = row['regular_url']
                # The picture key is the segment between "regular/" and "_p<n>".
                start = regular_url.find("regular/") + 8
                end = regular_url.rfind("_p")
                self.expand(f"{id} {name}", regular_url[start:end], num)
                if self.cap and total >= self.cap:
                    break
                total += 1
            if self.cap and total >= self.cap:
                break
            # BUG FIX: the offset used to advance by the literal 30 instead
            # of `limit`, which would skip or repeat rows if `limit` changed.
            offset += limit

    def gets(self):
        """Fetch picture lists for every known illustrator, timing the run."""
        start = perf_counter()
        for id, name in self.illusts.items():
            print(f"Getting {name}'s pictures...")
            self.get(id, name)
        end = perf_counter()
        print(f"{StrFormat.functional('Crawling')} completed in {StrFormat.time_str(end-start)}.")
|
#!/usr/bin/env python3
import os
import random
import time
import json
import requests
from .configuration import logger
from requests import Timeout
from .helpers import format_address
from kubernetesClient.kubernetesClient import KubernetesClient
from message.message import Message
from partialView.partialView import PartialView, PodDescriptor
from apscheduler.schedulers.background import BackgroundScheduler
class Cyclon(object):
    """CYCLON gossip-based membership protocol for a pod in a Kubernetes
    cluster: bootstraps a partial view of running peers and periodically
    shuffles it with the oldest neighbor."""

    def __init__(self):
        self.ip = os.environ['MY_POD_IP']
        self.k8s = KubernetesClient()
        self.api_version = 'v1'
        self.partialView = PartialView(self.ip)
        self.bootstrap()

    def bootstrap(self):
        """Fill the initial partial view, then start the periodic shuffle."""
        self.bootstrap_exponential_backoff(5, 5)
        self.schedule_change(15, 15)

    def bootstrap_exponential_backoff(self, initial_delay, delay):
        """Wait until enough pods are running, then populate the partial view.

        Args:
            initial_delay: seconds to sleep before the first pod listing.
            delay: base seconds between retries; doubled on each attempt.
        """
        logger.debug("Init", ip=self.ip, partialView=self.partialView)
        time.sleep(initial_delay)
        app_name = os.environ['APP']
        attempt = 1
        ips = self.k8s.list_pods_ips_by_field_selector(label_selector="app="+app_name, field_selector="status.phase=Running")
        logger.debug("Bootstrapping", running_pods=ips, attempt=attempt)
        # Exponential backoff starts in case the number of running pods is lower than the partialView's limit.
        # TODO: Did I consider also that some pods might not be ready yet?
        # TODO: Consider that there is no need to have at least self.partialView.limit peers ready to start!
        # TODO: There can be peers running with an initial partial view of size < self.partialView.limit
        while len(ips) <= self.partialView.limit:
            attempt += 1
            delay *= 2
            time.sleep(delay)
            # BUG FIX: the retry used the hard-coded selector "app=epto"
            # instead of the app name from the environment, unlike the
            # initial query above; use the same selector on every attempt.
            ips = self.k8s.list_pods_ips_by_field_selector(label_selector="app="+app_name, field_selector="status.phase=Running")
            logger.debug("Bootstrapping", running_pods=ips, attempt=attempt)
        # I populate the PartialView and I avoid to consider myself
        try:
            ips.remove(self.ip)
        except ValueError:
            logger.debug("self.ip was not there")
        while not self.partialView.is_full():
            random_ip = random.choice(ips)
            # TODO: REPLACE WITH self.partialView.add_peer_ip(random_ip)
            self.partialView.add_peer(PodDescriptor(random_ip, random.randint(0, 9)))
        logger.debug("Bootstrapping", partialView=self.partialView)

    def schedule_change(self, initial_delay, interval):
        """Start the periodic view shuffle after a random initial delay.

        The random delay de-synchronizes peers that started together.
        """
        initial_delay = random.randint(0, initial_delay)
        time.sleep(initial_delay)
        scheduler = BackgroundScheduler()
        scheduler.add_job(self.shuffle_partial_view, 'interval', seconds=interval, max_instances=1)
        scheduler.start()

    def shuffle_partial_view(self):
        """Run one CYCLON shuffle round with the oldest neighbor."""
        logger.debug("Shuffling", partialView=self.partialView)
        # 1) Increase by one the age of all neighbors
        logger.debug("Increase by one the age of all neighbors.")
        self.partialView.increment()
        logger.debug("Increment", partialView=self.partialView)
        # 2) Select neighbor Q with the highest age among all neighbors.
        logger.debug("Select neighbor Q with the highest age among all neighbors.")
        oldest_peer = self.partialView.get_oldest_peer()
        logger.debug("SelectOldest", oldest_peer=oldest_peer)
        # 3) Select l - 1 other random neighbors (meaning avoid oldest).
        logger.debug("Select l - 1 other random neighbors (meaning avoid oldest).")
        neighbors = self.partialView.select_neighbors_for_request(oldest_peer)
        logger.debug("SelectNeighbors", neighbors=neighbors)
        # 4) Replace Q's entry with a new entry of age 0 and with P's address.
        logger.debug("Replace Q's entry with a new entry of age 0 and with P's address.")
        neighbors.add_peer_ip(self.ip, allow_self_ip=True)
        logger.debug("AddMyself", neighbors=neighbors)
        try:
            # 5) Send the updated subset to peer Q.
            logger.debug("Send the updated subset to peer Q (oldest_peer).", oldest_peer=oldest_peer.ip)
            response = json.loads(self.send_message(oldest_peer.ip, 'exchange-view', neighbors))
            received_partial_view = PartialView.from_dict(response.get('data'))
            logger.debug("Received", received_partial_view=received_partial_view)
            # 6) I remove the oldest peer from my view
            logger.debug("I remove the oldest peer from my view.", oldest_peer=oldest_peer.ip)
            self.partialView.remove_peer(oldest_peer)
            logger.debug("RemovedOldest", partialView=self.partialView)
            # 7) I merge my view with the one just received
            logger.debug("I merge my view with the one just received.")
            self.partialView.merge(neighbors, received_partial_view)
            logger.debug("Merged", partialView=self.partialView)
        except Timeout:
            logger.error("TimeoutException: Request to " + str(oldest_peer.ip) + " timed out.")

    def send_message(self, destination_ip, path, data):
        """POST `data` to a peer's API endpoint and return the raw response body.

        TEST_IP, when set, overrides the destination (used in tests).
        """
        destination = os.getenv('TEST_IP', format_address(destination_ip, 5000))
        m = Message(format_address(self.ip, 5000), destination, data)
        logger.debug("Request", request=m.to_json())
        ret = requests.post(m.destination + '/' + self.api_version + '/' + path, json=m.to_json(), timeout=5)
        logger.debug("Response", response=ret.content)
        return ret.content
|
# Root URL of the ETH Zurich video portal lecture listing.
BASE_URL = "https://video.ethz.ch/lectures/"
|
"""
TechMiner.datasets
===============================================================================
Overview
-------------------------------------------------------------------------------
The functions in this module allows the user to load bibliographical datasets.
Functions in this module
-------------------------------------------------------------------------------
"""
import pandas as pd
from os.path import join, dirname
from . import RecordsDataFrame
class Bunch(dict):
    """Dictionary whose keys are also reachable as attributes.

    Mirrors the scikit-learn ``Bunch`` container: ``b.key`` and
    ``b['key']`` are interchangeable for both reads and writes.
    """

    def __init__(self, **kwargs):
        super().__init__(kwargs)

    def __setattr__(self, key, value):
        # Attribute writes land in the underlying dict.
        self[key] = value

    def __getattr__(self, key):
        # Attribute reads fall back to dict lookup; unknown names must
        # surface as AttributeError so hasattr()/getattr() behave normally.
        if key in self:
            return self[key]
        raise AttributeError(key)

    def __dir__(self):
        # Expose the stored keys for tab completion.
        return self.keys()

    def __setstate__(self, state):
        # No extra unpickling state: dict already restores the contents.
        pass
def _load_bunch(rst_name, json_name, **read_json_kwargs):
    """Load one packaged dataset: its .rst description and its .json records.

    All public loaders below repeated this open/read/parse sequence; it is
    factored out here so they differ only in file names and read_json options.

    Args:
        rst_name (str): .rst description file, relative to this module.
        json_name (str): .json data file, relative to this module.
        **read_json_kwargs: forwarded to ``pandas.read_json`` (e.g. orient, lines).

    Returns:
        Bunch with ``data`` (RecordsDataFrame) and ``DESCR`` (str) fields.
    """
    module_path = dirname(__file__)
    # `with` ensures the description file handle is closed promptly.
    with open(join(module_path, rst_name)) as rst_file:
        fdescr = rst_file.read()
    fdata = RecordsDataFrame(
        pd.read_json(join(module_path, json_name), **read_json_kwargs)
    )
    return Bunch(data=fdata, DESCR=fdescr)


def load_dynacol():
    """Load and return the dynacol dataset.
    This dataset contains the bibliographical information about publications
    in Scopus of the Dyna-Colombia journal, edited by Facultad de Minas,
    Universidad Nacional de Colombia, Sede Medellin, between January, 2010
    and September, 2019.
    Args:
        None.
    Returns:
        A dictionary.
    **Examples**
    >>> from techminer.datasets import load_dynacol
    >>> data = load_dynacol()
    >>> data.data.info() # doctest: +NORMALIZE_WHITESPACE
    <class 'techminer.dataframe.RecordsDataFrame'>
    Int64Index: 1714 entries, 0 to 999
    Data columns (total 13 columns):
     #   Column                         Non-Null Count  Dtype
    ---  ------                         --------------  -----
     0   Abstract                       1714 non-null   object
     1   Affiliations                   1706 non-null   object
     2   Author Keywords                1681 non-null   object
     3   Author(s) ID                   1714 non-null   object
     4   Authors                        1714 non-null   object
     5   Cited by                       1714 non-null   int64
     6   Issue                          1714 non-null   object
     7   Language of Original Document  1714 non-null   object
     8   Page end                       1669 non-null   float64
     9   Page start                     1677 non-null   float64
     10  Title                          1714 non-null   object
     11  Volume                         1714 non-null   int64
     12  Year                           1714 non-null   int64
    dtypes: float64(2), int64(3), object(8)
    memory usage: 187.5+ KB
    """
    return _load_bunch("datasets/dyna-col.rst", "datasets/dyna-col.json", orient="index")


def load_dynacol_citedby():
    """Load and return the dynacol-citedby dataset.
    This dataset contains the bibliographical information about publications
    citing in Scopus the Dyna-Colombia journal, edited by Facultad de Minas,
    Universidad Nacional de Colombia, Sede Medellin, between January, 2010
    and September, 2019.
    Args:
        None.
    Returns:
        A dictionary.
    **Examples**
    >>> from techminer.datasets import load_dynacol_citedby
    >>> data = load_dynacol_citedby().data
    >>> len(data) # doctest: +NORMALIZE_WHITESPACE
    3406
    """
    return _load_bunch(
        "datasets/dyna-col-citedby.rst", "datasets/dyna-col-citedby.json", orient="index"
    )


def load_autotrading_raw():
    """Load and return the autotrading dataset with raw info.
    This dataset contains the raw bibliographical information for publications
    in Scopus automatic trading.
    Args:
        None.
    Returns:
        A dictionary.
    **Examples**
    >>> from techminer.datasets import load_autotrading_raw
    >>> data = load_autotrading_raw()
    >>> data.data.info() # doctest: +NORMALIZE_WHITESPACE
    <class 'techminer.dataframe.RecordsDataFrame'>
    Int64Index: 212 entries, 0 to 99
    Data columns (total 18 columns):
     #   Column           Non-Null Count  Dtype
    ---  ------           --------------  -----
     0   Abstract         212 non-null    object
     1   Affiliations     194 non-null    object
     2   Author Keywords  155 non-null    object
     3   Author(s) ID     212 non-null    object
     4   Authors          212 non-null    object
     5   Cited by         212 non-null    int64
     6   DOI              152 non-null    object
     7   Document Type    212 non-null    object
     8   EID              212 non-null    object
     9   Index Keywords   135 non-null    object
     10  Issue            83 non-null     object
     11  Page end         185 non-null    float64
     12  Page start       185 non-null    float64
     13  Selected         212 non-null    bool
     14  Source title     212 non-null    object
     15  Title            212 non-null    object
     16  Volume           141 non-null    object
     17  Year             212 non-null    int64
    dtypes: bool(1), float64(2), int64(2), object(13)
    memory usage: 30.0+ KB
    """
    return _load_bunch(
        "datasets/auto-trading-raw.rst", "datasets/auto-trading-raw.json", orient="index"
    )


def load_autotrading_selected():
    """Load and return the autotrading dataset with selected info.
    This dataset contains the raw bibliographical information for publications
    in Scopus automatic trading.
    Args:
        None.
    Returns:
        A dictionary.
    **Examples**
    >>> from techminer.datasets import load_autotrading_selected
    >>> data = load_autotrading_selected()
    >>> data.data.info() # doctest: +NORMALIZE_WHITESPACE
    <class 'techminer.dataframe.RecordsDataFrame'>
    Int64Index: 95 entries, 0 to 99
    Data columns (total 17 columns):
     #   Column           Non-Null Count  Dtype
    ---  ------           --------------  -----
     0   Abstract         95 non-null     object
     1   Affiliations     95 non-null     object
     2   Author Keywords  76 non-null     object
     3   Author(s) ID     95 non-null     object
     4   Authors          95 non-null     object
     5   Cited by         95 non-null     int64
     6   DOI              82 non-null     object
     7   Document Type    95 non-null     object
     8   EID              95 non-null     object
     9   Index Keywords   71 non-null     object
     10  Issue            33 non-null     object
     11  Page end         88 non-null     float64
     12  Page start       88 non-null     float64
     13  Source title     95 non-null     object
     14  Title            95 non-null     object
     15  Volume           65 non-null     object
     16  Year             95 non-null     int64
    dtypes: float64(2), int64(2), object(13)
    memory usage: 13.4+ KB
    """
    return _load_bunch(
        "datasets/auto-trading-selected.rst",
        "datasets/auto-trading-selected.json",
        orient="index",
    )


def load_test_cleaned():
    """Load and return a test dataset cleaned.
    Args:
        None.
    Returns:
        A dictionary.
    """
    return _load_bunch(
        "datasets/test-cleaned.rst",
        "datasets/test-cleaned.json",
        orient="records",
        lines=True,
    )


def load_test_raw():
    """Load and return a test dataset cleaned.
    Args:
        None.
    Returns:
        A dictionary.
    """
    return _load_bunch(
        "datasets/test-raw.rst", "datasets/test-raw.json", orient="records", lines=True
    )
|
import glob
import os
import random
import pickle
import shutil
import numpy as np
def split_data(X_train, y_train, X_test, y_test, word_index, MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH,
               singlefn, BATCH_TRAIN_SIZE=20, BATCH_TEST_SIZE=20):
    """Split in-memory train/test arrays into per-batch pickle files.

    Creates ``<dir-of-singlefn>/<basename>/{train,test}`` (wiping any previous
    contents), writes every batch of BATCH_*_SIZE samples as
    ``Xy_{train,test}.<start-index>.pkl``, and stores a ``meta.pkl`` with the
    vocabulary and sizing information needed to read the batches back.

    Args:
        X_train, y_train, X_test, y_test: array-likes with ``shape[0]`` samples.
        word_index: vocabulary mapping, stored verbatim in meta.pkl.
        MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH: model sizing constants.
        singlefn (str): path of the original monolithic data file; its
            directory and basename determine the output layout.
        BATCH_TRAIN_SIZE, BATCH_TEST_SIZE: samples per output file.
    """
    parent_dir, fname = os.path.split(singlefn)
    newdir = os.path.join(parent_dir, fname.split('.')[0])
    print(newdir)
    if not os.path.isdir(newdir):
        os.mkdir(newdir)

    def _fresh_dir(path):
        # Recreate `path` empty, discarding batches from any previous run.
        if os.path.isdir(path):
            shutil.rmtree(path)
        os.mkdir(path)

    train_dir = os.path.join(newdir, 'train')
    print(train_dir)
    _fresh_dir(train_dir)
    test_dir = os.path.join(newdir, 'test')
    print(test_dir)
    _fresh_dir(test_dir)
    num_sample_train = X_train.shape[0]
    print('dump train data...')
    for start in range(0, num_sample_train, BATCH_TRAIN_SIZE):
        bfxname = os.path.join(train_dir, 'Xy_train.{}.pkl'.format(start))
        # `with` closes each batch file (the original leaked the handles).
        with open(bfxname, 'wb') as fh:
            pickle.dump((X_train[start:start + BATCH_TRAIN_SIZE],
                         y_train[start:start + BATCH_TRAIN_SIZE]), fh)
    num_sample_test = X_test.shape[0]
    print('dump test data')
    for start in range(0, num_sample_test, BATCH_TEST_SIZE):
        bfxname = os.path.join(test_dir, 'Xy_test.{}.pkl'.format(start))
        with open(bfxname, 'wb') as fh:
            pickle.dump((X_test[start:start + BATCH_TEST_SIZE],
                         y_test[start:start + BATCH_TEST_SIZE]), fh)
    with open(os.path.join(newdir, 'meta.pkl'), 'wb') as fh:
        pickle.dump((word_index, MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH,
                     BATCH_TRAIN_SIZE, BATCH_TEST_SIZE,
                     num_sample_train, num_sample_test), fh)
    print('done!')
def split_full_data(singlefn, BATCH_TRAIN_SIZE=20, BATCH_TEST_SIZE=20):
    """Load a monolithic pickled dataset and split it into batch files.

    The pickle is expected to contain
    ``((X_train, y_train), (X_test, y_test), word_index,
    (MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH))``; everything is forwarded
    to :func:`split_data`.

    Args:
        singlefn (str): path of the pickled dataset.
        BATCH_TRAIN_SIZE, BATCH_TEST_SIZE: samples per output batch file.
    """
    print('start load data')
    # `with` closes the input file (the original left the handle open).
    with open(singlefn, 'rb') as fh:
        ((X_train, y_train),
         (X_test, y_test),
         word_index,
         (MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH)) = pickle.load(fh)
    print('done load data...')
    split_data(X_train, y_train, X_test, y_test,
               word_index, MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH,
               singlefn, BATCH_TRAIN_SIZE, BATCH_TEST_SIZE)
class File_Generator:
    """Yields (X, y) batches loaded from pickled batch files under data_dir.

    Expects the directory layout produced by ``split_data``:
    ``data_dir/{train,test}/*.pkl`` plus ``data_dir/meta.pkl``.
    """

    def __init__(self, data_dir, num_class):
        self.data_dir = data_dir
        # num_class is stored for callers; it is not used internally here.
        self.num_class = num_class

    def get_meta(self):
        """Load, print and return the dataset metadata from meta.pkl."""
        # `with` closes the file (the original leaked the handle).
        with open(os.path.join(self.data_dir, 'meta.pkl'), 'rb') as fh:
            (word_index, MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH,
             BATCH_TRAIN_SIZE, BATCH_TEST_SIZE,
             num_sample_train, num_sample_test) = pickle.load(fh)
        print('========INFO============')
        print((MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH))
        print((BATCH_TRAIN_SIZE, BATCH_TEST_SIZE))
        print((num_sample_train, num_sample_test))
        return word_index, MAX_NB_WORDS, MAX_SENTS, MAX_SENT_LENGTH, \
               BATCH_TRAIN_SIZE, BATCH_TEST_SIZE, num_sample_train, num_sample_test

    def _batch_generator(self, subdir):
        # Endlessly yield (X, y) batches from the pickles under `subdir`,
        # reshuffling the file order at the start of every epoch.
        all_files = glob.glob(os.path.join(self.data_dir, subdir, '*.pkl'))
        while True:
            sall_files = all_files.copy()
            random.shuffle(sall_files)
            for fn in sall_files:
                with open(fn, 'rb') as fh:  # close each batch file promptly
                    Xs, ys = pickle.load(fh)
                yield (Xs, ys)

    def train_generator(self):
        """Infinite generator over training batches (shuffled per epoch)."""
        return self._batch_generator('train')

    def valid_generator(self):
        """Infinite generator over validation/test batches (shuffled per epoch)."""
        return self._batch_generator('test')
if __name__ == '__main__':
    # split_full_data('./data/imdb_prep_stem.pkl',20, 20)
    # Split the prepared IMDB dataset into batch files of 20 samples each.
    split_full_data('./data/big_imdb_prep_ha50200.pkl', 20, 20)
|
import os
from collections import defaultdict
from src.guesser.ZaliznyakGuesser import *
from src.DependencyDerivation import *
class Results:
    """Accumulates derivation results grouped by rule id.

    Attributes:
        data: mapping rule id -> set of words derived by that rule.
        derived: union of all derived words across every rule.
    """

    def __init__(self, words_a: Set[str] = frozenset(), rule_id: str = None):
        # BUG FIX: the default used to be the literal `{}`, which is both a
        # shared mutable default and the wrong type (an empty *dict*, so
        # `set |= {}` would raise TypeError). An empty frozenset is immutable
        # and safe to share.
        self.data = defaultdict(set)
        self.derived = set()
        if rule_id is not None:
            self.data[rule_id] |= words_a
            self.derived |= words_a

    def __add__(self, other):
        """Merge `other`'s results into self (in place) and return self."""
        if other is not None:
            for k in other.data:
                self.data[k] |= other.data[k]
            self.derived |= other.derived
        return self
class Derivation:
    """Derives related word forms by applying simple, complex and compound
    morphological rules loaded from the packaged JSON rule files."""

    def __init__(self, use_guesser: bool = False, **kwargs):
        self.use_guesser = use_guesser
        if self.use_guesser:
            self.tag_guesser = ZaliznyakGuesser(**kwargs)
        self.pos_all = ['noun', 'adj', 'verb', 'adv', 'num']
        self.rules = []
        self.rules_complex = []
        self.rules_dict = dict()
        # basic rules
        for pos in self.pos_all:
            rule_folder = os.path.join(os.path.dirname(__file__), 'rules', pos, 'simple')
            for rule_file in os.listdir(rule_folder):
                if not rule_file.endswith('.json'):
                    continue
                self.rules.extend(load_rules_from_json(os.path.join(rule_folder, rule_file), 'b', rule_type='simple'))
        for rule in self.rules:
            self.rules_dict[rule.name] = rule
        # complex rules (reference simple rules by id, so they are wired up
        # after every simple rule has been registered)
        for pos in self.pos_all:
            rule_folder = os.path.join(os.path.dirname(__file__), 'rules', pos, 'complex')
            for rule_file in os.listdir(rule_folder):
                if not rule_file.endswith('.json'):
                    continue
                self.rules_complex.extend(load_rules_from_json(os.path.join(rule_folder, rule_file), 'b', rule_type='complex'))
        for rule in self.rules_complex:
            try:
                rule.simple_rules = [self.rules_dict[simple_rule_id] for simple_rule_id in rule.simple_rule_ids]
            except AttributeError:
                print("AttributeError:", rule.name)
        for rule in self.rules_complex:
            self.rules_dict[rule.name] = rule
        self.rules.extend(self.rules_complex)
        # TODO: compound rules
        self.rules_compound = []
        rule_folder = os.path.join(os.path.dirname(__file__), 'rules')
        self.rules_compound.extend(load_rules_from_json(os.path.join(rule_folder, 'compounds.json'), 'b', rule_type='compound'))
        for rule in self.rules_compound:
            try:
                for simple_rule_ids in rule.simple_rule_ids:
                    rule.simple_rules.append([self.rules_dict[simple_rule_id] for simple_rule_id in simple_rule_ids])
                rule.before_merge_rules = [self.rules_dict[simple_rule_id] for simple_rule_id in rule.before_merge_rule_ids]
                rule.after_merge_rules = [self.rules_dict[simple_rule_id] for simple_rule_id in rule.after_merge_rule_ids]
            # BUG FIX: this was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Only unknown rule ids (KeyError)
            # and missing rule attributes (AttributeError) are expected here.
            except (KeyError, AttributeError):
                print("Error:", rule.name)

    def _derive_with_rule(self, word_b: str, pos_b: str = None, pos_a: str = None, rule: WholeRule = None,
                          **kwargs) -> Results:
        """Apply one rule over every requested (pos_b, pos_a) combination."""
        results = []
        # None means "try every part of speech" on that side.
        pos_b_all = [pos_b] if pos_b is not None else self.pos_all
        pos_a_all = [pos_a] if pos_a is not None else self.pos_all
        for pos_b_ in pos_b_all:
            for pos_a_ in pos_a_all:
                results.extend(rule.apply_with_tags(word_b, pos_b_, pos_a_, **kwargs))
        return Results(set(results), rule.name)

    def _derive(self, word_b: str, pos_b: str = None, pos_a: str = None, rule: WholeRule = None, **kwargs) -> Results:
        """Apply one specific rule, or every applicable rule when rule is None."""
        results = Results()
        if rule is not None:
            results += self._derive_with_rule(word_b, pos_b, pos_a, rule, **kwargs)
        else:
            for rule_ in self.rules:
                if (not rule_.only_complex) or kwargs.get('force_complex'):
                    results += self._derive_with_rule(word_b, pos_b, pos_a, rule_, **kwargs)
        return results

    def derive(self, word_b: str, pos_b: str = None, pos_a: str = None, rule_id: str = None, is_extended: bool = True,
               **kwargs):
        """Derive word forms for `word_b`.

        Args:
            word_b: source word.
            pos_b / pos_a: restrict source / target part of speech (None = all).
            rule_id: restrict derivation to one named rule (None = all rules).
            is_extended: when True return {rule_id: derived words}, otherwise
                return the flat set of all derived words.
            **kwargs: extra tags merged into the guesser's tag hypotheses.
        """
        if self.use_guesser:
            tags_all = self.tag_guesser.guess(word=word_b, pos=pos_b, **kwargs)
        else:
            tags_all = [dict()]
        if not tags_all:
            # The guesser found nothing: fall back to a single untagged attempt.
            tags_all = [dict()]
        rule = self.rules_dict.get(rule_id)
        results = Results()
        for tags in tags_all:
            tags.update(**kwargs)
            results += self._derive(word_b, pos_b, pos_a, rule, **tags)
        if is_extended:
            # Drop rules that produced nothing.
            return {k: results.data[k] for k in results.data if results.data[k]}
        else:
            return results.derived
"""
evaluator = Derivation(use_guesser=True)
res = evaluator.derive('польза', pos_b='noun', is_extended=True)
#res = evaluator.derive('натянуть', pos_b='verb', is_extended=True)
print(res)
""" |
"""View for retrieving a subscription"""
from ..serializers.SubscriptionSerializer import SubscriptionSerializer
from ...models.Subscription import Subscription
from rest_framework import generics, permissions
class SubscriptionRetrieveView(generics.RetrieveAPIView):
    """Returns instance of Subscription, if user is authenticated"""
    model = Subscription
    serializer_class = SubscriptionSerializer
    # Instances are addressed by their public_id URL kwarg instead of the pk.
    lookup_field = 'public_id'
    permission_classes = [
        # only return the list if user is authenticated
        permissions.IsAuthenticated
    ]
    def get_queryset(self):
        """
        Filters queryset by the authenticated user
        :returns: filtered Subscription objects
        :rtype: QuerySet
        """
        # Restricting to the requesting user prevents retrieving another
        # user's subscription even when its public_id is known.
        return self.model.objects.filter(owner=self.request.user)
|
import warnings
import Box2D as box_2d
import numpy as np
from .physical_world import PhysicalObject, PhysicalWorld, GymEnvironment
class Missile(PhysicalObject):
    """Missile base class: a dynamic body constrained to vertical movement.

    Subclasses implement :meth:`fire` to decide spawn offset and direction.
    """
    def __init__(self, *args, **kwargs):
        # All missile types share the same sprite.
        super(Missile, self).__init__("missile.png", *args, **kwargs)
    def create_physical_entity(self):
        """Create the Box2D body: a frictionless box fixture sized to the
        sprite (converted into physics units via physical_scale)."""
        body = self._engine.CreateDynamicBody(
            position=self.physical_position, fixedRotation=True
        )
        body.CreatePolygonFixture(
            box=(
                (self.width / 2.0) / self._world.physical_scale,
                (self.height / 2.0) / self._world.physical_scale,
            ),
            density=1.0,
            friction=0.0,
            restitution=0.0,
        )
        # Constrain missile movements to Y axis.
        joint = box_2d.b2PrismaticJointDef()
        joint.Initialize(body, self._world.ground, body.worldCenter, (0.0, 1.0))
        joint.collideConnected = True
        self._engine.CreateJoint(joint)
        return body
    @classmethod
    def fire(cls, world, entity, impulse):
        """Fires a missile."""
        # Abstract: overridden by InvaderMissile / PlayerMissile below.
        raise NotImplementedError
class InvaderMissile(Missile):
    """Invader's missile."""
    @classmethod
    def fire(cls, world, entity, impulse):
        """Fires a missile."""
        # Spawn just below the firing invader and push it downward.
        missile = cls(
            world=world,
            position=(entity.position[0], entity.position[1] - entity.height - 10,),
        )
        missile.apply_impulse(
            (0, (-impulse / world.physical_scale) * missile.body.mass)
        )
        return missile
    def should_collide(self, other):
        """Ignore collisions with invaders and own missiles."""
        return not isinstance(other, (Invader, InvaderMissile))
    def on_contact(self, other):
        # Hitting the player ship costs one life; the missile is destroyed
        # on any contact.
        if isinstance(other, PlayerShip):
            self._world._lives -= 1
        self.kill()
class PlayerMissile(Missile):
    """Player's missile."""
    @classmethod
    def fire(cls, world, entity, impulse):
        """Fires a missile."""
        # Spawn just above the player ship and push it upward.
        missile = cls(
            world=world,
            position=(entity.position[0], entity.position[1] + entity.height + 10,),
        )
        missile.apply_impulse((0, (impulse / world.physical_scale) * missile.body.mass))
        return missile
    def should_collide(self, other):
        """Ignore collisions with own missiles."""
        return not isinstance(other, PlayerMissile)
    def on_contact(self, other):
        # Destroy invaders and invader missiles.
        if isinstance(other, (Invader, InvaderMissile)):
            other.kill()
        # Increase player score for hitting invaders.
        if isinstance(other, Invader):
            self._world.add_kill_score()
        self.kill()
class Invader(PhysicalObject):
    """Invader: a static Box2D body whose sprite depends on its type."""
    # Types of invaders; each maps to the sprite file "<type>.png".
    TYPE_1 = "invader_1"
    TYPE_2 = "invader_2"
    TYPE_3 = "invader_3"
    def __init__(self, *args, **kwargs):
        self._type = kwargs.pop("invader_type")
        # Default look: green sprite at native scale (callers may override).
        kwargs.setdefault("color", (0, 255, 0))
        kwargs.setdefault("scale", 1)
        super(Invader, self).__init__("{}.png".format(self._type), *args, **kwargs)
    def create_physical_entity(self):
        """Create a static box fixture sized to the sprite (physics units)."""
        body = self._engine.CreateStaticBody(position=self.physical_position)
        body.CreatePolygonFixture(
            box=(
                (self.width / 2.0) / self._world.physical_scale,
                (self.height / 2.0) / self._world.physical_scale,
            ),
            density=1.0,
            friction=0.0,
            restitution=0.0,
        )
        return body
class LeftRightMovingInvader(Invader):
    """Invader that patrols horizontally around its spawn column."""

    # Maximum horizontal distance (in position units) from the spawn column.
    max_delta_x = 24

    def __init__(self, *args, **kwargs):
        super(LeftRightMovingInvader, self).__init__(*args, **kwargs)
        self._direction = 1
        # Remember the spawn column so the patrol stays centered on it.
        self._initial_x = self.position[0]

    def step(self):
        # Reverse course when reaching either edge of the patrol range.
        drift = self.position[0] - self._initial_x
        if drift >= self.max_delta_x:
            self._direction = -1
        elif drift <= -self.max_delta_x:
            self._direction = 1
        x, y = self.position
        self.set_body_position((x + self._direction, y))
        super(LeftRightMovingInvader, self).step()
class CrossScreenMovingInvader(Invader):
    """Invader that sweeps back and forth across the whole screen width."""

    def __init__(self, *args, **kwargs):
        super(CrossScreenMovingInvader, self).__init__(*args, **kwargs)
        self._direction = 1

    def step(self):
        x, y = self.position
        # Bounce off the screen edges (one own-width of margin on the left).
        if x >= self._world._width - self.width:
            self._direction = -1
        elif x <= self.width:
            self._direction = 1
        self.set_body_position((x + self._direction, y))
        super(CrossScreenMovingInvader, self).step()
class Shield(PhysicalObject):
    """Shield for the player: a static body that degrades on every contact."""
    def __init__(self, *args, **kwargs):
        # Remaining hit points; decremented in on_contact.
        self.health = kwargs.pop("health")
        kwargs.setdefault("color", (255, 240, 0))
        super(Shield, self).__init__("shield.png", *args, **kwargs)
    def create_physical_entity(self):
        """Create a static box fixture sized to the sprite (physics units)."""
        body = self._engine.CreateStaticBody(position=self.physical_position)
        body.CreatePolygonFixture(
            box=(
                (self.width / 2.0) / self._world.physical_scale,
                (self.height / 2.0) / self._world.physical_scale,
            ),
            density=1.0,
            friction=0.0,
            restitution=0.0,
        )
        return body
    def on_contact(self, other):
        """Shield loses health if anything touches it."""
        self.health -= 1
        if self.health <= 0:
            self.kill()
class PlayerShip(PhysicalObject):
    """Player ship: a damped dynamic body constrained to horizontal movement."""
    def __init__(self, *args, **kwargs):
        super(PlayerShip, self).__init__("ship.png", *args, **kwargs)
    def create_physical_entity(self):
        """Create the Box2D body: a heavily damped, non-rotating box fixture
        joined to the ground so it can only slide along the X axis."""
        body = self._engine.CreateDynamicBody(
            position=self.physical_position, linearDamping=0.99, fixedRotation=True
        )
        body.CreatePolygonFixture(
            box=(
                (self.width / 2.0) / self._world.physical_scale,
                (self.height / 2.0) / self._world.physical_scale,
            ),
            density=1.0,
            friction=0.0,
            restitution=0.0,
        )
        # Constrain ship movements to X axis.
        joint = box_2d.b2PrismaticJointDef()
        joint.Initialize(body, self._world.ground, body.worldCenter, (1.0, 0.0))
        joint.collideConnected = True
        self._engine.CreateJoint(joint)
        return body
class SpaceInvadersWorld(PhysicalWorld):
    """Physical world implementing the Space Invaders game rules."""
    # Entity classes; subclasses override these to change game variants.
    missile_class = Missile
    shield_class = Shield
    player_ship_class = PlayerShip
    invader_class = LeftRightMovingInvader
    # Number of actions.
    n_actions = 4
    # Player missile parameters.
    parameters_player_missile = {
        "class": PlayerMissile,
        # Firing rate (in steps).
        "fire_rate": 20,
        # Maximum number of missiles on screen.
        "max_missiles": 2,
        # Missile impulse.
        "missile_impulse": 100,
    }
    # Invader missile parameters.
    parameters_invader_missile = {
        "class": InvaderMissile,
        # Firing rate (in steps).
        "fire_rate": 15,
        # Maximum number of missiles on screen.
        "max_missiles": 10,
        # Missile impulse.
        "missile_impulse": 100,
    }
    # Number of invaders per row.
    invaders_per_row = 11
    def create_world(self, parent):
        """Build the arena: edge fixtures, invader grid, shields, player ship."""
        # Create world edges.
        p_width = self._width / self.physical_scale
        p_height = self._height / self.physical_scale
        ground = self._engine.CreateStaticBody(position=(0, 0))
        ground.CreateEdgeFixture(vertices=[(0, 0), (0, p_height)])
        ground.CreateEdgeFixture(vertices=[(0, 0), (p_width, 0)])
        ground.CreateEdgeFixture(vertices=[(0, p_height), (p_width, p_height)])
        ground.CreateEdgeFixture(vertices=[(p_width, p_height), (p_width, 0)])
        self._ground = ground
        self.create_invaders()
        self.create_shields()
        self.player_ship = self.player_ship_class(
            world=self, position=self.initial_player_ship_position()
        )
        parent.add(self.player_ship)
    def create_shields(self):
        """Create protective shields."""
        for config in self.initial_shield_configuration():
            shield = self.shield_class(world=self, **config)
            self._batch.add(shield)
    def create_invaders(self):
        """Create invader grid."""
        # Grid spawn origin; columns are 48 units apart, rows two invader
        # heights apart, laid out top-down.
        offset_x = 80
        offset_y = self.initial_invader_row()
        for row, invader_type in enumerate(self.initial_invader_configuration()):
            for column in range(self.invaders_per_row):
                invader = self.invader_class(
                    world=self, position=(offset_x, offset_y), invader_type=invader_type
                )
                self._batch.add(invader)
                offset_x += 48
            offset_x = 80
            offset_y -= invader.height * 2
    def fire_missile(self, entity, parameters):
        """Fire a missile from `entity`, honoring per-class quota and rate.

        Silently does nothing when the on-screen quota for this missile
        class is full or its firing rate would be exceeded.
        """
        # Check if there are not too many missiles on screen already.
        def count_missiles(node):
            if not isinstance(node, parameters["class"]):
                return
            return 1
        # NOTE(review): assumes self.walk() drops None results (consistent
        # with its use in step() below) — confirm in PhysicalWorld.
        if sum(self.walk(count_missiles)) >= parameters["max_missiles"]:
            return
        # Enforce firing rate.
        last_fire_step = self._last_fire_step.get(parameters["class"], 0)
        if self._step - last_fire_step <= parameters["fire_rate"]:
            return
        self._last_fire_step[parameters["class"]] = self._step
        # Fire missile.
        missile = parameters["class"].fire(
            world=self, entity=entity, impulse=parameters["missile_impulse"]
        )
        self._batch.add(missile)
    @property
    def lives(self):
        # Remaining player lives (decremented by InvaderMissile.on_contact).
        return self._lives
    @property
    def score(self):
        # Accumulated player score (increased via add_kill_score).
        return self._score
    @property
    def parameters(self):
        parameters = super(SpaceInvadersWorld, self).parameters
        parameters.update(
            {"world": "space_invaders",}
        )
        return parameters
    def ship_impulse(self):
        """Relative paddle impulse strength on movement actions."""
        return 50
    def act(self, action):
        """Perform external action."""
        # Actions: 0 = noop, 1 = move left, 2 = move right, 3 = fire.
        if action == 0:
            # Do nothing.
            pass
        elif action == 1:
            # Move player left.
            self.player_ship.apply_impulse(
                (
                    (-self.ship_impulse() / self.physical_scale)
                    * self.player_ship.body.mass,
                    0,
                )
            )
        elif action == 2:
            # Move player right.
            self.player_ship.apply_impulse(
                (
                    (self.ship_impulse() / self.physical_scale)
                    * self.player_ship.body.mass,
                    0,
                )
            )
        elif action == 3:
            # Fire missile.
            self.fire_missile(self.player_ship, self.parameters_player_missile)
    def initial_shield_configuration(self):
        """Shield placements (health + position) used at world creation/reset."""
        return [
            {"health": 20, "position": (self._width // 4, 200)},
            {"health": 20, "position": (2 * self._width // 4, 200)},
            {"health": 20, "position": (3 * self._width // 4, 200)},
        ]
    def initial_invader_row(self):
        """Y coordinate of the topmost invader row."""
        return self._height - 50
    def initial_invader_configuration(self):
        """Invader type per row, ordered top to bottom."""
        return [
            Invader.TYPE_1,
            Invader.TYPE_2,
            Invader.TYPE_2,
            Invader.TYPE_3,
            Invader.TYPE_3,
        ]
    def initial_player_ship_position(self):
        """Initial player ship position after reset."""
        return (self._width / 2, 25)
    def adjust_invader_missiles(self, n_invaders):
        """Adjust invader missile inventory."""
        # Fewer surviving invaders -> fewer simultaneous invader missiles.
        if n_invaders >= 45:
            missiles = 10
        elif n_invaders >= 40:
            missiles = 9
        elif n_invaders >= 35:
            missiles = 8
        elif n_invaders >= 30:
            missiles = 7
        elif n_invaders >= 25:
            missiles = 6
        else:
            missiles = 5
        self.parameters_invader_missile["max_missiles"] = missiles
    def add_kill_score(self):
        """Add score when an invader is killed."""
        self._score += 1
    def reset_world(self):
        """Reset the game."""
        super(SpaceInvadersWorld, self).reset_world()
        self._lives = 3
        self._score = 0
        self._step = 0
        # Per-missile-class step of the last shot (for rate limiting).
        self._last_fire_step = {}
        # Destroy all non-player entities.
        def remove_nodes(node):
            if isinstance(node, (Missile, Invader, Shield)):
                node.kill()
        self.walk(remove_nodes)
        self.create_invaders()
        self.create_shields()
        # Recreate the player ship at its starting position.
        self.player_ship.kill()
        self.player_ship = self.player_ship_class(
            world=self, position=self.initial_player_ship_position()
        )
        self._batch.add(self.player_ship)
    def step(self):
        """Perform one environment update step."""
        if self._lives <= 0:
            self.reset_world()
            self._terminal = False
        self._step += 1
        # Pick a random invader and make it fire.
        def collect_invaders(node):
            if isinstance(node, self.invader_class):
                return node
        invaders = self.walk(collect_invaders)
        n_invaders = len(invaders)
        if invaders:
            invader = invaders[self.np_random.randint(0, n_invaders)]
            self.fire_missile(invader, self.parameters_invader_missile)
        # Adjust invader missile inventory.
        self.adjust_invader_missiles(n_invaders)
        super(SpaceInvadersWorld, self).step()
        # Check if the player is out of lives or there are no invaders.
        if self._lives <= 0 or not n_invaders:
            self._terminal = True
class SingleLineSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant with a single row of invaders and a fixed missile inventory."""

    def initial_invader_row(self):
        return self._height - 50

    def initial_invader_configuration(self):
        # Only one row, of type-1 invaders.
        return [Invader.TYPE_1]

    def adjust_invader_missiles(self, n_invaders):
        """Adjust invader missile inventory."""
        # Inventory stays fixed regardless of surviving invader count.
        pass

    def add_kill_score(self):
        """Add score when an invader is killed."""
        # Each kill is worth more since there are far fewer invaders.
        self._score += 5
class InfiniteShieldsSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant whose three shields can never be destroyed."""

    def initial_shield_configuration(self):
        """Three indestructible shields evenly spaced at y=200."""
        return [
            {"health": np.inf, "position": (i * self._width // 4, 200)}
            for i in (1, 2, 3)
        ]
class OffsetPlayerSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant where the player ship spawns higher up, at y=100."""

    def initial_shield_configuration(self):
        """Three 20-health shields evenly spaced at y=200."""
        return [
            {"health": 20, "position": (i * self._width // 4, 200)}
            for i in (1, 2, 3)
        ]

    def initial_player_ship_position(self):
        """Initial player ship position after reset: centered, y=100."""
        return (self._width / 2, 100)
class OffsetPlayer150SpaceInvadersWorld(SpaceInvadersWorld):
    """Variant where the player ship spawns higher up, at y=150."""

    def initial_shield_configuration(self):
        """Three 20-health shields evenly spaced at y=200."""
        return [
            {"health": 20, "position": (i * self._width // 4, 200)}
            for i in (1, 2, 3)
        ]

    def initial_player_ship_position(self):
        """Initial player ship position after reset: centered, y=150."""
        return (self._width / 2, 150)
class RandomOffsetPlayerSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant that spawns the player at a random height each reset."""

    offset_range_start = 25
    offset_range_end = 125

    def initial_shield_configuration(self):
        """Three 20-health shields evenly spaced at y=200."""
        return [
            {"health": 20, "position": (i * self._width // 4, 200)}
            for i in (1, 2, 3)
        ]

    def initial_player_ship_position(self):
        """Draw a fresh vertical offset, then spawn centered at that height."""
        low, high = self.offset_range_start, self.offset_range_end
        self._player_offset = int(self.np_random.uniform(low, high))
        return (self._width / 2, self._player_offset)

    @property
    def parameters(self):
        """World parameters, extended with the sampled player offset."""
        params = super(RandomOffsetPlayerSpaceInvadersWorld, self).parameters
        params["player_offset"] = self._player_offset
        return params
class OffsetPlayerSetASpaceInvadersWorld(RandomOffsetPlayerSpaceInvadersWorld):
    """Random player offset restricted to the lower half of the range ("set A")."""
    offset_range_start = 25
    offset_range_end = 75
class OffsetPlayerSetBSpaceInvadersWorld(RandomOffsetPlayerSpaceInvadersWorld):
    """Random player offset restricted to the upper half of the range ("set B")."""
    offset_range_start = 75
    offset_range_end = 125
class SideObstacle(PhysicalObject):
    """Static gray obstacle anchored at one side of the playfield."""

    def __init__(self, *args, **kwargs):
        # Obstacles are always the same gray, overriding any caller choice.
        kwargs["color"] = (80, 80, 80)
        super(SideObstacle, self).__init__("side_obstacle.png", *args, **kwargs)

    def create_physical_entity(self):
        """Build and return the immovable physics body backing this sprite."""
        body = self._engine.CreateStaticBody(position=self.physical_position)
        # Box fixtures take half-extents in physics-world units.
        half_extents = (
            (self.width / 2.0) / self._world.physical_scale,
            (self.height / 2.0) / self._world.physical_scale,
        )
        body.CreatePolygonFixture(
            box=half_extents,
            density=10.0,
            friction=0.0,
            restitution=0.0,
        )
        return body
class SideObstacleSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant with obstacles hugging both the left and right walls."""

    def create_world(self, parent):
        super(SideObstacleSpaceInvadersWorld, self).create_world(parent)
        mid_y = self._height / 2
        self.obstacle1 = SideObstacle(world=self, position=(10, mid_y))
        parent.add(self.obstacle1, z=1)
        self.obstacle2 = SideObstacle(
            world=self, position=(self._width - 10, mid_y))
        parent.add(self.obstacle2, z=1)
class LeftSideObstacleSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant with a single obstacle hugging the left wall."""

    def create_world(self, parent):
        super(LeftSideObstacleSpaceInvadersWorld, self).create_world(parent)
        self.obstacle = SideObstacle(
            world=self, position=(10, self._height / 2))
        parent.add(self.obstacle, z=1)
class RightSideObstacleSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant with a single obstacle hugging the right wall."""

    def create_world(self, parent):
        super(RightSideObstacleSpaceInvadersWorld, self).create_world(parent)
        right_x = self._width - 10
        self.obstacle = SideObstacle(
            world=self, position=(right_x, self._height / 2))
        parent.add(self.obstacle, z=1)
class RandomSideObstacleSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant with one obstacle of random width on a random side."""

    def reset_world(self):
        super(RandomSideObstacleSpaceInvadersWorld, self).reset_world()
        self.reset_obstacle()

    def reset_obstacle(self):
        """Replace the obstacle with one on a random side and width."""
        if hasattr(self, "obstacle"):
            self.obstacle.kill()
        # Draw side first, then width, so the RNG stream stays identical.
        side = self.np_random.choice(["left", "right"])
        # NOTE(review): uniform(-8, 2) is mostly negative, pushing the
        # obstacle slightly off-screen — confirm this is intended.
        width = int(self.np_random.uniform(-8, 2))
        x = width if side == "left" else self._width - width
        self.obstacle = SideObstacle(
            world=self, position=(x, self._height / 2))
        self._batch.add(self.obstacle, z=1)
class SingleInvaderSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant with a single cross-screen invader and one infinite shield."""
    invader_class = CrossScreenMovingInvader
    invaders_per_row = 1
    parameters_invader_missile = {
        "class": InvaderMissile,
        # Firing rate (in steps).
        "fire_rate": 10,
        # Maximum number of missiles on screen.
        "max_missiles": 20,
        # Missile impulse.
        "missile_impulse": 100,
    }
    def initial_invader_row(self):
        """Vertical position of the single invader row."""
        return self._height - 200
    def initial_invader_configuration(self):
        """A single TYPE_1 invader."""
        return [
            Invader.TYPE_1,
        ]
    def adjust_invader_missiles(self, n_invaders):
        """Adjust invader missile inventory (fixed here; nothing to do)."""
        pass
    def add_kill_score(self):
        """Add score when an invader is killed (the lone invader is worth 55)."""
        self._score += 55
    def initial_shield_configuration(self):
        """One indestructible shield, placed off-center to the right."""
        return [
            {"health": np.inf, "position": (4 * self._width // 5, 200)},
        ]
class WhiteShield(Shield):
    """Shield rendered in white instead of the default color."""

    def __init__(self, *args, **kwargs):
        # Only supply the color when the caller did not pick one.
        kwargs.setdefault("color", (255, 255, 255))
        super(WhiteShield, self).__init__(*args, **kwargs)
class WhiteLeftRightMovingInvader(LeftRightMovingInvader):
    """Left/right-moving invader rendered in white."""

    def __init__(self, *args, **kwargs):
        # Only supply the color when the caller did not pick one.
        kwargs.setdefault("color", (255, 255, 255))
        super(WhiteLeftRightMovingInvader, self).__init__(*args, **kwargs)
class OneColorSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant where shields and invaders are all drawn in white."""
    shield_class = WhiteShield
    invader_class = WhiteLeftRightMovingInvader
class Scaled80SpaceInvadersWorld(SpaceInvadersWorld):
    """Variant rendered at 80% of the default scale."""
    def __init__(self, *args, **kwargs):
        super(Scaled80SpaceInvadersWorld, self).__init__(*args, **kwargs)
        # Set after super().__init__ so it overrides any default scale.
        self.scale = 0.80
class Scaled90SpaceInvadersWorld(SpaceInvadersWorld):
    """Variant rendered at 90% of the default scale."""
    def __init__(self, *args, **kwargs):
        super(Scaled90SpaceInvadersWorld, self).__init__(*args, **kwargs)
        # Set after super().__init__ so it overrides any default scale.
        self.scale = 0.90
class Scaled95SpaceInvadersWorld(SpaceInvadersWorld):
    """Variant rendered at 95% of the default scale."""
    def __init__(self, *args, **kwargs):
        super(Scaled95SpaceInvadersWorld, self).__init__(*args, **kwargs)
        # Set after super().__init__ so it overrides any default scale.
        self.scale = 0.95
class Scaled99SpaceInvadersWorld(SpaceInvadersWorld):
    """Variant rendered at 99% of the default scale."""
    def __init__(self, *args, **kwargs):
        super(Scaled99SpaceInvadersWorld, self).__init__(*args, **kwargs)
        # Set after super().__init__ so it overrides any default scale.
        self.scale = 0.99
class RandomScaledSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant whose rendering scale is drawn uniformly at random each reset."""

    scale_range_start = 0.90
    scale_range_end = 1.0

    def reset_world(self):
        """Reset the world, then draw a fresh random scale."""
        super(RandomScaledSpaceInvadersWorld, self).reset_world()
        low, high = self.scale_range_start, self.scale_range_end
        self.scale = self.np_random.uniform(low, high)

    @property
    def parameters(self):
        """World parameters, extended with the sampled scale."""
        params = super(RandomScaledSpaceInvadersWorld, self).parameters
        params["scale"] = self.scale
        return params
class ScaledSetASpaceInvadersWorld(RandomScaledSpaceInvadersWorld):
    """Random scale restricted to the upper half of the range ("set A")."""
    scale_range_start = 0.95
    scale_range_end = 1.0
class ScaledSetBSpaceInvadersWorld(RandomScaledSpaceInvadersWorld):
    """Random scale restricted to the lower half of the range ("set B")."""
    scale_range_start = 0.90
    scale_range_end = 0.95
class RandomActionStrengthSpaceInvadersWorld(SpaceInvadersWorld):
    """Variant randomizing the player ship's impulse strength per episode."""

    impulse_range_start = 30
    impulse_range_end = 170

    def reset_world(self):
        """Reset the world, then draw a fresh impulse strength."""
        super(RandomActionStrengthSpaceInvadersWorld, self).reset_world()
        low, high = self.impulse_range_start, self.impulse_range_end
        self._impulse_strength = self.np_random.uniform(low, high)

    def ship_impulse(self):
        """Impulse magnitude applied to the ship this episode."""
        return self._impulse_strength

    @property
    def parameters(self):
        """World parameters, extended with the sampled impulse strength."""
        params = super(
            RandomActionStrengthSpaceInvadersWorld, self).parameters
        params["ship_impulse"] = self._impulse_strength
        return params
class ActionStrengthSetASpaceInvadersWorld(RandomActionStrengthSpaceInvadersWorld):
    """Random impulse restricted to the lower part of the range ("set A")."""
    impulse_range_start = 30
    impulse_range_end = 100
class ActionStrengthSetBSpaceInvadersWorld(RandomActionStrengthSpaceInvadersWorld):
    """Random impulse restricted to the upper part of the range ("set B")."""
    impulse_range_start = 100
    impulse_range_end = 170
class MultiParameterSetASpaceInvadersWorld(
    OffsetPlayerSetASpaceInvadersWorld,
    ActionStrengthSetASpaceInvadersWorld,
    # ScaledSetASpaceInvadersWorld,
):
    """
    Combines several randomized parameters via multiple inheritance.

    Parameters (all from set A):
    - player offset
    - action strength
    - scale (NOTE: removed)
    """
    pass
class MultiParameterSetBSpaceInvadersWorld(
    OffsetPlayerSetBSpaceInvadersWorld,
    ActionStrengthSetBSpaceInvadersWorld,
    # ScaledSetBSpaceInvadersWorld,
):
    """
    Combines several randomized parameters via multiple inheritance.

    Parameters (all from set B):
    - player offset
    - action strength
    - scale (NOTE: removed)
    """
    pass
class SpaceInvaders(GymEnvironment):
    """Space invaders Gym environment."""

    # Registry mapping variant names (as used by env configuration)
    # to the world classes defined above.
    worlds = {
        "baseline": SpaceInvadersWorld,
        "single_line": SingleLineSpaceInvadersWorld,
        "inf_shields": InfiniteShieldsSpaceInvadersWorld,
        "offset_player": OffsetPlayerSpaceInvadersWorld,
        "offset_player150": OffsetPlayer150SpaceInvadersWorld,
        "random_offset_player": RandomOffsetPlayerSpaceInvadersWorld,
        "side_obstacle": SideObstacleSpaceInvadersWorld,
        "left_side_obstacle": LeftSideObstacleSpaceInvadersWorld,
        "right_side_obstacle": RightSideObstacleSpaceInvadersWorld,
        "random_side_obstacle": RandomSideObstacleSpaceInvadersWorld,
        "single_invader": SingleInvaderSpaceInvadersWorld,
        "one_color": OneColorSpaceInvadersWorld,
        "scaled_80": Scaled80SpaceInvadersWorld,
        "scaled_90": Scaled90SpaceInvadersWorld,
        "scaled_95": Scaled95SpaceInvadersWorld,
        "scaled_99": Scaled99SpaceInvadersWorld,
        "random_scaled": RandomScaledSpaceInvadersWorld,
        "offset_player_set_a": OffsetPlayerSetASpaceInvadersWorld,
        "offset_player_set_b": OffsetPlayerSetBSpaceInvadersWorld,
        "scaled_set_a": ScaledSetASpaceInvadersWorld,
        "scaled_set_b": ScaledSetBSpaceInvadersWorld,
        "action_strength_set_a": ActionStrengthSetASpaceInvadersWorld,
        "action_strength_set_b": ActionStrengthSetBSpaceInvadersWorld,
        "multi_parameter_set_a": MultiParameterSetASpaceInvadersWorld,
        "multi_parameter_set_b": MultiParameterSetBSpaceInvadersWorld,
    }

    def get_action_meanings(self):
        """Human-readable names for the discrete actions, indexed by action id."""
        return [
            "NOOP",
            "LEFT",
            "RIGHT",
            "FIRE",
        ]

    def get_keys_to_action(self):
        """Keyboard bindings for interactive play (a=left, d=right, s=fire)."""
        return {
            (): 0,
            (ord("a"),): 1,
            (ord("d"),): 2,
            (ord("s"),): 3,
            # Pressing both directions cancels out to a no-op.
            (ord("a"), ord("d")): 0,
        }
|
from setuptools import find_packages, setup
from distutils.extension import Extension
from Cython.Build import cythonize
import numpy as np
def readme():
    """Return the contents of README.md for use as the long description."""
    import io
    # Decode explicitly as UTF-8 so the build does not depend on the
    # machine's locale encoding (io.open works on both Python 2 and 3).
    with io.open('README.md', encoding='utf-8') as f:
        return f.read()
def find_version():
    """Extract ``__version__`` from torchreid/__init__.py without importing it.

    Executing the file text avoids importing the package (and its heavy
    dependencies) at build time.
    """
    version_file = 'torchreid/__init__.py'
    # exec into an explicit dict: writing to a function's locals() via
    # exec is undefined behavior in Python 3 and must not be relied on.
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def numpy_include():
    """Locate the NumPy C header directory, tolerating very old releases."""
    try:
        include_dir = np.get_include()
    except AttributeError:
        # Ancient NumPy releases exposed the path under a different name.
        include_dir = np.get_numpy_include()
    return include_dir
# Cython extension: the compiled CMC/mAP ranking kernel used by the
# metrics package.  NumPy headers are required to build it.
ext_modules = [
    Extension(
        'torchreid.metrics.rank_cylib.rank_cy',
        ['torchreid/metrics/rank_cylib/rank_cy.pyx'],
        include_dirs=[numpy_include()],
    )
]
# Package metadata and build configuration for torchreid.
setup(
    name='torchreid',
    version=find_version(),
    description='Pytorch framework for deep-learning person re-identification',
    author='Kaiyang Zhou',
    author_email='k.zhou.vision@gmail.com',
    license='MIT',
    long_description=readme(),
    url='https://github.com/KaiyangZhou/deep-person-reid',
    packages=find_packages(),
    install_requires=[
        'numpy',
        'Cython',
        'h5py',
        'Pillow',
        'six',
        'scipy>=1.0.0',
        'torch>=0.4.1',
        'torchvision>=0.2.1'
    ],
    keywords=[
        'Person Re-Identification',
        'Deep Learning',
        'Computer Vision'
    ],
    # Compile the Cython ranking extension declared above.
    ext_modules=cythonize(ext_modules)
)
from django.db import models
class BaseAbstractModel(models.Model):
    """Abstract base model providing bookkeeping fields shared by all models.

    Fields:
        created_at -- set once when the row is first saved
        updated_at -- refreshed automatically on every save
        is_deleted -- soft-deletion flag (row is hidden, never removed)
    """

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    is_deleted = models.BooleanField(default=False)

    def soft_delete(self):
        """Mark this instance as deleted and persist the flag."""
        self.is_deleted = True
        self.save()

    class Meta:
        # Abstract: no database table is created for this model itself.
        abstract = True
        # Newest records come first by default.
        ordering = ['-created_at']
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
# Dump a device's flash over a TCP console using the XMODEM-CRC protocol.
# Python 2 script: socket I/O uses plain byte strings (str).
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.connect(('192.168.1.7', 9000))
sock.settimeout(5)
# cancel previous xmodem download (0x18 = CAN, sent repeatedly)
sock.send(chr(0x18)*20)
sock.send('\r')
# wait prompt
s = ''
while 1:
    data = sock.recv(1024)
    s = s + data
    if 'sds://>' in s:
        print('found prompt')
        break
# or for time saving fburn rd 0 0x00000000 0x01440000
sock.send('fburn rd 0 0x00000000 0x08000000\r')
# wait xmodem
s = ''
while 1:
    data = sock.recv(1024)
    s = s + data
    if 'Start xmodem now!' + chr(0x0A) in s:
        print('start xmodem')
        break
# 'C' requests XMODEM-CRC mode from the sender.
sock.send('C')
f = open('firmware.bin', 'wb')
s = ''
addr = 0
while 1:
    data = sock.recv(1024)
    s = s + data
    # 0x04 = EOT: transfer done, acknowledge with ACK (0x06).
    if s[0] == chr(0x04):
        print('found EOT')
        sock.send(chr(0x06))
        break
    # A complete XMODEM-CRC frame is 133 bytes:
    # SOH, block#, ~block#, 128 data bytes, 2 CRC bytes.
    if len(s) == 133:
        if addr % 0x20000 == 0:
            print('{:08x}'.format(addr))
        # ACK the frame, then keep only the 128-byte payload.
        sock.send(chr(0x06))
        f.write(s[3:-2])
        s = ''
        addr = addr + 128
f.close()
sock.close()
|
#!/usr/bin/env pnpython3
#
# KefEdit
#
# Credit: Lan Dam
#
# Updated Feb 2018
import sys
import logging
import os
import time
import numpy
import os.path as path
from tempfile import mkdtemp
from copy import deepcopy
from operator import itemgetter
from ph5.core import kefutility
LOGGER = logging.getLogger(__name__)
try:
from PySide2 import QtWidgets, QtCore, QtGui
except Exception:
msg = ("\n\nNo module named PySide2. "
"Please environment_gui.yml to install conda environment"
"PySide2 is needed for kefedit.")
raise ImportError(msg)
# added on 20180226 so that temp.kef will always be available
# (scratch kef file in a private temp directory, used by OnSavePH5).
keftmpfile = path.join(mkdtemp(), 'temp.kef')
PROG_VERSION = 2021.84
# Widget -> tooltip text, consumed by the eventFilter handlers.
EXPL = {}
# CLASS ####################
# Author: Lan
# Updated: 201702
# CLASS: KefEdit
class KefEdit(QtWidgets.QMainWindow):
    """Main window of the KEF editor: menus, color legend and table tabs."""

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.setWindowTitle("KEF Editor Ver. %s" % PROG_VERSION)
        self.kefFilename = None
        self.path_tabs = []  # tab widgets kept so they can be deleted later
        self.ph5api = None  # reused when opening more tables from the
        # currently opened ph5
        self.notsave = True  # True while the open data have unsaved changes
        self.initMenu()
        # Central frame with a vertical layout: legend row on top,
        # the tab widget holding one table per path below.
        mainFrame = QtWidgets.QFrame(self)
        self.setCentralWidget(mainFrame)
        mainLayout = QtWidgets.QVBoxLayout()
        mainFrame.setLayout(mainLayout)
        statusLayout = QtWidgets.QHBoxLayout()
        mainLayout.addLayout(statusLayout)
        statusLayout.addWidget(QtWidgets.QLabel("Color keys:"))
        # Legend label for rows that have been edited.
        updateCol = QtWidgets.QLabel("UPDATE")
        updateCol.installEventFilter(self)
        EXPL[updateCol] = "Color for Changed Row"
        updateCol.setAlignment(QtCore.Qt.AlignHCenter)
        updateCol.setFixedWidth(85)
        updateCol.setStyleSheet(" background-color: %s" % updateColName)
        statusLayout.addWidget(updateCol)
        # Legend label for rows that are marked for deletion.
        deleteCol = QtWidgets.QLabel("DELETE")
        deleteCol.installEventFilter(self)
        EXPL[deleteCol] = "Color for Deleted Row"
        deleteCol.setAlignment(QtCore.Qt.AlignHCenter)
        deleteCol.setFixedWidth(85)
        deleteCol.setStyleSheet(" background-color: %s" % deleteColName)
        statusLayout.addWidget(deleteCol)
        statusLayout.addStretch(1)
        self.path_tabWidget = QtWidgets.QTabWidget()  # each tab keeps a table
        mainLayout.addWidget(self.path_tabWidget)
        self.statusBar = self.statusBar()
        self.setGeometry(0, 0, 1200, 900)
        self.showMaximized()
def eventFilter(self, object, event):
if event.type() == QtCore.QEvent.Enter:
if object not in EXPL.keys():
return False
P = object.pos()
QtWidgets.QToolTip.showText(
self.mapToGlobal(QtCore.QPoint(P.x(), P.y() + 20)),
EXPL[object])
return True
return False
    def initMenu(self):
        """Build the File and Help menus; save/update actions start disabled."""
        # HELP MENU #################
        manualAction = QtWidgets.QAction('Manual', self)
        manualAction.setShortcut('F1')
        manualAction.triggered.connect(self.OnManual)
        whatsnewAction = QtWidgets.QAction("What's new?", self)
        whatsnewAction.setShortcut('F1')
        whatsnewAction.triggered.connect(self.OnWhatsnew)
        # FILE MENU #################
        openKefAction = QtWidgets.QAction('Open Kef File', self)
        openKefAction.triggered.connect(self.OnOpenKef)
        openPH5Action = QtWidgets.QAction('Open PH5 File', self)
        openPH5Action.triggered.connect(self.OnOpenPH5)
        # Only meaningful once a PH5 file has been opened.
        self.openTableAction = QtWidgets.QAction(
            'Open table(s) in the current PH5 File', self)
        self.openTableAction.triggered.connect(self.OnOpenCurrPH5)
        self.openTableAction.setEnabled(False)
        # ---------------- Save ----------------
        # Save actions stay disabled until data are loaded (see setData).
        self.saveKefAction = QtWidgets.QAction('Save as Kef File', self)
        self.saveKefAction.triggered.connect(self.OnSaveKef)
        self.saveKefAction.setEnabled(False)
        self.savePH5Action = QtWidgets.QAction('Save as PH5 File', self)
        self.savePH5Action.triggered.connect(self.OnSavePH5)
        self.savePH5Action.setEnabled(False)
        self.updatePH5Action = QtWidgets.QAction(
            'Update the Current PH5 File', self)
        self.updatePH5Action.triggered.connect(self.OnUpdatePH5)
        self.updatePH5Action.setEnabled(False)
        self.saveCSVAction = QtWidgets.QAction('Save as CSV File', self)
        self.saveCSVAction.triggered.connect(self.OnSaveCSV)
        self.saveCSVAction.setEnabled(False)
        # ---------------- exit ----------------
        exitAction = QtWidgets.QAction('&Exit', self)
        exitAction.setShortcut('Ctrl+Q')
        exitAction.triggered.connect(self.closeEvent)
        # ADDING MENU #####################
        menubar = QtWidgets.QMenuBar()
        self.setMenuBar(menubar)
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(openKefAction)
        fileMenu.addAction(openPH5Action)
        fileMenu.addAction(self.openTableAction)
        fileMenu.addAction(self.saveKefAction)
        fileMenu.addAction(self.savePH5Action)
        fileMenu.addAction(self.updatePH5Action)
        fileMenu.addAction(self.saveCSVAction)
        fileMenu.addAction(exitAction)
        fileMenu.insertSeparator(self.saveKefAction)
        fileMenu.insertSeparator(exitAction)
        helpMenu = menubar.addMenu('&Help')
        helpMenu.addAction(manualAction)
        helpMenu.addAction(whatsnewAction)
    ###############################
    # def closeEvent
    # author: Lan Dam
    # updated: 201704
    # * check if the changes haven't been saved, give user a chance to change
    # mind
    # * close the app when the widget is closed (to close the opened PH5)
    def closeEvent(self, evt=None):
        """Confirm close when unsaved edits exist, then quit the application."""
        for tab in self.path_tabs:
            if self.notsave is True and \
                    (
                    tab.updateList != [] or tab.deleteList != [] or
                    tab.addDataList != []):
                msg = "There are still things you have worked on but haven't" \
                      "saved." + \
                      "\nClick on Cancel to cancel closing. " + \
                      "\nClick on Close to close KefEdit."
                result = QtWidgets.QMessageBox.question(
                    self, "Are you sure?", msg,
                    QtWidgets.QMessageBox.Cancel, QtWidgets.QMessageBox.Close)
                if result == QtWidgets.QMessageBox.Cancel:
                    # evt is a bool when triggered from the Exit menu action,
                    # a QCloseEvent when the window itself is being closed.
                    if evt.__class__.__name__ != 'bool':
                        evt.ignore()
                    return
        QtCore.QCoreApplication.instance().quit()
        sys.exit(application.exec_())
    def OnManual(self):
        """Open the manual window (kept on self so it is not garbage-collected)."""
        self.manualWin = ManWindow("manual")
    def OnWhatsnew(self):
        """Open the what's-new window (kept on self so it is not garbage-collected)."""
        self.whatsnewWin = ManWindow("whatsnew")
    ###############################
    # def OnOpenKef
    # author: Lan Dam
    # updated: 201702
    # * open Kef file, read data into self.dataTabel, keySets then into
    # labelSets
    # (each set represent for data in a path)
    # * then call self.setData() to set the given data in display
    def OnOpenKef(self):
        """Open a .kef file, parse it, and display its tables."""
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            directory="/home/", filter="Kef Files(*.kef)")
        self.kefFilename = filename
        if not filename:
            return
        self.path2file = os.path.dirname(str(filename))
        self.filename = os.path.basename(str(filename))
        # A previously opened PH5 is no longer relevant: close it and
        # disable the PH5-only actions.
        if self.ph5api is not None:
            self.ph5api.close()
            del self.ph5api
            self.ph5api = None
        self.openTableAction.setEnabled(False)
        self.updatePH5Action.setEnabled(False)
        self.dataTable, self.labelSets, self.totalLines, self.types =\
            kefutility.Kef2TableData(
                self.statusBar, filename)
        if self.totalLines > 10000:
            self.statusBar.showMessage(
                "Please be patient while displaying...")
        self.setData()
        self.notsave = True
    ###############################
    # def OnOpenPH5
    # author: Lan Dam
    # updated: 201703
    # Open PH5 file
    # * use kefutility.GetPrePH5Info give user list of tables and info to
    # select
    # to get info from kefutility.PH5toDataTable
    # * call SelectTableDialog for user to select which table(s) to display
    # * in SelectTableDialog, the following tasks will be perfomed:
    # [Read data into self.dataTable, keySets into labelSets
    # (each set represent for data in a path)
    # then call self.setData() to set the given data in display]
    def OnOpenPH5(self):
        """Open a .ph5 file and let the user pick which tables to display."""
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(
            directory="/home/", filter="PH5 Files(*.ph5)")
        if not filename:
            return
        self.path2file = os.path.dirname(str(filename))
        self.filename = os.path.basename(str(filename))
        # Close any previously opened PH5 before replacing it.
        if self.ph5api is not None:
            self.ph5api.close()
            del self.ph5api
        self.ph5api, availTables, arrays, shotLines, offsets, das =\
            kefutility.GetPrePH5Info(self.filename, self.path2file)
        self.selTableDlg = SelectTableDialog(self, availTables, arrays,
                                             shotLines, offsets, das)
        # Data now come from PH5, not from a kef file.
        self.kefFilename = None
    ###############################
    # def OnOpenCurrPH5
    # author: Lan Dam
    # updated: 201704
    # Open other tables on the current opened PH5 File
    # * similar to onOpenPH5() but skip the part of opening file to
    # getPrePH5Info
    # * reshow SelTableDlg for user to select which table(s) to display
    # * in SelectTableDialog, the following tasks will be perfomed:
    # [Read data into self.dataTable, keySets into labelSets
    # (each set represent for data in a path)
    # then call self.setData() to set the given data in display]
    def OnOpenCurrPH5(self):
        """Re-show the table-selection dialog for the already-open PH5 file."""
        self.selTableDlg.show()
        self.selTableDlg.move(70,
                              70)  # to move to the same position when
        # create new
    ###############################
    # def OnSaveKef
    # author: Lan Dam
    # updated: 201704
    # save current table(s) into a kef file
    # * user choose filename
    # * call _saveKeffile() to save kef format into the filename
    # * inform when successfully save and ask to close KefEdit
    def OnSaveKef(self):
        """Save the displayed table(s) to a user-chosen .kef file."""
        if not self._checkAddTableView():
            return
        # created suggestedFileName to recommend to user
        if 'kef' in self.filename:
            ss = self.filename.split(".")
            ss[0] += "_editted"
            suggestedFileName = ".".join(ss)
        else:
            # Data came from PH5: derive a name from the table type.
            arg = self.arg
            if self.tableType == 'Array_t':
                arg = "{0:03d}".format(int(arg))
            if arg is not None:
                suggestedFileName = self.tableType + "_" + arg
            else:
                suggestedFileName = self.tableType
        suggestedFileName = self.path2file + "/" + suggestedFileName +\
            '.kef'
        savefilename, _ = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save File", suggestedFileName, filter="Kef File (*.kef)")
        if not savefilename:
            return
        START = time.time()
        # start kef file with the version of KefEdit
        currText = "#\n#\t%s\tKefEdit version: %s" % (
            time.ctime(time.time()), PROG_VERSION)
        result = self._saveKeffile(savefilename, currText)
        if result is False:
            return
        END = time.time()
        self.statusBar.showMessage(
            "Successfully saving Kef file. Total processing time %s seconds"
            % (END - START))
        msg = "File %s has been saved successfully." \
              "\nDo you want to close KEF Editor?" % savefilename
        result = QtWidgets.QMessageBox.question(
            self, "Successfully Save File", msg,
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        self.notsave = False  # after saving, reset notsave
        if result == QtWidgets.QMessageBox.Yes:
            if self.ph5api is not None:
                self.ph5api.close()
            QtCore.QCoreApplication.instance().quit()
            sys.exit(application.exec_())
    ###############################
    # def OnUpdatePH5  (header previously mislabeled "OnSavePH5")
    # author: Lan Dam
    # updated: 201704
    # update currently opened table(s) in the current PH5 file using
    # tab.UpdatePH5()
    def OnUpdatePH5(self):
        """Write every open table back into the currently opened PH5 file."""
        START = time.time()
        for tab in self.path_tabs:
            tab.UpdatePH5()
        END = time.time()
        savefilename = self.path2file + "/" + self.filename
        self.statusBar.showMessage(
            "Successfully updating the current PH5 file. Total processing"
            "time %s seconds" % (
                END - START))
        msg = "File %s has been updated successfully." \
              "\nDo you want to close KEF Editor?" % savefilename
        QtWidgets.QMessageBox.question(
            self, "Successfully Save File", msg,
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        self.notsave = False  # after saving, reset notsave
    # def OnSavePH5
    # author: Lan Dam
    # updated: 201802
    # update currently opened table(s) to an existing PH5 file, or create new
    # PH5 file from the opened table(s)
    # ** if it is the currently opened one, call self.OnUpdatePH5 instead
    # * user choose filename to save
    # * call _saveKeffile() to save all tables into the a temp file in kef
    # format
    # * For each table (tab/path) call kefutility.NukeTable() to remove the
    # table from the PH5 file
    # * call os.system() to run kef2ph5 script to add the tables in temp.
    # kef file to the filename that user chose
    def OnSavePH5(self):
        """Save the open table(s) into a (possibly different) PH5 file."""
        if not self._checkAddTableView():
            return
        savefilename, _ = QtWidgets.QFileDialog.getSaveFileName(
            self, "Save File", self.path2file, filter="PH5 File (*.ph5)")
        if not savefilename:
            return
        START = time.time()
        options = {}
        options['path2ph5'] = os.path.dirname(str(savefilename))
        options['outph5file'] = os.path.basename(str(savefilename))
        # the file that user choose is the currently opened one => update the
        # file
        if self.path2file == options['path2ph5'] and self.filename == options[
                'outph5file']:
            self.OnUpdatePH5()
            return
        # save in a temp kef file
        if not self.kefFilename:
            options['keffile'] = keftmpfile
            self._saveKeffile(keftmpfile)
        else:
            # add on 20180226 to reduce step when the opened file is a kef file
            options['keffile'] = self.kefFilename
        # Target PH5 already exists: drop each table we are about to rewrite.
        if path.exists(savefilename):
            for p in self.pathAll:
                self.statusBar.showMessage(
                    "Removing existing table %s from PH5file" % p)
                delResult = kefutility.NukeTable(self, options['outph5file'],
                                                 options['path2ph5'], p)
                if delResult is False:
                    return
        from subprocess import Popen, PIPE, STDOUT
        pathStr = ','.join(self.pathAll)
        # Shell out to the keftoph5 tool to insert the saved kef data.
        cmdStr = "keftoph5 -n %(outph5file)s -k %(keffile)s -p %(path2ph5)s"\
            % options
        self.statusBar.showMessage(
            "Inserting new table(s) %s into PH5file" % pathStr)
        print "Inserting new table(s) %s into PH5file" % pathStr
        p = Popen(cmdStr, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT,
                  close_fds=True)
        output = p.stdout.read()
        print "The following command is running:\n", cmdStr
        print "Output: ", output
        doclose = False
        if 'error' not in output.lower():
            msg = "File %s has been saved successfully." \
                  "\nDo you want to close KEF Editor?" % savefilename
            result = QtWidgets.QMessageBox.question(
                self, "Successfully Save File",
                msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
            self.notsave = False  # after saving, reset notsave
            if result == QtWidgets.QMessageBox.Yes:
                doclose = True
        else:
            QtWidgets.QMessageBox.warning(
                self, "Error in saving to PH5 file", output)
            msg = "Do you want to close KEF Editor?"
            result = QtWidgets.QMessageBox.question(
                self, "", msg,
                QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
            if result == QtWidgets.QMessageBox.Yes:
                doclose = True
        END = time.time()
        self.statusBar.showMessage(
            "Successfully saving PH5 file. Total processing time %s seconds"
            % (END - START))
        if doclose:
            if self.ph5api is not None:
                self.ph5api.close()
            QtCore.QCoreApplication.instance().quit()
            sys.exit(application.exec_())
        try:
            os.unlink(keftmpfile)  # remove keftmpfile
        except BaseException:
            pass
    ###############################
    # def _checkAddTableView
    # author: Lan Dam
    # updated: 201704
    # when saving the table, check if the data in AddTableView are all
    # inserted into MainTableView
    def _checkAddTableView(self):
        """Return False if the user cancels saving due to pending Add-view data."""
        for tab in self.path_tabs:
            if tab.addDataList != []:
                msg = "There are still data in Add Table View." + \
                      "\nClick 'Cancel' to cancel saving to work on the data."\
                      + \
                      "\nClick 'Save' to continue saving."
                result = QtWidgets.QMessageBox.question(
                    self, "Are you sure?", msg,
                    QtWidgets.QMessageBox.Cancel, QtWidgets.QMessageBox.Save)
                if result == QtWidgets.QMessageBox.Cancel:
                    return False
        return True
    ###############################
    # def _saveKeffile
    # author: Lan Dam
    # updated: 201704
    # save all opened tables into the passed savefileme in kef format
    # * loop through tabs, for each tab call tab.ToString appen the table in
    # kef format to currText
    # * save currText into file 'savefilename'
    def _saveKeffile(self, savefilename, currText=""):
        """Write all open tables in kef format; return True on success."""
        i = 0
        for tab in self.path_tabs:
            # Each tab appends its table to currText; i tracks row count.
            currText, i = tab.ToString(currText, i)
        try:
            saveFile = open(savefilename, 'w')
            saveFile.write(currText)
        except Exception, e:
            msg = "Can't save the kef file %s due to the error:%s" % (
                savefilename, str(e))
            QtWidgets.QMessageBox.warning(self, "Error", msg)
            return False
        saveFile.close()
        return True
def OnSaveCSV(self):
if not self._checkAddTableView():
return
error = ""
START = time.time()
try:
for tab in self.path_tabs:
# created suggestedFileName to recommend to user
suggestedFileName = tab.path.split("/")[-1]
suggestedFileName = self.path2file + "/" + suggestedFileName\
+ '.csv'
savefilename, _ = QtWidgets.QFileDialog.getSaveFileName(
self, "Save File on Path: %s" % tab.path,
suggestedFileName, filter="CSV File (*.csv)")
if not savefilename:
continue
tab.SaveCSV(savefilename)
except Exception, e:
error = str(e)
if error == "":
msg = "File %s has been saved successfully." \
"\nDo you want to close KEF Editor?" % savefilename
result = QtWidgets.QMessageBox.warning(
self, "Successfully Save File",
msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if result == QtWidgets.QMessageBox.Yes:
doclose = True
else:
QtWidgets.QMessageBox.warning(self, "Error in saving as CSV file")
msg = "Do you want to close KEF Editor?"
result = QtWidgets.QMessageBox.question(
self, "", msg,
QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
if result == QtWidgets.QMessageBox.Yes:
doclose = True
END = time.time()
self.statusBar.showMessage(
"Successfully saving as CSV file. Total processing time %s"
"seconds" % (END - START))
if doclose:
if self.ph5api is not None:
self.ph5api.close()
QtCore.QCoreApplication.instance().quit()
sys.exit(application.exec_())
    # def setData
    # author: Lan Dam
    # updated: 201702
    # display data for each path in a TablePanel placed in a tab
    # * remove all current tab
    # * loop through each path, create a new tab with that path's data
    # * add the tab to self.path_tabs to delete the tabWidget after removeTab
    # * enable save options
    def setData(self):
        """Rebuild the tab widget: one TablePanel per path in self.dataTable."""
        self.addMsg = ""
        if self.totalLines > 100000:
            self.addMsg = "It will take a couple of minutes to populate the" \
                "table(s). Please wait..."
        # remove existing tab
        while self.path_tabs != []:
            self.path_tabWidget.removeTab(len(self.path_tabs) - 1)
            self.path_tabs.pop(len(self.path_tabs) - 1)
        self.processedLine = 0
        # set tab for each path
        self.pathAll = self.dataTable.keys()
        self.pathAll.sort()
        for p in self.pathAll:
            # An empty table usually means the wrong (non-master) file.
            if self.dataTable[p] in [None, []]:
                errMsg = "There are no data for path %s.\n Please check if" \
                    "the selected PH5 is a master file."
                QtWidgets.QMessageBox.warning(self, "Error", errMsg % p)
                return
            pathWidget = TablePanel(self, p, self.dataTable[p],
                                    self.labelSets[p], self.types[p])
            self.path_tabWidget.addTab(pathWidget, p)
            self.path_tabs.append(pathWidget)
        # Data are loaded: saving becomes possible.
        self.saveKefAction.setEnabled(True)
        self.savePH5Action.setEnabled(True)
        self.saveCSVAction.setEnabled(True)
        self.statusBar.showMessage("")
        if self.totalLines > 100000:
            self.statusBar.showMessage(
                "Please be patient when clicking on each tab. Initially it"
                "takes some time to process.")
# Module-level color definitions shared by KefEdit and TablePanel.
# Name strings are used in label stylesheets; brushes color table cells.
updateColName = QtGui.QColor(245, 225, 225,
                             100).name()  # because there is a difference
# between color in label and in
deleteColName = QtGui.QColor(180, 150, 180, 100).name()  # table cells
UPDATECOLOR = QtGui.QBrush(QtGui.QColor(225, 175, 175, 100))  # light pink
DELETECOLOR = QtGui.QBrush(QtGui.QColor(70, 10, 70, 100))  # light purple
# CLASS ####################
# class TablePanel: Each path will have a tableView to display its data
# with path: path in Kef/PH5
# table: data in list
# labels: list of columns/keys
class TablePanel(QtWidgets.QMainWindow):
def __init__(self, parent, path, table, labels, types):
    """Build the editing panel for one Kef/PH5 table path.

    parent: main window; its processedLine/totalLines/statusBar/addMsg
        are used for progress reporting while the view is populated.
    path: the table's path string in the Kef/PH5 file.
    table: the original rows (kept untouched as the reference copy).
    labels: the column labels/keys.
    types: the column types, parallel with labels.
    """
    QtWidgets.QMainWindow.__init__(self)
    self.parent = parent
    self.path = path
    self.table = table
    # working copy of the data that receives the user's edits
    self.updatedTable = numpy.array(table)
    self.labels = labels
    self.types = types
    self.selectedCells = []
    self.minChangedRowId = None
    self.updateList = []  # list of rows that have been updated
    self.deleteList = []  # list of rows to delete
    self.addDataList = []  # list of data to add
    self.addCells = None
    mainFrame = QtWidgets.QFrame(self)
    self.setCentralWidget(mainFrame)
    mainLayout = QtWidgets.QVBoxLayout()
    mainFrame.setLayout(mainLayout)
    # set mainTableView
    self.mainTableView = QtWidgets.QTableWidget(self)
    self.mainTableView.installEventFilter(self)
    EXPL[self.mainTableView] = "MainView where main data are displayed."
    self.mainTableView.cellClicked.connect(self.OnMainTableClick)
    self.mainTableView.setSelectionMode(
        QtWidgets.QAbstractItemView.SingleSelection)
    mainLayout.addWidget(self.mainTableView)
    # set view range
    self.mainTableView.setColumnCount(len(self.labels))
    self.mainTableView.setRowCount(len(self.table))
    # set data into cells
    for r in range(len(self.table)):
        parent.processedLine += 1
        if parent.processedLine % 10000 == 0:
            msg = "Displaying Data on TableView: %s/%s rows. %s" % (
                parent.processedLine, parent.totalLines, parent.addMsg)
            parent.statusBar.showMessage(msg)
        for c in range(len(self.labels)):
            item = QtWidgets.QTableWidgetItem(self.table[r][c])
            item.setFlags(
                QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
            # disable cell editing
            self.mainTableView.setItem(r, c, item)
    # change to fit columns with its contents instead of having same
    # default size for all columns
    self.mainTableView.resizeColumnsToContents()
    # set horizontal Headers
    self.mainTableView.setHorizontalHeaderLabels(self.labels)
    self.mainTableView.horizontalHeader().setVisible(True)
    # set Tool tip for each horizontal header
    for c in range(len(self.labels)):
        self.mainTableView.horizontalHeaderItem(c).setToolTip(
            self.labels[c])
    # change tools ######################
    changeBox = QtWidgets.QHBoxLayout()
    mainLayout.addLayout(changeBox)
    changeBox.setSpacing(25)
    changeBox.addStretch(1)
    changeBox.addWidget(QtWidgets.QLabel("Select Types:"))
    self.singleCell = QtWidgets.QRadioButton('Single Cell')
    self.singleCell.installEventFilter(self)
    # NOTE: tooltip strings below had missing spaces at the line-joins
    # (e.g. "only.(in either") and typos ("Avalaible", "editted"); fixed.
    EXPL[self.singleCell] = "Select one Single Cell only. " \
                            "(in either MainView or AddRowView)"
    self.singleCell.clicked.connect(self.OnClearSelected)
    changeBox.addWidget(self.singleCell)
    self.allInStation = QtWidgets.QRadioButton(
        'All Similar Cells in Station')
    self.allInStation.installEventFilter(self)
    EXPL[self.allInStation] = \
        "All cells that have the same station id and " \
        "value with the clicked cell will be " \
        "selected. Available only for Array Table. " \
        "(in either MainView or AddRowView)"
    self.allInStation.clicked.connect(self.OnClearSelected)
    changeBox.addWidget(self.allInStation)
    if 'Array_t' in path:
        self.allInStation.setChecked(True)
    else:
        self.singleCell.setChecked(True)
        self.allInStation.setEnabled(False)
    self.allInColumn = QtWidgets.QRadioButton(
        'All Similar Cells in Column')
    self.allInColumn.installEventFilter(self)
    EXPL[self.allInColumn] = \
        "All cells that have the same value and " \
        "column with the clicked cell will be " \
        "selected. (in either MainView or AddRowView)"
    self.allInColumn.clicked.connect(self.OnClearSelected)
    changeBox.addWidget(self.allInColumn)
    self.changedValCtrl = QtWidgets.QLineEdit('')
    self.changedValCtrl.installEventFilter(self)
    EXPL[self.changedValCtrl] = \
        "Values in the selected items will be " \
        "changed to the value in this box when " \
        "'Change' button is clicked."
    self.changedValCtrl.setFixedWidth(400)
    changeBox.addWidget(self.changedValCtrl)
    self.changeBtn = QtWidgets.QPushButton('Change', self)
    self.changeBtn.installEventFilter(self)
    EXPL[self.changeBtn] = \
        "Apply changing values in the selected items " \
        "(in either MainView or AddRowView)."
    self.changeBtn.clicked.connect(self.OnChange)
    changeBox.addWidget(self.changeBtn)
    self.back2orgBtn = QtWidgets.QPushButton('Back to Org', self)
    self.back2orgBtn.installEventFilter(self)
    EXPL[self.back2orgBtn] = \
        "Reset selected items back to their original values."
    self.back2orgBtn.clicked.connect(self.OnBack2org)
    changeBox.addWidget(self.back2orgBtn)
    changeBox.addStretch(1)
    mainLayout.addWidget(Seperator(thick=2, orientation="horizontal"))
    # column tools ######################
    columnBox1 = QtWidgets.QHBoxLayout()
    mainLayout.addLayout(columnBox1)
    columnBox1.addStretch(1)
    columnBox1.addWidget(QtWidgets.QLabel("Selected Column"))
    self.selectedColumnCtrl = QtWidgets.QLineEdit('')
    self.selectedColumnCtrl.installEventFilter(self)
    EXPL[self.selectedColumnCtrl] = "The label of the selected column."
    self.selectedColumnCtrl.setReadOnly(True)
    self.selectedColumnCtrl.setFixedWidth(250)
    columnBox1.addWidget(self.selectedColumnCtrl)
    columnBox1.addWidget(
        QtWidgets.QLabel(" Position of Char. to change"))
    self.characterOrderCtrl = QtWidgets.QComboBox(self)
    self.characterOrderCtrl.installEventFilter(self)
    EXPL[self.characterOrderCtrl] = \
        "The first position of character(s) to change."
    self.characterOrderCtrl.currentIndexChanged.connect(
        self.OnChangeCharOrder)
    columnBox1.addWidget(self.characterOrderCtrl)
    columnBox1.addWidget(QtWidgets.QLabel(" Number of Char. to change"))
    self.noOfCharsCtrl = QtWidgets.QComboBox(self)
    self.noOfCharsCtrl.installEventFilter(self)
    EXPL[self.noOfCharsCtrl] = "The number of character(s) to change."
    self.noOfCharsCtrl.currentIndexChanged.connect(self.OnChangeNoOfChars)
    columnBox1.addWidget(self.noOfCharsCtrl)
    columnBox1.addWidget(QtWidgets.QLabel(" X"))
    self.XCtrl = QtWidgets.QLineEdit('')
    self.XCtrl.installEventFilter(self)
    EXPL[self.XCtrl] = "The value to be applied in column changing."
    self.XCtrl.textChanged.connect(self.OnXChanged)
    self.XCtrl.setFixedWidth(400)
    columnBox1.addWidget(self.XCtrl)
    columnBox1.addStretch(1)
    columnBox2 = QtWidgets.QHBoxLayout()
    mainLayout.addLayout(columnBox2)
    columnBox2.addStretch(1)
    columnBox2.setSpacing(40)
    self.changeChar2XBtn = QtWidgets.QPushButton('Change Char. to X', self)
    self.changeChar2XBtn.installEventFilter(self)
    EXPL[self.changeChar2XBtn] = \
        "Change the selected character(s) in " \
        "each item of the selected column to X."
    self.changeChar2XBtn.clicked.connect(self.OnChangeChar2X)
    columnBox2.addWidget(self.changeChar2XBtn)
    self.plusX2CharBtn = QtWidgets.QPushButton('Plus X to Char.', self)
    self.plusX2CharBtn.installEventFilter(self)
    EXPL[self.plusX2CharBtn] = \
        "Plus X to the selected character(s) in " \
        "each item of the selected column."
    self.plusX2CharBtn.clicked.connect(self.OnPlusX2Char)
    columnBox2.addWidget(self.plusX2CharBtn)
    self.changeCol2XBtn = QtWidgets.QPushButton('Change Column to X', self)
    self.changeCol2XBtn.installEventFilter(self)
    EXPL[self.changeCol2XBtn] = \
        "Change each item of the selected column to X."
    self.changeCol2XBtn.clicked.connect(self.OnChangeCol2X)
    columnBox2.addWidget(self.changeCol2XBtn)
    self.plusX2ColBtn = QtWidgets.QPushButton('Plus X to Column', self)
    self.plusX2ColBtn.installEventFilter(self)
    EXPL[self.plusX2ColBtn] = "Plus X to each item of the selected column."
    self.plusX2ColBtn.clicked.connect(self.OnPlusX2Col)
    columnBox2.addWidget(self.plusX2ColBtn)
    self.resetColBtn = QtWidgets.QPushButton('Reset Column', self)
    self.resetColBtn.installEventFilter(self)
    EXPL[self.resetColBtn] = \
        "Reset each item of the selected column back " \
        "to its original value."
    self.resetColBtn.clicked.connect(self.OnResetCol)
    columnBox2.addWidget(self.resetColBtn)
    columnBox2.addStretch(1)
    mainLayout.addWidget(Seperator(thick=2, orientation="horizontal"))
    # move tools ######################
    moveBox = QtWidgets.QHBoxLayout()
    mainLayout.addLayout(moveBox)
    moveBox.addStretch(1)
    moveBox.addWidget(QtWidgets.QLabel("Selected Row(s)"))
    self.selectedRowsCtrl = QtWidgets.QLineEdit('')
    self.selectedRowsCtrl.installEventFilter(self)
    EXPL[self.selectedRowsCtrl] = \
        "Show list of Selected Items' rows. " \
        "(User may want to look at these rows " \
        "when moving them to a position under " \
        "another row.)"
    self.selectedRowsCtrl.setReadOnly(True)
    self.selectedRowsCtrl.setFixedWidth(500)
    moveBox.addWidget(self.selectedRowsCtrl)
    moveBox.addWidget(
        QtWidgets.QLabel(" Move Selected Row(s) under Line No"))
    self.moveLineCtrl = QtWidgets.QComboBox(self)
    self.moveLineCtrl.installEventFilter(self)
    EXPL[self.moveLineCtrl] = \
        "Line Number under which the Selected Row(s) " \
        "will be moved to. "
    self.moveLineCtrl.currentIndexChanged.connect(self.OnSelectMoveLine)
    self.moveLineCtrl.clear()
    # list(range(...)) is required: in Python 3 range() is not a list
    # and cannot be concatenated to one with '+'
    lineNoList = [str(n) for n in
                  [' top '] + list(range(1, len(self.table) + 1))]
    self.moveLineCtrl.addItems(lineNoList)
    moveBox.addWidget(self.moveLineCtrl)
    self.moveBtn = QtWidgets.QPushButton('Move', self)
    self.moveBtn.installEventFilter(self)
    EXPL[self.moveBtn] = \
        "Move the Selected Row(s) to under the Selected Line No."
    self.moveBtn.setFixedWidth(90)
    self.moveBtn.clicked.connect(self.OnMove)
    moveBox.addWidget(self.moveBtn)
    moveBox.addStretch(1)
    mainLayout.addWidget(Seperator(thick=2, orientation="horizontal"))
    # delete tools ######################
    deleteBox = QtWidgets.QHBoxLayout()
    mainLayout.addLayout(deleteBox)
    deleteBox.addStretch(1)
    self.deleteBtn = QtWidgets.QPushButton(
        'Delete Row(s) on Selected Cell(s)', self)
    self.deleteBtn.installEventFilter(self)
    EXPL[self.deleteBtn] = "Mark deleted for the Selected Rows."
    self.deleteBtn.setFixedWidth(400)
    self.deleteBtn.clicked.connect(self.OnDelete)
    deleteBox.addWidget(self.deleteBtn)
    deleteBox.addSpacing(250)
    self.unDeleteBtn = QtWidgets.QPushButton('UnDelete', self)
    self.unDeleteBtn.installEventFilter(self)
    EXPL[self.unDeleteBtn] = "UnMark deleted for the Selected Rows."
    self.unDeleteBtn.setFixedWidth(400)
    self.unDeleteBtn.clicked.connect(self.OnUndelete)
    deleteBox.addWidget(self.unDeleteBtn)
    deleteBox.addStretch(1)
    mainLayout.addWidget(Seperator(thick=2, orientation="horizontal"))
    # add tools ######################
    addBox = QtWidgets.QHBoxLayout()
    mainLayout.addLayout(addBox)
    addBox.addStretch(1)
    self.addBtn = QtWidgets.QPushButton(
        'Add Row(s) with Data Copy from Selected Cell(s)', self)
    self.addBtn.installEventFilter(self)
    EXPL[self.addBtn] = \
        "Copy Selected Row(s) in MainView to the " \
        "AddRowView at the bottom."
    self.addBtn.setFixedWidth(400)
    self.addBtn.clicked.connect(self.OnAdd)
    addBox.addWidget(self.addBtn)
    addBox.addSpacing(250)
    addBox.addWidget(
        QtWidgets.QLabel("Insert Selected Row(s) under Line No"))
    self.insertLineCtrl = QtWidgets.QComboBox(self)
    self.insertLineCtrl.installEventFilter(self)
    EXPL[self.insertLineCtrl] = \
        "Select the Line No under which the " \
        "selected rows in AddRowView will be " \
        "inserted to."
    self.insertLineCtrl.currentIndexChanged.connect(self.OnSelectAddLine)
    self.insertLineCtrl.clear()
    lineNoList = [str(n) for n in
                  [' top '] + list(range(1, len(self.table) + 1))]
    self.insertLineCtrl.addItems(lineNoList)
    addBox.addWidget(self.insertLineCtrl)
    self.insertBtn = QtWidgets.QPushButton('Insert', self)
    self.insertBtn.installEventFilter(self)
    EXPL[self.insertBtn] = \
        "Move the Selected Row(s) from AddRowView to " \
        "under the Selected Line No in MainView."
    self.insertBtn.setFixedWidth(90)
    self.insertBtn.clicked.connect(self.OnInsert)
    addBox.addWidget(self.insertBtn)
    addBox.addStretch(1)
    # addTableView: to view all rows to add
    self.addTableView = QtWidgets.QTableWidget(self)
    self.addTableView.installEventFilter(self)
    EXPL[self.addTableView] = \
        "AddRowView, where the rows to be added to " \
        "MainView can be edited before adding to " \
        "table."
    self.addTableView.setMaximumHeight(200)
    self.addTableView.cellClicked.connect(self.OnAddTableClick)
    self.addTableView.setSelectionMode(
        QtWidgets.QAbstractItemView.SingleSelection)
    mainLayout.addWidget(self.addTableView)
    self._setButtonsDisabled()
def eventFilter(self, object, event):
    """Show the widget's EXPL tooltip when the mouse enters it.

    Qt calls this for every event on watched widgets; returning True
    marks the event as handled.
    """
    if event.type() == QtCore.QEvent.Enter:
        # membership test directly on the dict; .keys() was redundant
        if object not in EXPL:
            return False
        P = object.pos()
        # show the explanation slightly below the widget
        QtWidgets.QToolTip.showText(
            self.mapToGlobal(QtCore.QPoint(P.x(), P.y() + 20)),
            EXPL[object])
        return True
    return False
# def _setButtonsDisabled
# author: Lan Dam
# updated: 201703
# disable all buttons (at the beginning and when the selection criteria change)
def _setButtonsDisabled(self):
    """Disable every editing control.

    Called at start-up and whenever the selection criteria change; the
    click handlers re-enable the controls that apply to the selection.
    """
    self.changeBtn.setEnabled(False)
    self.moveBtn.setEnabled(False)
    self.moveLineCtrl.setEnabled(False)
    self.deleteBtn.setEnabled(False)
    self.unDeleteBtn.setEnabled(False)
    self.addBtn.setEnabled(False)
    self.insertBtn.setEnabled(False)
    self.insertLineCtrl.setEnabled(False)
    self.characterOrderCtrl.setEnabled(False)
    # changeChar2XBtn was disabled twice in the original; once is enough
    self.changeChar2XBtn.setEnabled(False)
    self.plusX2CharBtn.setEnabled(False)
    self.plusX2ColBtn.setEnabled(False)
    self.changeCol2XBtn.setEnabled(False)
    self.XCtrl.setEnabled(False)
    self.back2orgBtn.setEnabled(False)
    self.resetColBtn.setEnabled(False)
###############################
# def OnClearSelected
# author: Lan Dam
# updated: 201703
# Clear all Selected cells on both MainTableView and addTableView
def OnClearSelected(self, event):
    """Drop every selection in both MainView and AddRowView and disable
    the editing controls until a new cell is clicked."""
    for view in (self.mainTableView, self.addTableView):
        view.clearSelection()
    self.selectedCells = []
    self.addCells = None
    self._setButtonsDisabled()
###############################
# def OnMainTableClick
# author: Lan Dam
# updated: 201703
# when a cell is selected, mark all cells on MainTableView
# with the same value that match the selection criteria chosen
def OnMainTableClick(self, row, column):
    """Handle a click on a MainView cell.

    Marks every cell matching the clicked one under the current
    selection criteria (single cell / same station / same column),
    fills changedValCtrl and the column tools with the clicked value,
    and enables/disables the edit buttons according to what is selected
    and whether the selected rows are marked deleted.
    """
    # print "OnMainTableClick"
    self.changeBtn.setEnabled(True)
    self.insertBtn.setEnabled(False)
    self.insertLineCtrl.setEnabled(False)
    self.XCtrl.setEnabled(True)
    self.changeCol2XBtn.setEnabled(True)
    self.back2orgBtn.setEnabled(True)
    self.resetColBtn.setEnabled(True)
    # clear selection in addTableView if there is some
    if self.addCells is not None:
        self.addTableView.clearSelection()
        self.addCells = None  # so that OnChange will take effect mainTable
    # Identify which cell(s) are selected
    value = self.mainTableView.item(row, column).text()
    if self.singleCell.isChecked():
        self.selectedCells = [(row, column)]
        selectedRows = [str(row + 1)]
    elif self.allInStation.isChecked():
        # get all entries that have the same stationName with the
        # selected cell
        statCol = self.labels.index('id_s')
        statName = self.mainTableView.item(row, statCol).text()
        # statRowList: all rows with station id similar to selected row's
        # stationid
        statRowList = [i for i in range(len(self.updatedTable)) if
                       self.updatedTable[i][statCol] == statName]
        # mark selected for that station's cells that have the same value
        self.selectedCells, selectedRows =\
            self._selectMatchInList(value,
                                    column,
                                    statRowList,
                                    self.mainTableView)
    elif self.allInColumn.isChecked():
        # mark selected for that column cells that have the same value
        self.selectedCells, selectedRows =\
            self._selectMatchInList(value,
                                    column,
                                    range(len(self.table)),
                                    self.mainTableView)
    self.changedValCtrl.setText(value)
    self.selectedRowsCtrl.setText('-'.join(selectedRows))
    # column tools
    self.selectedCol = column
    self.selectedColumnCtrl.setText(self.labels[column])
    self._afterUpdateCol()
    # Identify which options should be enable
    if self.allInStation.isChecked() or self.singleCell.isChecked():
        # enable add and delete options
        self.addBtn.setEnabled(True)
        # noDel: no selected row is deleted; undelApplicable: every
        # selected row is deleted (mixed selections disable both paths)
        noDel = True
        undelApplicable = True
        for r, c in self.selectedCells:
            if r not in self.deleteList:
                undelApplicable = False
            else:
                noDel = False
        if undelApplicable:  # all rows have been deleted allow
            # undelete option
            self.unDeleteBtn.setEnabled(True)
            self.deleteBtn.setEnabled(False)
        else:
            self.unDeleteBtn.setEnabled(False)
            self.deleteBtn.setEnabled(True)
        if noDel:  # no rows have been deleted allow move option
            self.moveBtn.setEnabled(True)
            self.moveLineCtrl.setEnabled(True)
        else:
            self.moveBtn.setEnabled(False)
            self.moveLineCtrl.setEnabled(False)
    else:
        # disable move, add, delete, undelete options when too many
        # cells are selected
        self.moveBtn.setEnabled(False)
        self.moveLineCtrl.setEnabled(False)
        self.addBtn.setEnabled(False)
        self.deleteBtn.setEnabled(False)
        self.unDeleteBtn.setEnabled(False)
# def OnAddTableClick
# author: Lan Dam
# updated: 201703
# when a cell is selected, mark all cells on AddTableView
# with the same value that match the selection criteria chosen
# * disable all options except change and insert options
# * clear selection in mainTableView
# * identify which cell(s) are selected
# * set the selected value in changedValCtrl
def OnAddTableClick(self, row, column):
    """Handle a click on an AddRowView cell.

    Disables every option except change/insert, clears the MainView
    selection, marks the AddRowView cells matching the clicked one
    under the current selection criteria, and fills changedValCtrl
    with the clicked value.
    """
    self.changeBtn.setEnabled(True)
    self.moveBtn.setEnabled(False)
    self.moveLineCtrl.setEnabled(False)
    self.addBtn.setEnabled(False)
    self.deleteBtn.setEnabled(False)
    self.unDeleteBtn.setEnabled(False)
    self.insertBtn.setEnabled(True)
    self.insertLineCtrl.setEnabled(True)
    self.characterOrderCtrl.setEnabled(False)
    self.XCtrl.setEnabled(False)
    self.changeCol2XBtn.setEnabled(False)
    self.changeChar2XBtn.setEnabled(False)
    self.back2orgBtn.setEnabled(False)
    self.resetColBtn.setEnabled(False)
    # clear selection in mainTableView
    self.mainTableView.clearSelection()
    self.selectedCells = []
    value = self.addTableView.item(row, column).text()
    if self.singleCell.isChecked():
        self.addCells = [(row, column)]
    elif self.allInStation.isChecked():
        # get all entries that have the same stationName with the
        # selected cell
        statCol = self.labels.index('id_s')
        statName = self.addTableView.item(row, statCol).text()
        statRowList = [i for i in range(len(self.addDataList)) if
                       self.addDataList[i][statCol] == statName]
        # mark selected for that station's cells that have the same value
        self.addCells, selectedRows =\
            self._selectMatchInList(value,
                                    column,
                                    statRowList,
                                    self.addTableView)
    else:
        # mark selected for that column cells that have the same value
        self.addCells, selectedRows =\
            self._selectMatchInList(value,
                                    column,
                                    range(len(self.addDataList)),
                                    self.addTableView)
    self.changedValCtrl.setText(value)
###############################
# def _selectMatchInList
# author: Lan Dam
# updated: 201703
# mark selected for all cells in _list that match the given value
# return list of cells selected
def _selectMatchInList(self, value, column, _list, tableView):
    """Mark as selected every cell of `column`, over the rows in `_list`
    of `tableView`, whose text equals `value`.

    Returns (selectedCells, selectedRows): a list of (row, column)
    tuples and the matching 1-based row numbers as strings.
    """
    matchedCells = []
    matchedRows = []
    for rowId in _list:
        cell = tableView.item(rowId, column)
        if cell.text() == value:
            cell.setSelected(True)
            matchedCells.append((rowId, column))
            matchedRows.append(str(rowId + 1))
    return matchedCells, matchedRows
###############################
# def OnXChanged
# author: Lan Dam
# updated: 201705
# check condition to decide to enable plusX__ buttons in need
# if XCtrl is integer:
# * enable plusX2CharBtn if all chars at the selected position(s) of
# the selected column are digit
# * enable plusX2ColBtn if type of col is int or float, in case of the
# str type, check if all column's values are digit
def OnXChanged(self, arg):
    """Enable/disable the plusX buttons whenever the X box changes.

    If X parses as an integer:
    * plusX2CharBtn is enabled when every character at the selected
      position(s) of the selected column is a digit (nondigitList empty);
    * plusX2ColBtn is enabled when the column type is int/float, or, for
      str columns, when every column value is all digits.
    """
    # print "OnXChanged:", arg
    self.plusX2CharBtn.setEnabled(False)
    self.plusX2ColBtn.setEnabled(False)
    try:
        int(self.XCtrl.text())
    # narrowed from BaseException: int(str) only raises ValueError, and
    # BaseException would also swallow KeyboardInterrupt/SystemExit
    except ValueError:
        return
    # NOTE(review): assumes nondigitList/selectedColList were set by a
    # prior column selection — confirm callers guarantee that
    if self.nondigitList == []:
        self.plusX2CharBtn.setEnabled(True)
    type_ = self.types[
        self.labels.index(str(self.selectedColumnCtrl.text()))]
    if type_ in [float, int]:
        self.plusX2ColBtn.setEnabled(True)
    else:
        col_nondigitList = [colVal for colVal in self.selectedColList if
                            not colVal.isdigit()]
        if col_nondigitList == []:
            self.plusX2ColBtn.setEnabled(True)
###############################
# def OnChangeCharOrder
# author: Lan Dam
# updated: 201705
# when characterOrderCtrl is changed:
# * change item list of noOfCharsCtrl
# * reset nondigitList (list of chars at the selected position(s) of
# the selected column that are non-digit)
def OnChangeCharOrder(self, arg):
    """Rebuild noOfCharsCtrl's choices when the character position
    changes, and reset the cached non-digit list."""
    if not self.characterOrderCtrl.isEnabled():
        return
    self.noOfCharsCtrl.clear()
    self.nondigitList = []
    # at most (item length - position) characters can still be changed
    maxChars = len(self.selectedColList[0]) - arg
    self.noOfCharsCtrl.addItems(
        [str(count) for count in range(1, maxChars + 1)])
###############################
# def OnChangeNoOfChars
# author: Lan Dam
# updated: 201705
# when select characterOrderCtrl, build up nondigitList (list of chars
# at the selected position(s) of the selected column that are non-digit)
# if nondigitList is [] (all are digits), enable plusX2CharBtn according
# to XCtrl
def OnChangeNoOfChars(self, arg):
    """Rebuild nondigitList when the number of characters changes.

    nondigitList holds the row indexes whose selected substring is not
    all digits; when it is empty and X is an integer, plusX2CharBtn is
    enabled.
    """
    order = self.characterOrderCtrl.currentIndex()
    noOfChars = arg + 1
    self.nondigitList = [i for i in range(len(self.selectedColList))
                         if not str(
            self.selectedColList[i][order:order + noOfChars]).isdigit()]
    self.plusX2CharBtn.setEnabled(False)
    if self.nondigitList == []:
        try:
            int(self.XCtrl.text())
            self.plusX2CharBtn.setEnabled(True)
        # narrowed from BaseException: int(str) only raises ValueError
        except ValueError:
            pass
###############################
# def OnChangeChar2X
# author: Lan Dam
# updated: 201705
# change selected chars in selected column to XCtrl.text()
# convert new col value to right type of column
# need to do it through newColumnList to be able to keep original
# value in case checking type has error
# then _updateColItem
def OnChangeChar2X(self):
    """Replace the selected character(s) of every value in the selected
    column with XCtrl's text, then update the column items.

    The conversion goes through newColumnList first so the original
    values are kept intact if a type check fails part-way through.
    """
    if not self._checkEmpty("character"):
        return
    # check type
    type_ = self.types[
        self.labels.index(str(self.selectedColumnCtrl.text()))]
    try:
        index = 0
        newColumnList = []
        for val in self.selectedColList:
            val = str(val)
            order = self.characterOrderCtrl.currentIndex()
            noOfChars = self.noOfCharsCtrl.currentIndex() + 1
            if len(str(self.XCtrl.text())) != noOfChars:
                # (spaces added at the string-continuation joins; the
                # original rendered "changeis/are")
                msg = "On line %s, the character(s) need to change " \
                      "is/are '%s'," + \
                      "\nwhile the replace character(s) is/are '%s' " \
                      "of which length is different."
                QtWidgets.QMessageBox.warning(self, "Error", msg % (
                    index + 1, val[order:order + noOfChars],
                    str(self.XCtrl.text())))
                return
            val = list(val)
            val[order:order + noOfChars] = str(self.XCtrl.text())
            val = ''.join(val)
            newColumnList.append(type_(val))
            index += 1
    except ValueError:
        msg = "The new value of '%s', line %s is '%s' which doesn't " \
              "match the required type: %s"
        QtWidgets.QMessageBox.warning(self, "Error", msg % (
            self.selectedColumnCtrl.text(), index + 1, val,
            type_.__name__))
        return
    for r in range(len(self.updatedTable)):
        self._updateColItem(r, newColumnList[r])
    self._afterUpdateCol()
###############################
# def OnPlusX2Char
# author: Lan Dam
# updated: 201705
# plus selected chars in selected column to XCtrl.text()
# check the number of new chars is the same
# convert new col value to right type of column
# need to do it through newColumnList to be able to keep original value
# in case checking type has error
# then _updateColItem
def OnPlusX2Char(self):
    """Add X to the selected character(s) of every value in the selected
    column.

    The new substring must not be longer than the one it replaces (it is
    zero-padded if shorter). The conversion goes through newColumnList
    first so the original values are kept intact if a type check fails
    part-way through.
    """
    type_ = self.types[
        self.labels.index(str(self.selectedColumnCtrl.text()))]
    try:
        index = 0
        newColumnList = []
        for val in self.selectedColList:
            val = str(val)
            order = self.characterOrderCtrl.currentIndex()
            noOfChars = self.noOfCharsCtrl.currentIndex() + 1
            insertChars = str(
                int(val[order:order + noOfChars]) + int(self.XCtrl.text()))
            if len(insertChars) > noOfChars:
                # (spaces added at the string-continuation joins; the
                # original rendered "tochange" and "ofwhich")
                msg = "On line %s, the character(s) need to " \
                      "change is '%s'," + \
                      "\nwhile the replace character(s) is/are %s of " \
                      "which length is different."
                QtWidgets.QMessageBox.warning(self, "Error", msg % (
                    index + 1, val[order:order + noOfChars], insertChars))
                return
            val = list(val)
            val[order:order + noOfChars] = insertChars.zfill(noOfChars)
            val = ''.join(val)
            newColumnList.append(type_(val))
            index += 1
    except ValueError:
        msg = "The new value of '%s', line %s is '%s' which doesn't " \
              "match the required type: %s"
        QtWidgets.QMessageBox.warning(self, "Error", msg % (
            self.selectedColumnCtrl.text(), index + 1, val,
            type_.__name__))
        return
    for r in range(len(self.updatedTable)):
        self._updateColItem(r, newColumnList[r])
    self._afterUpdateCol()
###############################
# def _checkEmpty
# author: Lan Dam
# updated: 201705
# return False if the value in XCtrl is empty
def _checkEmpty(self, ctrlName):
    """Ask for confirmation when the X box is blank.

    Returns False only when X is empty AND the user cancels;
    True otherwise.
    """
    if str(self.XCtrl.text()).strip() == "":
        msg = "The value in the X box is '%s'.\n" \
              "Are you sure you want to change the selected %s to it?" % (
                  self.XCtrl.text(), ctrlName)
        result = QtWidgets.QMessageBox.question(
            self, "Are you sure?", msg,
            QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.Cancel)
        # fixed typo: was QtWidgets.QMessagedBox.Cancel, which raised
        # AttributeError whenever this comparison ran
        if result == QtWidgets.QMessageBox.Cancel:
            return False
    return True
###############################
# def OnChangeCol2X
# author: Lan Dam
# updated: 201705
# change the cells value of the selectedCol to the value in XCtrl
def OnChangeCol2X(self):
    """Set every cell of the selected column to the value in XCtrl,
    converted to the column's declared type; warn and abort if the
    conversion fails."""
    if not self._checkEmpty("column"):
        return
    colType = self.types[
        self.labels.index(str(self.selectedColumnCtrl.text()))]
    try:
        newVal = colType(self.XCtrl.text())
    except ValueError:
        msg = "The new value of all cells in '%s' is '%s'," \
              "\nwhich doesn't match the required type: %s"
        QtWidgets.QMessageBox.warning(self, "Error", msg % (
            self.selectedColumnCtrl.text(), self.XCtrl.text(),
            colType.__name__))
        return
    for rowId in range(len(self.updatedTable)):
        self._updateColItem(rowId, newVal)
    self._afterUpdateCol()
###############################
# def OnResetCol
# author: Lan Dam
# updated: 201705
# reset the values of cells on the selectedCol back to their original
# values
def OnResetCol(self):
    """Restore every cell of the selected column to its original value
    from self.table."""
    for rowId in range(len(self.updatedTable)):
        self._updateColItem(rowId, self.table[rowId][self.selectedCol])
    self._afterUpdateCol()
###############################
# def OnPlusX2Col
# author: Lan Dam
# updated: 201705
# plus the value in X to all cell in the selectedCol
# plus as int first to avoid the result in float format then as float
# (the button only available if value in XCtrl and in selectedColList
# are number)
def OnPlusX2Col(self):
    """Add X to every cell of the selected column.

    Addition is tried as int first so results do not pick up a float
    format, then as float. (The button is only enabled when XCtrl and
    the column values are numeric.)
    """
    type_ = self.types[
        self.labels.index(str(self.selectedColumnCtrl.text()))]
    newColumnList = []
    for r in range(len(self.updatedTable)):
        try:
            newVal = int(self.XCtrl.text()) + int(self.selectedColList[r])
        except ValueError:
            newVal = float(self.XCtrl.text()) + float(
                self.selectedColList[r])
        try:
            newColumnList.append(type_(newVal))
        except ValueError:
            msg = "The new value of %s, line %s is %s," \
                  "\nwhich doesn't match the required type: %s"
            # bug fix: the original used a stale `index` that was never
            # incremented, so the reported line number was always 1
            QtWidgets.QMessageBox.warning(self, "Error", msg % (
                self.selectedColumnCtrl.text(), r + 1, newVal,
                type_.__name__))
            return
    for r in range(len(self.updatedTable)):
        self._updateColItem(r, newColumnList[r])
    self._afterUpdateCol()
###############################
# def _updateColItem
# author: Lan Dam
# updated: 201705
# update the item on given row, selectedCol to newVal
# change color depend on the changed value is the original value or not
def _updateColItem(self, r, newVal):
    """Write newVal into row r of the selected column (both the view and
    updatedTable) and recolor the cell/row depending on whether the row
    still differs from the original data."""
    cell = self.mainTableView.item(r, self.selectedCol)
    cell.setText(str(newVal))
    self.updatedTable[r][self.selectedCol] = newVal
    if self.table[r][self.selectedCol] != newVal:
        # value differs from the original: red text, row flagged updated
        cell.setForeground(QtCore.Qt.red)
        if r not in self.deleteList:
            self._changeRowBackground(r, UPDATECOLOR)
        if r not in self.updateList:
            self.updateList.append(r)
    else:
        cell.setForeground(QtCore.Qt.black)
        # only clear the row's update state when no cell differs anymore
        rowStillChanged = any(
            self.updatedTable[r][i] != self.table[r][i]
            for i in range(len(self.labels)))
        if not rowStillChanged:
            if r not in self.deleteList:
                self._changeRowBackground(r, QtCore.Qt.white)
            if r in self.updateList:
                self.updateList.remove(r)
# def _afterUpdateCol
# author: Lan Dam
# updated: 201705
# update selectedColList
# set characterOrderCtrl, changeChar2XBtn, plusX2ColBtn, plusX2Col
# depend on type and length of the selectedColList
def _afterUpdateCol(self):
    """Refresh selectedColList from updatedTable and enable the
    character/column tools according to the column's content:
    character tools need every (stripped) value to share one length;
    plusX2Col needs a digit X and an int/float column type."""
    self.selectedColList = self.updatedTable[:, self.selectedCol]
    sameLength = all(
        len(item.strip()) == len(self.selectedColList[0].strip())
        for item in self.selectedColList)
    if sameLength:
        self.changeChar2XBtn.setEnabled(True)
        self.characterOrderCtrl.setEnabled(True)
        self.characterOrderCtrl.clear()
        self.characterOrderCtrl.addItems(
            [str(pos + 1) for pos in range(len(self.selectedColList[0]))])
    else:
        self.characterOrderCtrl.clear()
        self.characterOrderCtrl.setEnabled(False)
        self.changeChar2XBtn.setEnabled(False)
    self.plusX2ColBtn.setEnabled(False)
    colType = self.types[
        self.labels.index(str(self.selectedColumnCtrl.text()))]
    if str(self.XCtrl.text()).isdigit() and colType in [float, int]:
        self.plusX2ColBtn.setEnabled(True)
###############################
# def OnChange
# author: Lan Dam
# updated: 201703
# Change the values of the selected cells into the value in changedValCtrl
# on MainTableView if self.addCells == None
# * not change if there are any rows deleted
# * change text in cell(s)
# * if the change is back to the original value, cell color will be
# reset (then rows => remove from updateList)
# * else: change foreground color of cell(s), change background color
# of row(s) => add to updateList if not in updateList yet
# => get the rowdata from the table with the new value at the col,
# but the type keep the same
# on AddTableView if self.addCells != None
# * change text & color in cell(s) and change the column value to the
# one in changedValCtrl but type keep the same
def OnChange(self, event):
    """Apply the value in changedValCtrl to the selected cells.

    When addCells is None the change targets MainView: deleted rows are
    skipped with a warning; updatedTable stores the new value converted
    to the cell's current type; cell/row colors and updateList track
    whether the row still differs from the original. Otherwise the
    change targets AddRowView and only updates addDataList plus the
    cell's text and color.
    """
    if self.addCells is None:
        for r, c in self.selectedCells:
            if r in self.deleteList:
                msg = "Because the row %s has been deleted, cell" \
                      "(%s,%s) can't be changed." % (
                          r + 1, r + 1, c + 1)
                QtWidgets.QMessageBox.warning(self, "Warning", msg)
                continue
            currItem = self.mainTableView.item(r, c)
            currItem.setText(self.changedValCtrl.text())
            # keep the cell's existing type when storing the new value
            self.updatedTable[r][c] = type(self.updatedTable[r][c])(
                self.changedValCtrl.text())
            if currItem.text() == self.table[r][c]:
                # changed back to the original value: reset colors, and
                # drop the row from updateList if nothing else differs
                currItem.setForeground(QtCore.Qt.black)
                updated = False
                for i in range(len(self.labels)):
                    if self.updatedTable[r][i] != self.table[r][i]:
                        updated = True
                        break
                if updated is False:
                    self._changeRowBackground(r, QtCore.Qt.white)
                    if r in self.updateList:
                        self.updateList.remove(r)
            else:
                currItem.setForeground(QtCore.Qt.red)
                self._changeRowBackground(r, UPDATECOLOR)
                if r not in self.updateList:
                    self.updateList.append(r)
    else:
        for r, c in self.addCells:
            currItem = self.addTableView.item(r, c)
            currItem.setText(self.changedValCtrl.text())
            currItem.setForeground(QtCore.Qt.red)
            self.addDataList[r][c] = type(self.addDataList[r][c])(
                self.changedValCtrl.text())
###############################
# def OnBack2org
# author: Lan Dam
# updated: 201705
# reset all changes in selectedCells back to original (self.table)
# accept change in delete rows, but still keep them as deleted
# change the text color to black (unchanged)
# but the row color change or not depend on other cells in the row
def OnBack2org(self, event):
    """Revert every selected cell to its original value from self.table.

    Deleted rows accept the revert but stay marked deleted. Text turns
    black; the row background only clears when no cell of the row still
    differs from the original.
    """
    for rowId, colId in self.selectedCells:
        cell = self.mainTableView.item(rowId, colId)
        cell.setText(str(self.table[rowId][colId]))
        self.updatedTable[rowId][colId] = self.table[rowId][colId]
        cell.setForeground(QtCore.Qt.black)
        rowStillChanged = any(
            self.updatedTable[rowId][i] != self.table[rowId][i]
            for i in range(len(self.labels)))
        if not rowStillChanged:
            if rowId not in self.deleteList:
                self._changeRowBackground(rowId, QtCore.Qt.white)
            if rowId in self.updateList:
                self.updateList.remove(rowId)
###############################
# def OnDelete
# author: Lan Dam
# updated: 201703
# * change color of selected Cells to DELETECOLOR
# * add those rows to self.deleteList
# * disable delete option, enable undelete option
def OnDelete(self, event):
    """Mark the rows of the selected cells as deleted: recolor them with
    DELETECOLOR, record them in deleteList, and swap the delete/undelete
    button states."""
    for rowId, _colId in self.selectedCells:
        self._changeRowBackground(rowId, DELETECOLOR)
        if rowId not in self.deleteList:
            self.deleteList.append(rowId)
    self.deleteBtn.setEnabled(False)
    self.unDeleteBtn.setEnabled(True)
###############################
# def OnUndelete
# author: Lan Dam
# updated: 201703
# * Change selected delete rows background back to white
# * remove those rows from self.deleteList
# * enable delete option, disable undelete option
def OnUndelete(self, event):
    """Unmark deletion for the rows of the selected cells: restore the
    row color (update color if the row still has edits, white otherwise),
    drop the rows from deleteList, and swap the button states."""
    for rowId, _colId in self.selectedCells:
        background = (UPDATECOLOR if rowId in self.updateList
                      else QtCore.Qt.white)
        self._changeRowBackground(rowId, background)
        if rowId in self.deleteList:
            self.deleteList.remove(rowId)
    self.deleteBtn.setEnabled(True)
    self.unDeleteBtn.setEnabled(False)
###############################
# def OnAdd
# author: Lan Dam
# updated: 201703
# Copy the selected Rows from MaintableView into the AddTableView
# * append selected rows to self.addDataList
# * clear AddTableView and display addDataList in AddTableView
def OnAdd(self, event):
    """Copy the currently selected main-table rows into addTableView."""
    for row, column in self.selectedCells:
        # deep copy so later edits in the main table don't leak through
        self.addDataList.append(deepcopy(self.updatedTable[row]))
    # clear existing data
    self.addTableView.clear()
    # set view range
    self.addTableView.setColumnCount(len(self.labels))
    self.addTableView.setRowCount(len(self.addDataList))
    # set data into cells
    for r in range(len(self.addDataList)):
        for c in range(len(self.labels)):
            item = QtWidgets.QTableWidgetItem(self.addDataList[r][c])
            item.setFlags(
                QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
            # disable cell editing
            self.addTableView.setItem(r, c, item)
    # change to fit columns with its contents instead of having same
    # default size for all columns
    self.addTableView.resizeColumnsToContents()
    # set horizontal Headers
    self.addTableView.setHorizontalHeaderLabels(self.labels)
    self.addTableView.horizontalHeader().setVisible(True)
    # set Tool tip for each horizontal header
    for c in range(len(self.labels)):
        self.addTableView.horizontalHeaderItem(c).setToolTip(
            self.labels[c])
###############################
# def OnSelectMoveLine
# author: Lan Dam
# updated: 201703
# set the row selected according to what line show in moveLineCtrl
def OnSelectMoveLine(self, index):
    """Highlight the main-table row named in the move-line combo box."""
    self._selectLine(self.moveLineCtrl)
###############################
# def OnSelectAddLine
# author: Lan Dam
# updated: 201703
# set the row selected according to what line show in insertLineCtrl
def OnSelectAddLine(self, index):
    """Highlight the main-table row named in the insert-line combo box."""
    self._selectLine(self.insertLineCtrl)
###############################
# def _selectLine
# author: Lan Dam
# updated: 201703
# set the row selected according to what line show in passed lineCtrl
# * if val=top => select line before the first line
# * else: highlight the selected line
def _selectLine(self, lineCtrl):
    """Select the main-table row shown in the given combo box control."""
    val = str(lineCtrl.currentText())
    if val == " top ":
        # the special "top" entry: nothing to highlight, scroll to start
        self.mainTableView.clearSelection()
        self.mainTableView.scrollToTop()
    else:
        # temporarily allow extended selection so selectRow() replaces
        # the highlight, then restore single-row selection mode
        self.mainTableView.setSelectionMode(
            QtWidgets.QAbstractItemView.ExtendedSelection)
        lineId = int(val) - 1  # combo box shows 1-based line numbers
        self.mainTableView.selectRow(lineId)
        # self.mainTableView.scrollTo(self.mainTableView.item(lineId,0))
        self.mainTableView.setSelectionMode(
            QtWidgets.QAbstractItemView.SingleSelection)
###############################
# def OnInsert
# author: Lan Dam
# updated: 201704
# remove selectedRows from self.addDataList + AddTableView and add to
# MainTableView
# * pop selected from their postions in addTableView
# * insert into new postions in MainTableView
def OnInsert(self, event):
    """Move the rows selected in addTableView into mainTableView at the
    line chosen in insertLineCtrl."""
    insertLineId = self.insertLineCtrl.currentIndex()
    # track the smallest changed row so UpdatePH5 knows where the PH5
    # table must be truncated and re-appended from
    if self.minChangedRowId is None:
        self.minChangedRowId = insertLineId
    if self.minChangedRowId > insertLineId:
        self.minChangedRowId = insertLineId
    # sort descending so removals don't shift the still-pending indices
    self.addCells.sort(key=itemgetter(0), reverse=True)
    # identify data to insert to mainTableView and
    # remove data from its current position in addDataList and addTableView
    insertedData = []
    for r, c in self.addCells:
        rowData = deepcopy(self.addDataList[r])
        insertedData.append(rowData)
        # NOTE(review): remove() deletes the first EQUAL row; if
        # addDataList holds duplicate rows the wrong index may go --
        # confirm duplicates cannot occur here
        self.addDataList.remove(rowData)
        self.addTableView.removeRow(r)
    # insert the data to MainTableView (same data serves as both the
    # original and the updated copy for brand-new rows)
    self._insertDataToTable(insertLineId, insertedData, insertedData,
                            len(insertedData))
###############################
# def OnMove
# author: Lan Dam
# updated: 201704
# move selectedRows to new positions in MainTableView
# * pop selected from their postions in MainTableView
# * insert into new postions in MainTableView
def OnMove(self, event):
    """Move the selected main-table rows to the line chosen in
    moveLineCtrl, keeping table/updatedTable/bookkeeping lists in sync."""
    # sort descending so removals don't shift the remaining indices
    self.selectedCells.sort(key=itemgetter(0), reverse=True)
    selectedRows = [r[0] for r in self.selectedCells]
    moveLineId = self.moveLineCtrl.currentIndex()
    if moveLineId in selectedRows:
        msg = "Cannot move the select row(s) to a line No \nthat in the" \
            "range of the selected rows"
        QtWidgets.QMessageBox.warning(self, "Warning", msg)
        return
    # reidentify new moveLineId when pop the selected Rows from table
    if moveLineId > max(selectedRows):
        moveLineId -= len(selectedRows)
    # remember the smallest changed row for UpdatePH5's rewrite point
    minId = min(selectedRows + [moveLineId])
    if self.minChangedRowId is None:
        self.minChangedRowId = minId
    if self.minChangedRowId > minId:
        self.minChangedRowId = minId
    # identify data to insert to mainTableView and
    # remove data from its current position in table and mainTableView
    insertedData = []
    insertedUpdData = []
    for r in selectedRows:
        rowData = deepcopy(self.table[r])
        insertedData.append(rowData)
        rowData = deepcopy(self.updatedTable[r])
        insertedUpdData.append(rowData)
        self.table.pop(r)
        self.updatedTable = numpy.delete(self.updatedTable, (r), axis=0)
        self.mainTableView.removeRow(r)
    # insert the data to MainTableView
    self._insertDataToTable(moveLineId, insertedData, insertedUpdData,
                            len(selectedRows), max(selectedRows))
###############################
# def _insertDataToTable
# author: Lan Dam
# updated: 201704
# insert the passed insertData into the passed lineId in MainTableView
def _insertDataToTable(self, lineId, insertData, insertedUpdData,
                       lenInsert, maxRemovedRow=None):
    """Insert rows at *lineId* into self.table, self.updatedTable and
    mainTableView, then shift the row indices kept in deleteList and
    updateList.

    Parameters
    ----------
    lineId : int - target row index in the main table
    insertData : list - original-value rows (backward order)
    insertedUpdData : list - edited-value rows, parallel to insertData
    lenInsert : int - number of rows inserted
    maxRemovedRow : int or None - highest row index removed beforehand
        (set by OnMove); None when rows were only added (OnInsert)
    """
    for i in range(len(insertData)):
        # insertData is in backward order, so repeatedly inserting at the
        # same lineId keeps the rows in their original relative order
        self.table.insert(lineId, insertData[i])
        self.updatedTable = numpy.insert(self.updatedTable, lineId,
                                         insertedUpdData[i], 0)
        # create new empty row in mainTableView and fill it in
        self.mainTableView.insertRow(lineId)
        for c in range(len(self.labels)):
            item = QtWidgets.QTableWidgetItem(insertedUpdData[i][c])
            item.setFlags(
                QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
            # disable cell editing
            self.mainTableView.setItem(lineId, c, item)
    # update row indices bookkept in deleteList and updateList
    lineId -= 1
    for i in range(len(self.deleteList)):
        # delete row ids should be moved downward when there are rows
        # inserted before them
        if self.deleteList[i] > lineId:
            self.deleteList[i] += lenInsert
        # delete row ids should be moved upward when there are rows
        # removed before them
        if maxRemovedRow is not None and\
                self.deleteList[i] > maxRemovedRow:
            self.deleteList[i] -= lenInsert
    # BUG FIX: the original looped "for k in self.updateList" while
    # indexing with the stale variable i left over from the previous
    # loop, so updateList entries were adjusted at the wrong positions
    # (or crashed); iterate over indices instead.
    for i in range(len(self.updateList)):
        # update row ids should be moved downward when there are rows
        # inserted before them
        if self.updateList[i] > lineId:
            self.updateList[i] += lenInsert
        # update row ids should be moved upward when there are rows
        # removed before them
        if maxRemovedRow is not None and\
                self.updateList[i] > maxRemovedRow:
            self.updateList[i] -= lenInsert
###############################
# def _changeRowBackground
# author: Lan Dam
# updated: 201703
def _changeRowBackground(self, row, color):
    """Paint every cell of *row* in mainTableView with *color*."""
    columnTotal = self.mainTableView.columnCount()
    for col in range(columnTotal):
        self.mainTableView.item(row, col).setBackground(color)
###############################
# def ToString
# author: Lan Dam
# updated: 201703
# convert the data in mainTableViews to string in kef format
# * use data from updatedTable
# * if the row in deleteList, skip
def ToString(self, currText, tableCount):
    """Append this table's rows (minus deleted ones) to *currText* in
    KEF format and return (currText, tableCount).

    Every 100th row a progress message is shown on the parent's status
    bar and echoed to stdout.
    """
    for r in range(len(self.updatedTable)):
        if r in self.deleteList:
            continue
        tableCount += 1
        if tableCount % 100 == 0:
            msg = "Saving Kef file: %s/%s" % (
                tableCount, self.parent.totalLines)
            self.parent.statusBar.showMessage(msg)
            # BUG FIX: was a Python-2-only print statement, a
            # SyntaxError under Python 3
            print(msg)
        currText += "\n# Table row %d" % tableCount
        # Print table name
        currText += "\n" + self.path
        for c in range(len(self.labels)):
            currText += "\n\t%s=%s" % (
                self.labels[c], self.updatedTable[r][c])
    return currText, tableCount
###############################
# def SaveCSV
# author: Lan Dam
# updated: 201705
# save data into a CSV file with delimeter=';'
def SaveCSV(self, savedFileName):
    """Write the edited table (minus deleted rows) to *savedFileName* as
    ';'-separated text with a program-version header line."""
    # use updated data from updated table
    DAT = self.updatedTable
    # delete row in deleteList
    DAT = numpy.delete(DAT, self.deleteList, axis=0)
    # combine with labels
    DAT = numpy.vstack((numpy.array(self.labels), DAT))
    # save into a text file with the given savedFileName, delimiter=';'
    # (numpy.savetxt prefixes the header line with '# ')
    numpy.savetxt(savedFileName, DAT, fmt='%s', delimiter=';',
                  header="KEF Editor Ver. %s" % PROG_VERSION)
###############################
# def UpdatePH5
# author: Lan Dam
# updated: 201705
def UpdatePH5(self):
    """Write the edited table back into the currently open PH5 file.

    Uses data from self.updatedTable (values were already converted
    back to their original types when updated).  Because the tables
    class doesn't allow inserting, everything from the lowest
    inserted/moved row on is truncated and re-appended.
    """
    pp = self.path.split('/')
    name = pp[-1]
    path = self.path.replace('/' + name, '')
    # get the node for the path
    ph5 = self.parent.ph5api.ph5
    node = ph5.get_node(where=path, name=name, classname='Table')
    # remove all the rows that follow the lowest changed row because
    # the tables class doesn't allow inserting
    if self.minChangedRowId is not None:
        node.remove_rows(self.minChangedRowId)
    # r: rowId in the edited data; row: row in node
    r = 0
    # vtypes = node.coltypes
    for row in node.iterrows():
        # remove row in deleteList
        if r in self.deleteList:
            node.remove_row(r)
        # update item in updateList
        if r in self.updateList:
            for c in range(len(self.labels)):
                try:
                    row.__setitem__(self.labels[c],
                                    self.updatedTable[r][c])
                except IndexError:
                    # column missing from this row -- skip it, matching
                    # the original best-effort behaviour
                    pass
            row.update()
        r += 1
    # from lowest insert row, start to append the rest of the updated Table
    if self.minChangedRowId is not None:
        row = node.row
        for r in range(self.minChangedRowId, len(self.updatedTable)):
            if r in self.deleteList:
                continue
            try:
                for c in range(len(self.labels)):
                    row[self.labels[c]] = self.updatedTable[r][c]
            except Exception as e:
                # BUG FIX: "except Exception, e" is Python-2-only syntax
                # and a SyntaxError under Python 3
                LOGGER.warning(
                    "Warning in append: Exception \'{0}\'".format(e))
            row.append()
    # flush all changes
    node.flush()
##########################################
# CLASS ####################
# Author: Lan
# Updated: 201703
# CLASS: SelectTableDialog - GUI for user to select parameters for table
class SelectTableDialog(QtWidgets.QDialog):
    """Dialog letting the user pick a PH5 table type plus the extra
    parameter (array / shotline / das / offset) some table types need."""

    def __init__(self, parent, availTables, arrays, shotLines, offsets, das):
        QtWidgets.QDialog.__init__(self)
        self.setWindowTitle("Select Tables")
        self.parent = parent
        mainLayout = QtWidgets.QVBoxLayout(self)
        mainLayout.addWidget(
            QtWidgets.QLabel('What table do you want to get info from?'))
        formLayout = QtWidgets.QFormLayout()
        mainLayout.addLayout(formLayout)
        # table-type selector; changing it enables the matching sub-control
        self.tableCtrl = QtWidgets.QComboBox(self)
        self.tableCtrl.clear()
        self.tableCtrl.addItems([''] + availTables)
        formLayout.addRow("Table", self.tableCtrl)
        self.tableCtrl.currentIndexChanged.connect(self.OnSelectTable)
        self.arrayCtrl = QtWidgets.QComboBox(self)
        self.arrayCtrl.clear()
        self.arrayCtrl.addItems([''] + arrays)
        formLayout.addRow("Array", self.arrayCtrl)
        self.shotLineCtrl = QtWidgets.QComboBox(self)
        self.shotLineCtrl.clear()
        self.shotLineCtrl.addItems([''] + shotLines)
        formLayout.addRow("ShotLine", self.shotLineCtrl)
        self.offsetCtrl = QtWidgets.QComboBox(self)
        self.offsetCtrl.clear()
        self.offsetCtrl.addItems([''] + offsets)
        formLayout.addRow("Offset (array_event)", self.offsetCtrl)
        self.dasCtrl = QtWidgets.QComboBox(self)
        self.dasCtrl.clear()
        self.dasCtrl.addItems([''] + das)
        formLayout.addRow("Das", self.dasCtrl)
        btnLayout = QtWidgets.QHBoxLayout()
        mainLayout.addLayout(btnLayout)
        btnLayout.stretch(1)
        submitBtn = QtWidgets.QPushButton('Submit', self)
        submitBtn.clicked.connect(self.OnSubmit)
        btnLayout.addWidget(submitBtn)
        btnLayout.stretch(1)
        cancelBtn = QtWidgets.QPushButton('Cancel', self)
        cancelBtn.clicked.connect(self.OnCancel)
        btnLayout.addWidget(cancelBtn)
        btnLayout.stretch(1)
        self._disableCtrls()
        self.show()

    def OnCancel(self, evt):
        """Close the dialog without loading anything."""
        self.close()

    def _disableCtrls(self):
        """Disable all parameter controls; OnSelectTable re-enables one."""
        self.arrayCtrl.setEnabled(False)
        self.shotLineCtrl.setEnabled(False)
        self.dasCtrl.setEnabled(False)
        self.offsetCtrl.setEnabled(False)

    ###############################
    # def OnSelectTable
    # author: Lan Dam
    # updated: 201703
    # when a tableType is selected, enable the properties needed
    def OnSelectTable(self, index):
        """Enable the one parameter control the chosen table type needs."""
        self._disableCtrls()
        tableType = self.tableCtrl.currentText()
        if tableType == 'Array_t':
            self.arrayCtrl.setEnabled(True)
        elif tableType == 'Event_t':
            self.shotLineCtrl.setEnabled(True)
        elif tableType == 'Das_t':
            self.dasCtrl.setEnabled(True)
        elif tableType == 'Offset_t':
            self.offsetCtrl.setEnabled(True)

    ###############################
    # def OnSubmit
    # author: Lan Dam
    # updated: 201703
    # use kefutility.PH5toTableData to read the required table into dataTable
    # call parent.setData() to set dataTable into MainTableView
    def OnSubmit(self, evt):
        """Validate the selection, load the requested table through
        kefutility.PH5toTableData and hand the data to the parent."""
        p = self.parent
        p.tableType = str(self.tableCtrl.currentText())
        errorCtrl = None
        if p.tableType == 'Array_t':
            p.arg = str(self.arrayCtrl.currentText())
            if p.arg == "":
                errorCtrl = 'Array'
        elif p.tableType == 'Event_t':
            p.arg = str(self.shotLineCtrl.currentText())
            if p.arg == "":
                errorCtrl = 'ShotLine'
        elif p.tableType == 'Das_t':
            p.arg = str(self.dasCtrl.currentText())
            if p.arg == "":
                errorCtrl = 'Das'
        elif p.tableType == 'Offset_t':
            p.arg = str(self.offsetCtrl.currentText())
            if p.arg == "":
                errorCtrl = 'Offset'
        else:
            p.arg = None
        if errorCtrl is not None:
            msg = "For Table '%s', %s must be selected." % (
                p.tableType, errorCtrl)
            QtWidgets.QMessageBox.warning(self, "Warning", msg)
            return
        p.dataTable, p.labelSets, p.totalLines, p.types =\
            kefutility.PH5toTableData(
                p.statusBar, p.ph5api, p.filename,
                p.path2file, p.tableType, p.arg)
        p.setData()
        p.openTableAction.setEnabled(True)
        p.updatePH5Action.setEnabled(True)
        # BUG FIX: was "p.notsave is True" -- an identity comparison whose
        # result was discarded, so the flag was never actually set
        p.notsave = True
        self.close()
# CLASS ####################
# Author: Lan
# Updated: 201409
# CLASS: Seperator - is the line to separate in the Gui (reuse from PH5View)
class Seperator(QtWidgets.QFrame):
    """Thin sunken QFrame used as a visual separator line.

    (Class name keeps the project's historical spelling.)
    """
    def __init__(self, thick=2, orientation="horizontal", length=None):
        QtWidgets.QFrame.__init__(self)
        self.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.setFrameShadow(QtWidgets.QFrame.Sunken)
        if orientation == 'horizontal':
            # fixed height = line thickness; optional fixed length (width)
            self.setFixedHeight(thick)
            if length is not None:
                self.setFixedWidth(length)
        else:
            # vertical: the thickness constrains the width instead
            self.setFixedWidth(thick)
            if length is not None:
                self.setFixedHeight(length)
##########################################
# CLASS ####################
# Author: Lan
# Updated: 201707
# CLASS: ManWindow - show Manual of the app. (reuse from PH5View)
class ManWindow(QtWidgets.QWidget):
    """Window displaying the application manual or the what's-new text."""
    def __init__(self, mantype=""):
        QtWidgets.QWidget.__init__(self)
        self.setGeometry(100, 100, 900, 700)
        view = QtWidgets.QTextBrowser(self)
        # pick the HTML body according to the requested manual type;
        # any other mantype leaves the browser empty
        if mantype == "manual":
            view.setText(kefutility.html_manual)
        elif mantype == "whatsnew":
            view.setText(kefutility.html_whatsnew % PROG_VERSION)
        self.layout = QtWidgets.QHBoxLayout()
        self.layout.addWidget(view)
        self.setLayout(self.layout)
        self.show()
def startapp():
    """Create the Qt application, open the KefEdit main window and run
    the event loop; exits the process with the loop's return code."""
    global application
    application = QtWidgets.QApplication(sys.argv)
    # keep a reference so the window isn't garbage-collected while the
    # event loop runs
    win = KefEdit()
    # exec_() blocks until the GUI quits; sys.exit() raises SystemExit,
    # so the original trailing "return win" was unreachable and has been
    # removed
    sys.exit(application.exec_())
if __name__ == "__main__":
    # script entry point: launch the KEF editor GUI
    startapp()
|
# -*- coding: utf-8 -*-
"""
userconfig
==========
Module handling configuration files based on ConfigParser
userconfig License Agreement (MIT License)
------------------------------------------
Copyright (c) 2009 Pierre Raybaut
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NON INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__version__ = '1.1.0'
__license__ = __doc__
import os
import re
import os.path as osp
import shutil
import sys
import six
import io
import time
import configparser as cp
def get_home_dir():
    """
    Return user home directory

    Tries ``os.path.expanduser('~')`` first, then falls back to the
    HOME / USERPROFILE / TMP environment variables.

    Raises
    ------
    RuntimeError when no usable directory can be found.
    """
    try:
        path = osp.expanduser('~')
    except Exception:
        # expanduser can fail when no user database entry exists; the
        # original bare "except:" also swallowed KeyboardInterrupt
        path = ''
    for env_var in ('HOME', 'USERPROFILE', 'TMP'):
        if osp.isdir(path):
            break
        path = os.environ.get(env_var, '')
    if path:
        # BUG FIX: the original unconditionally called
        # path.decode(sys.getfilesystemencoding()), which raises
        # AttributeError on Python 3 where os.environ yields str;
        # only decode genuine byte strings (Python 2).
        if isinstance(path, bytes):
            path = path.decode(sys.getfilesystemencoding())
        return path
    else:
        raise RuntimeError('Please define environment variable $HOME')
class NoDefault:
    # sentinel class marking "no default value supplied" in
    # UserConfig.get/set (compared with "is NoDefault")
    pass
class UserConfig(cp.ConfigParser):
    """
    UserConfig class, based on ConfigParser

    name: name of the config
    defaults: dictionary containing options
              *or* list of tuples (section_name, options)
    version: version of the configuration file (X.Y.Z format)
    subfolder: configuration file will be saved in %home%/subfolder/.%name%.ini

    Note that 'get' and 'set' arguments number and type
    differ from the overriden methods
    """
    DEFAULT_SECTION_NAME = 'main'

    def __init__(self, name, defaults=None, load=True, version=None,
                 subfolder=None, backup=False, raw_mode=False,
                 remove_obsolete=False):
        cp.ConfigParser.__init__(self)
        # raw=1 -> pass raw=True to ConfigParser.get/items (disables
        # '%'-interpolation of stored values)
        self.raw = 1 if raw_mode else 0
        self.subfolder = subfolder
        # NOTE: dots are now escaped -- the original pattern's bare '.'
        # also accepted strings like "1a2b3"
        if version and (re.match(r'^(\d+)\.(\d+)\.(\d+)$', version) is None):
            raise ValueError("Version number %r is incorrect - must be in X.Y.Z format" % version)
        self.name = name
        if isinstance(defaults, dict):
            defaults = [(self.DEFAULT_SECTION_NAME, defaults)]
        self.defaults = defaults
        if defaults:
            self.reset_to_defaults(save=False)
        fname = self.filename()
        if backup:
            try:
                shutil.copyfile(fname, "%s.bak" % fname)
            except IOError:
                pass
        if load:
            # If config file already exists, it overrides Default options:
            self.load_from_ini()
            # BUG FIX: old_ver was referenced below without ever being
            # assigned (NameError whenever backup=True and the version
            # changed); capture the on-disk version first
            old_ver = self.get_version(version)
            if version != old_ver:
                if backup:
                    try:
                        shutil.copyfile(fname, "%s-%s.bak" % (fname, old_ver))
                    except IOError:
                        pass
                # Version has changed -> overwriting .ini file
                self.reset_to_defaults(save=False)
                if remove_obsolete:
                    self.__remove_deprecated_options()
                # Set new version number
                self.set_version(version, save=False)
        if defaults is None:
            # If no defaults are defined, set .ini file settings as default
            self.set_as_defaults()
        # In any case, the resulting config is saved in config file:
        self.__save()

    def get_version(self, version='0.0.0'):
        """Return configuration (not application!) version"""
        return self.get(self.DEFAULT_SECTION_NAME, 'version', version)

    def set_version(self, version='0.0.0', save=True):
        """Set configuration (not application!) version"""
        self.set(self.DEFAULT_SECTION_NAME, 'version', version, save=save)

    def load_from_ini(self):
        """
        Load config from the associated .ini file
        """
        try:
            with io.open(self.filename(), 'r', encoding='utf-8') as configfile:
                # ConfigParser.readfp() was removed in Python 3.12;
                # prefer read_file() when it exists (Python 3.2+)
                reader = getattr(self, 'read_file', None) or self.readfp
                reader(configfile)
        except IOError:
            # no config file yet -- keep the in-memory defaults
            pass
        except cp.MissingSectionHeaderError:
            six.print_("Warning: File contains no section headers.")

    def __remove_deprecated_options(self):
        """
        Remove options which are present in the .ini file but not in defaults
        """
        for section in self.sections():
            for option, _ in self.items(section, raw=self.raw):
                if self.get_default(section, option) is NoDefault:
                    self.remove_option(section, option)
                    # drop the section entirely once it is empty
                    if len(self.items(section, raw=self.raw)) == 0:
                        self.remove_section(section)

    def __save(self):
        """
        Save config into the associated .ini file
        """
        fname = self.filename()
        try:  # the "easy" way
            with io.open(fname, 'w', encoding='utf-8') as conf_file:
                self.write(conf_file)
        except IOError:
            try:  # the "delete and sleep" way
                if osp.isfile(fname):
                    os.remove(fname)
                time.sleep(0.05)
                with io.open(fname, 'w', encoding='utf-8') as conf_file:
                    self.write(conf_file)
            except Exception as e:
                six.print_("Failed to write user configuration file.")
                six.print_("Please submit a bug report.")
                raise e

    def filename(self):
        """
        Create a .ini filename located in user home directory
        """
        folder = get_home_dir()
        if self.subfolder is not None:
            folder = osp.join(folder, self.subfolder)
        try:
            os.makedirs(folder)
        except os.error:
            # Folder (or one of its parents) already exists
            pass
        return osp.join(folder, '.%s.ini' % self.name)

    def cleanup(self):
        """
        Remove .ini file associated to config
        """
        os.remove(self.filename())

    def set_as_defaults(self):
        """
        Set defaults from the current config
        """
        self.defaults = []
        for section in self.sections():
            secdict = {}
            for option, value in self.items(section, raw=self.raw):
                secdict[option] = value
            self.defaults.append((section, secdict))

    def reset_to_defaults(self, save=True, verbose=False):
        """
        Reset config to Default values
        """
        for section, options in self.defaults:
            for option in options:
                value = options[option]
                self.__set(section, option, value, verbose)
        if save:
            self.__save()

    def __check_section_option(self, section, option):
        """
        Private method to check section and option types
        """
        if section is None:
            section = self.DEFAULT_SECTION_NAME
        elif not isinstance(section, six.string_types):
            # BUG FIX: replaced Python-2-only "raise Cls, msg" syntax and
            # the py2-only (str, unicode) tuple, both SyntaxError/NameError
            # under Python 3
            raise RuntimeError("Argument 'section' must be a string")
        if not isinstance(option, six.string_types):
            raise RuntimeError("Argument 'option' must be a string")
        return section

    def get_default(self, section, option):
        """
        Get Default value for a given (section, option)
        -> useful for type checking in 'get' method
        """
        section = self.__check_section_option(section, option)
        for sec, options in self.defaults:
            if sec == section:
                if option in options:
                    return options[option]
        # no default registered for this (section, option)
        return NoDefault

    def get(self, section, option, default=NoDefault):
        """
        Get an option
        section=None: attribute a default section name
        default: default value (if not specified, an exception
                 will be raised if option doesn't exist)
        """
        section = self.__check_section_option(section, option)
        if not self.has_section(section):
            if default is NoDefault:
                raise cp.NoSectionError(section)
            else:
                self.add_section(section)
        if not self.has_option(section, option):
            if default is NoDefault:
                raise cp.NoOptionError(option, section)
            else:
                # remember the fallback so later gets/sets see it
                self.set(section, option, default)
                return default
        value = cp.ConfigParser.get(self, section, option, raw=self.raw)
        # coerce the stored text back to the type of the default value
        default_value = self.get_default(section, option)
        if isinstance(default_value, bool):
            value = eval(value)
        elif isinstance(default_value, float):
            value = float(value)
        elif isinstance(default_value, int):
            value = int(value)
        else:
            if isinstance(default_value, six.string_types):
                # BUG FIX: only byte strings need decoding; on Python 3
                # str has no .decode and the original raised
                # AttributeError here instead of the caught Unicode errors
                if isinstance(value, bytes):
                    try:
                        value = value.decode('utf-8')
                    except (UnicodeEncodeError, UnicodeDecodeError):
                        pass
            try:
                # lists, tuples, ...  NOTE: eval of stored text -- only
                # acceptable because the .ini lives in the user's own
                # home directory; do not point this at untrusted files
                value = eval(value)
            except Exception:
                pass
        return value

    def __set(self, section, option, value, verbose):
        """
        Private set method
        """
        if not self.has_section(section):
            self.add_section(section)
        if not isinstance(value, six.string_types):
            value = repr(value)
        if verbose:
            six.print_('%s[ %s ] = %s' % (section, option, value))
        cp.ConfigParser.set(self, section, option, value)

    def set_default(self, section, option, default_value):
        """
        Set Default value for a given (section, option)
        -> called when a new (section, option) is set and no default exists
        """
        section = self.__check_section_option(section, option)
        for sec, options in self.defaults:
            if sec == section:
                options[option] = default_value

    def set(self, section, option, value, verbose=False, save=True):
        """
        Set an option
        section=None: attribute a default section name
        """
        section = self.__check_section_option(section, option)
        default_value = self.get_default(section, option)
        if default_value is NoDefault:
            # first time this option is seen: its value becomes the default
            default_value = value
            self.set_default(section, option, default_value)
        if isinstance(default_value, bool):
            value = bool(value)
        elif isinstance(default_value, float):
            value = float(value)
        elif isinstance(default_value, int):
            value = int(value)
        elif not isinstance(value, six.string_types):
            value = repr(value)
        self.__set(section, option, value, verbose)
        if save:
            self.__save()

    def remove_section(self, section):
        """Remove *section* and persist the change immediately."""
        cp.ConfigParser.remove_section(self, section)
        self.__save()

    def remove_option(self, section, option):
        """Remove one option and persist the change immediately."""
        cp.ConfigParser.remove_option(self, section, option)
        self.__save()
|
from flask import Flask, redirect, url_for, render_template, request, jsonify, g, session, flash
from flask_restful import Api
import requests
from discord import Webhook, RequestsWebhookAdapter
import sqlite3
import time
from multiprocessing import Process
from flask import Flask
import json
# Flask application set-up
app = Flask(__name__)
# NOTE(review): secret key is hard-coded in source -- should be loaded
# from configuration/environment before any real deployment
app.secret_key = '6969691'
api = Api(app)
@app.route('/', methods=['GET'])
def api2():
    """Geolocate the caller's IP and render google-search links for
    grilled-cheese restaurants near that location.

    BUG FIXES vs. the original:
    * the X-Forwarded-For address was immediately overwritten by
      request.remote_addr, defeating the proxy handling;
    * the whole lookup body was duplicated in both branches;
    * `title` was never defined, so render_template raised NameError.
    """
    forwarded = request.headers.getlist("X-Forwarded-For")
    if forwarded:
        # first entry is the original client when behind a proxy
        ip = forwarded[0]
    else:
        ip = request.remote_addr
    # fields=17 asks ip-api for country + city only
    loc = requests.get(f"http://ip-api.com/json/{ip}?fields=17")
    js = loc.json()
    country = js["country"]
    city = js["city"]
    print(city)
    url = f"https://google-search3.p.rapidapi.com/api/v1/search/q={country}+{city}+grilled+cheese+restaurant&num=10"
    headers = {
        # NOTE(review): API key is empty -- must be supplied (from
        # config, not source) before this endpoint can work
        'x-rapidapi-key': "",
        'x-rapidapi-host': "google-search3.p.rapidapi.com"
    }
    response = requests.get(url, headers=headers)
    dict1 = json.loads(response.text)
    links = [entry["link"] for entry in dict1["results"]]
    title = "Grilled Cheese Restaurants"
    return render_template('grilled.html', result=links, title=title)
if __name__ == "__main__":
    # development server only -- never run with debug=True in production
    app.run(debug=True)
|
# Example calls against the Pure Storage `get_targets` REST API.
# NOTE(review): `client` and `pypureclient` are assumed to be set up by
# the surrounding example harness -- confirm before running standalone.
# list all targets
res = client.get_targets()
print(res)
if type(res) == pypureclient.responses.ValidResponse:
    print(list(res.items))
# list first three targets using default sort
res = client.get_targets(limit=3)
print(res)
if type(res) == pypureclient.responses.ValidResponse:
    print(list(res.items))
# list first three targets and sort by address
res = client.get_targets(limit=3, sort='address')
print(res)
if type(res) == pypureclient.responses.ValidResponse:
    print(list(res.items))
# list all remaining targets
res = client.get_targets(continuation_token=res.continuation_token)
print(res)
if type(res) == pypureclient.responses.ValidResponse:
    print(list(res.items))
# list with filter to see only targets that match a specific ip format
res = client.get_targets(filter='name=\'12.56.23.*\'')
print(res)
if type(res) == pypureclient.responses.ValidResponse:
    print(list(res.items))
# Other valid fields: ids, names, offset
# See section "Common Fields" for examples
|
from setuptools import setup
# Packaging metadata for the pyffsem distribution (pure-Python FFSEM).
setup(
    name='pyffsem',
    version='0.1',
    packages=['pyffsem'],
    url='',
    license='MIT',
    author='Boris Klyus',
    author_email='klyusba@gmail.com',
    description='Pure Python realisation of FFSEM',
    install_requires=[
        'PyCryptodome',
    ]
)
|
import cv2
import torch
import torch.nn.functional as F
import re
from struct import unpack
import numpy as np
def warp(x, disp):
    """Warp *x* along the width axis by per-pixel disparity *disp*.

    x: (batch, channel, height, width) tensor.
    disp: assumed (batch, height, width) disparity map, subtracted from
    the column index -- TODO confirm shape/sign convention with caller.
    Requires CUDA (grids are created with .cuda()).  Returns the
    bilinearly resampled tensor; out-of-image samples become zeros.
    """
    bs, ch, h, w = x.size()
    # index grids over (batch, height, width)
    bg, hg, wg = torch.meshgrid(torch.arange(0, bs), torch.arange(0, h),
                                torch.arange(0, w))
    grid_b, grid_h, grid_w = bg.cuda(), hg.cuda(), wg.cuda()
    # each output pixel samples from its own column minus the disparity
    warped_gw = torch.sub(grid_w, disp)
    grid = torch.stack([warped_gw, grid_h.float()], dim=-1)
    # grid_sample expects coordinates normalised to [-1, 1]
    grid_normalized = ((grid * 2) / torch.Tensor([w, h]).cuda()) - 1
    output = F.grid_sample(x, grid_normalized, mode='bilinear',
                           padding_mode='zeros')
    return output
def readPFM(file):
    """Read a PFM image file and return (img, height, width).

    img is a (height, width) numpy float array, flipped so row 0 is the
    top of the image (PFM stores rows bottom-up).  NOTE(review): for
    colour "PF" files (3 channels) the reshape to (height, width) will
    fail -- original behaviour kept; only greyscale "Pf" is exercised.
    """
    # BUG FIX: the original called sys.exit() but `sys` was never
    # imported in this module, so a bad header raised NameError
    import sys
    with open(file, "rb") as f:
        # Line 1: PF=>RGB (3 channels), Pf=>Greyscale (1 channel)
        # (renamed from `type`, which shadowed the builtin)
        header = f.readline().decode('latin-1')
        if "PF" in header:
            channels = 3
        elif "Pf" in header:
            channels = 1
        else:
            sys.exit(1)
        # Line 2: width height
        line = f.readline().decode('latin-1')
        width, height = re.findall(r'\d+', line)
        width = int(width)
        height = int(height)
        # Line 3: +ve number means big endian, negative means little endian
        line = f.readline().decode('latin-1')
        BigEndian = "-" not in line
        # Slurp all binary data
        samples = width * height * channels
        buffer = f.read(samples * 4)
        # Unpack floats with appropriate endianness
        fmt = (">" if BigEndian else "<") + str(samples) + "f"
        img = unpack(fmt, buffer)
        img = np.reshape(img, (height, width))
        img = np.flipud(img)
    return img, height, width
# Smoke test: warp the right view onto the left view using the left
# disparity map.  NOTE(review): hard-coded dataset paths and .cuda()
# calls require the FlyingThings3D data and a GPU to be present.
x = cv2.imread('/data/schuster/BMW_SceneFlow/DATA/Freiburg/FlyingThings3D/frames_finalpass/TRAIN/A/0000/left/0006.png')
y = torch.from_numpy(np.asarray(cv2.imread('/data/schuster/BMW_SceneFlow/DATA/Freiburg/FlyingThings3D/frames_finalpass/TRAIN/A/0000/right/0006.png')).transpose(2, 0, 1))
disp, _, _ = readPFM('/data/schuster/BMW_SceneFlow/DATA/Freiburg/FlyingThings3D/disparity/TRAIN/A/0000/left/0006.pfm')
disp = torch.from_numpy(disp.copy())
# print(x.size(), disp.size())
output = warp(y.unsqueeze(dim=0).cuda().float(), disp.unsqueeze(dim=0).cuda().float())
cv2.imwrite('warped.png', output.squeeze().detach().cpu().numpy().transpose(1, 2, 0))
cv2.imwrite('left.png', x) |
#Project Euler Question 58
#Spiral primes
import math
def prime_check(x):
    """Return True when x is prime, using trial division by odd factors
    up to sqrt(x)."""
    if x == 2:
        return True
    if x <= 2 or x % 2 == 0:
        # 2 is the only even prime; anything below 2 is not prime
        return False
    limit = int(math.sqrt(x) + 1)
    for factor in range(3, limit, 2):
        if x % factor == 0:
            return False
    return True
def spiral_numbers(threshold=0.1):
    """Return the side length of the Ulam-style number spiral at which
    the ratio of primes along both diagonals first falls below
    *threshold* (Project Euler 58; default threshold 10%).

    BUG FIXES vs. the original:
    * the ratio was tested after every single value, so at the lone
      centre value 1 the ratio 0/1 < 0.1 returned side_length 1
      immediately; the ratio is now evaluated only after each complete
      ring of four corners;
    * len(prime)/len(diag) was integer division on Python 2 -- a float
      cast guards against that;
    * counting replaces the unbounded sets that stored every value.
    """
    diag_count = 1   # the centre value 1 lies on both diagonals
    prime_count = 0
    x = 1
    side_length = 1
    gap = 2
    while True:
        # add the four corners of the next ring
        for _ in range(4):
            x += gap
            diag_count += 1
            if prime_check(x):
                prime_count += 1
        side_length += 2
        gap += 2
        if float(prime_count) / diag_count < threshold:
            return side_length
print (spiral_numbers()) |
import os
import coverage
import unittest
def run_all_tests(test_modules):
    """Collect and run the given test modules with a text runner.

    Modules exposing a suite() factory contribute that suite; every
    other module has its test cases discovered by the default loader.
    """
    combined = unittest.TestSuite()
    for name in test_modules:
        try:
            # If the module defines a suite() function, call it to get
            # the suite.
            module = __import__(name, globals(), locals(), ['suite'])
            combined.addTest(module.suite())
        except (ImportError, AttributeError):
            # else, just load all the test cases from the module.
            combined.addTest(
                unittest.defaultTestLoader.loadTestsFromName(name))
    unittest.TextTestRunner().run(combined)
# Remove leftover test fixture files from previous runs
test_files = ['config.cfg', 'cr-config.cfg', 'config_dump.cfg', 'test_key.dat']
for filename in test_files:
    if os.path.exists(filename):
        os.remove(filename)
# modules whose test cases will be collected
test_modules = [
    'ni.test.test_codec',
    'ni.test.test_config',
    'ni.test.test_encryption',
    'ni.test.test_validator',
    'ni.test.test_tools'
]
# Run the test cases under coverage measurement
cov = coverage.Coverage()
cov.start()
run_all_tests(test_modules)
cov.stop()
cov.save()
cov.html_report(directory='htmlcov') |
keyboard.send_keys("<ctrl>+<alt>+<delete>") |
import os
from config import config
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials, space_eval
from hyperopt.mongoexp import MongoTrials
from keras.callbacks import ModelCheckpoint
def info(title):
    """ Prints out some threads stats: the given title plus module name,
    parent-process id and process id. """
    # BUG FIX: parenthesised single-argument print works on both
    # Python 2 and 3; the original used py2-only print statements
    print(title)
    print('module name: ' + __name__)
    print('parent process: ' + str(os.getppid()))
    print('process id: ' + str(os.getpid()))
def gen_layer(num_ls, num_l, width):
    """ Given the number of layers, the layer number and width, returns a choice of activation for the layer. """
    # the label must stay unique per (layer-count, layer, width) triple
    label = 'layer activation %s%s%s' % (num_ls, num_l, width)
    activations = ['relu', 'sigmoid', 'tanh', 'linear']
    # NEQP (translated): the activation set could probably be narrowed
    # further depending on the kind of data being trained on, since some
    # activations are of little use for certain data types
    return (width, hp.choice(label, activations))
def layer_choice(num):
    """ Given the number of layers, returns a choice of differing width and activations layers. """
    choices = []
    for layer_idx in range(num):
        base = 'num_layers %s layer %s' % (num, layer_idx)
        # Choose number of neurons from 1 - 10, each width paired with
        # its own activation choice.
        # NEQP (translated): 1-10 neurons may not be the only sensible
        # range -- complex data might want fewer layers but more neurons
        # per layer.
        options = [('%s width %s' % (base, w), gen_layer(num, layer_idx, w))
                   for w in range(1, 11)]
        choices.append(hp.choice(base, options))
    return choices
def train(air_model, train_epochs=20):
    """ Runs TPE black box optimization of the neural network to use.

    After evaluating all points, it saves the best model to disk and sets
    the status flag as TRAINED.

    air_model: project Model instance (fetched/saved via db helpers)
    train_epochs: number of hyperopt evaluations (max_evals)
    """
    from db import get_model, save_model
    from model import ModelStatus
    info('Running training on new process')
    air_model.status = ModelStatus.TRAINING
    save_model(air_model)
    # search space: optimizer choice + a choice of 0..9 layers
    fspace = {
        'optimizer': hp.choice('optimzer', ['rmsprop', 'adagrad']),  # NEQP (translated): is the label 'optimzer' (sic) intentional?
        'layers': hp.choice('layers', [(str(x), layer_choice(x)) for x in range(10)])  # Choose from 0 to 9 layers.
    }
    if config.DISTRIBUTED_HYPEROPT:
        # TODO: Probably not send all model from json. Just send the ids and make the worker fetch it from the DB.
        fspace['model_json'] = air_model.to_json()
        trials = MongoTrials('mongo://localhost:27017/testdb/jobs', exp_key='userid.trainingid', workdir='/home/paezand/pusher/bottle_air')
        best = fmin(fn=run_model_fn, space=fspace, trials=trials, algo=tpe.suggest, max_evals=train_epochs)
        # Run workers with
        # hyperopt-mongo-worker --mongo=$mongodbURL/testdb --poll-interval=0.1 --workdir=$bottle_air_dir
    else:
        trials = Trials()  # NEQP (translated): did you look at parallel search with MongoDB here too?
        best = fmin(fn=air_model.run_model(), space=fspace, algo=tpe.suggest, max_evals=train_epochs, trials=trials)
    print 'best:', space_eval(fspace, best)
    print 'trials:'
    for trial in trials.trials[:2]:
        print trial
    model_fn = air_model.run_model(persist=True)
    model_fn(space_eval(fspace, best))  # Train and persist best model.
    print 'Training finished'
    air_model.status = ModelStatus.TRAINED
    air_model.best_model = best
    save_model(air_model)
## TRAINING FUNCTION FOR DISTRIBUTED HYPEROPT ##
def run_model_fn(hp):
    """ Definition to be evaluated by the black box optimizer.
    Params: hyperparameter dictionary (must contain 'model_json', 'optimizer', 'layers').
    Returns a hyperopt result dict: combined train+validation 'loss' and 'status'.
    """
    from model import Model
    import json
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Embedding, Dense, Activation, Merge, Flatten, Dropout
    from keras.preprocessing.sequence import pad_sequences
    from keras_utils import single_activation
    import tensorflow as tf
    import os
    cwd = os.getcwd()
    print(cwd)
    air_model = Model()
    air_model.from_json(hp["model_json"])
    output_headers = [outputs for outputs in air_model.data.iterkeys() if outputs.startswith('output_')]
    if not output_headers:
        raise ValueError('No outputs defined!')
    # Process string features (only if not already cached on the model).
    if not air_model.string_features:
        air_model.string_features = []
        for header, typ in air_model.types.iteritems():
            if typ != 'str':
                continue
            # Every string feature is treated as a list of words.
            word_list = [x.split() for x in air_model.data[header]]
            dict_, _ = air_model.process_text_feature(word_list)
            assert len(dict_) > 0, 'Dict is empty.'
            air_model.embedding_dicts[header] = dict_
            lengths = [len(words) for words in word_list]
            lengths.sort()
            # 95th-percentile length: wide enough for most rows without letting
            # a few very long strings blow up the input width.
            input_size = lengths[int(np.round((len(lengths)-1) * 0.95))]
            if input_size == 0:
                print('WARNING: input_size is 0 for ' + header)
                input_size = 1
            for idx, words in enumerate(word_list):
                # Strings to integers. Pad sequences with zeros so that all of them have the same size.
                word_list[idx] = pad_sequences([[dict_[word] for word in words]],
                                               maxlen=input_size, padding='post',
                                               truncating='post')[0].tolist()
            air_model.string_features.append((header, word_list))
    # Build models.
    # Merge all inputs into one model.
    def init_model(air_model):
        """Build the merged input model; returns (model, total input width)."""
        feature_models = []
        total_input_size = 0
        i = 0
        for tup in air_model.string_features:
            header = tup[0]
            word_list = tup[1]
            sequence_length = len(word_list[0])
            # Embedding size ~ log10(vocabulary size), minimum 1.
            embedding_size = int(np.round(np.log10(len(air_model.embedding_dicts[header]))))
            embedding_size = embedding_size if embedding_size > 0 else 1
            model = Sequential(name='str_model_' + str(len(feature_models)))
            model.add(Embedding(len(air_model.embedding_dicts[header].keys()), embedding_size, input_length=sequence_length, name='embedding_model_' + str(len(feature_models))))
            model.add(Flatten(name='flatten_model_' + str(len(feature_models))))
            total_input_size += embedding_size * len(word_list[0])
            feature_models.append(model)
        # Everything that is neither a string feature nor an output is numeric.
        numeric_inputs = len(air_model.data) - len(air_model.string_features) - len(output_headers)
        if numeric_inputs:
            num_model = Sequential(name='num_model_' + str(len(feature_models)))
            num_model.add(Dense(numeric_inputs, input_shape=(numeric_inputs,), name='dense_model_' + str(len(feature_models))))
            total_input_size += numeric_inputs
            feature_models.append(num_model)
        merged_model = Sequential()
        if len(feature_models) < 1:
            raise ValueError('No models built, no inputs?')
        elif len(feature_models) == 1:
            merged_model = feature_models[0]
        else:
            merged_model.add(Merge(feature_models, mode='concat', concat_axis=1))
        return merged_model, total_input_size
    # We will build in total DEEP_RANGE*WIDE_RANGE models.
    optimizer = hp['optimizer']
    layers = hp['layers']
    dropout = 0.2 # hp['dropout']
    batch_size = 128 # hp['batch_size']
    model, input_size = init_model(air_model)
    # We will add 'depth' layers with 'net_width' neurons.
    depth = len(layers[1])
    for i in range(depth):
        layer_activation = layers[1][i][1][1]
        layer_width = layers[1][i][1][0]
        if i == 0 and depth != 1:
            model.add(Dense(layer_width, input_shape=(input_size,), name='layer_model_' + str(i)))
            model.add(Activation(layer_activation))
            model.add(Dropout(dropout))
        elif i == depth - 1:
            # Last layer maps to the outputs; no activation (regression).
            model.add(Dense(len(output_headers), input_shape=(len(layers[1][i-1][1]),), name='layer_model_' + str(i)))
        else:
            model.add(Dense(layer_width, input_shape=(len(layers[1][i-1][1]),), name='layer_model_' + str(i)))
            model.add(Activation(layer_activation))
            model.add(Dropout(dropout))
    if not depth:
        model.add(Dense(len(output_headers), input_shape=(input_size,), name='layer_model_0'))
    # No Activation in the end for now... Assuming regression always.
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    nb_epoch = 30
    model_name = str(hp).replace('{', '').replace('}', '')
    X_train, Y_train = air_model.get_data_sets(sample=True) # Only use a small sample.
    VAL_SPLIT = 0.1 # Split of data to use as validation.
    print('Sizes: ' + str(len(X_train)) + ', ' + str(X_train[0].shape) + ' ' + str(len(Y_train)))
    with tf.Session() as sess:
        history = model.fit(X_train, Y_train,
                            batch_size=batch_size,
                            nb_epoch=nb_epoch,
                            shuffle=True,
                            validation_split=VAL_SPLIT)
        # BUG FIX: the train-loss term used to sit on its own line as a separate
        # expression statement, so it was evaluated and discarded and only the
        # validation term was returned. Parenthesize into a single expression.
        total_dataset_loss = (VAL_SPLIT * history.history['val_loss'][-1]
                              + (1 - VAL_SPLIT) * history.history['loss'][-1])
        return {'loss': total_dataset_loss, 'status': STATUS_OK}
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
class ProductPage:
    """Page object for the litecart product page; holds the shared driver."""
    def __init__(self, driver):
        # Selenium WebDriver shared by the page helpers in this module.
        self.driver = driver
        # self.wait = WebDriverWait(driver, 10)
def add_one_product(app):
    """Open a popular (non-duck) product, add it to the cart, wait for the
    cart counter to increment, then return to the home page.

    *app* is the test application fixture exposing ``driver`` and ``wait``.
    """
    # 1) Open the page of some product (the Yellow Duck is excluded).
    app.driver.find_element(By.XPATH, "//*[@id='box-most-popular']//a[@class='link'][@title!='Yellow Duck']").click()
    # 2) Add it to the cart. Use the By-based locator API consistently:
    #    the find_element_by_* helpers were removed in Selenium 4.
    app.wait.until(EC.presence_of_element_located((By.NAME, "add_cart_product")))
    cart_quantity_before = int(app.driver.find_element(By.XPATH, "//*[@id='cart']//span[@class='quantity']").text)
    app.driver.find_element(By.NAME, "add_cart_product").click()
    # 3) Wait until the cart item counter refreshes: the quantity element must
    #    show exactly previous count + 1 before moving on. (Waiting on the
    #    rendered counter is more robust than polling jQuery.active.)
    app.wait.until(EC.presence_of_element_located(
        (By.XPATH, "//*[@id='cart']//span[@class='quantity'][.='" + str(cart_quantity_before + 1) + "']")))
    # 4) Back to the home page so the caller can repeat these steps.
    app.driver.find_element(By.LINK_TEXT, "Home").click()
def remove_all_products(app):
    """Open the checkout cart and remove every item, waiting for the order
    summary table to refresh after each removal."""
    # 5) Open the cart via the "Checkout »" link in the top-right corner.
    app.driver.find_element(By.LINK_TEXT, "Checkout »").click()
    app.wait.until(EC.presence_of_element_located((By.NAME, "remove_cart_item")))
    order_summary_len = len(
        app.driver.find_elements(By.XPATH, "//*[@id='order_confirmation-wrapper']/table/tbody/tr/td[@class='item']"))
    # 6) Remove all items one by one; after each removal wait for the summary
    #    table at the bottom to refresh (its row count drops by one).
    for j in range(order_summary_len):
        order_summary_before_len = len(
            app.driver.find_elements(By.XPATH,
                                     "//*[@id='order_confirmation-wrapper']/table/tbody/tr/td[@class='item']"))
        # The cart "carousel" shows/hides products and can intercept clicks.
        # Clicking the small product icon below it stops the animation so the
        # delete button stays clickable.
        if app.driver.find_element(By.XPATH, "//*[@id='box-checkout-cart']//li/a"):
            app.driver.find_element(By.XPATH, "//*[@id='box-checkout-cart']//li/a").click()
        app.driver.find_element(By.NAME, "remove_cart_item").click()
        # Wait until the row that was at the previous last index disappears.
        # BUG FIX: the XPath used to end with "']" (producing e.g. "[3']", an
        # invalid positional predicate that makes the wait raise); it must
        # close with a plain "]".
        app.wait.until_not(EC.presence_of_element_located((By.XPATH,
            "(//*[@id='order_confirmation-wrapper']/table/tbody/tr/td[@class='item'])[" + str(
                order_summary_before_len) + "]")))
"""
This script will test to ensure Python 2.7 and Python 3.6+ are supported.
"""
import sys
import os
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_DATA = os.path.abspath(PATH_HERE+"../../../data/abfs/")
PATH_SRC = os.path.abspath(PATH_HERE+"../../../src/")
sys.path.insert(0, PATH_SRC)
print("PYTHON %d.%d"%(sys.version_info.major, sys.version_info.minor))
import pyabf
abf = pyabf.ABF(PATH_DATA+"/model_vc_step.abf")
print(abf.sweepY) |
import unittest
from pyowm.commons.databoxes import ImageType, Satellite
class TestImageType(unittest.TestCase):
    """Smoke test for pyowm's ImageType databox."""
    def test_repr(self):
        # The original called repr() without checking its result; assert a
        # non-empty string so a broken __repr__ actually fails the test.
        instance = ImageType('PDF', 'application/pdf')
        result = repr(instance)
        self.assertIsInstance(result, str)
        self.assertTrue(result)
class TestSatellite(unittest.TestCase):
    """Smoke test for pyowm's Satellite databox."""
    def test_repr(self):
        # The original called repr() without checking its result; assert a
        # non-empty string so a broken __repr__ actually fails the test.
        instance = Satellite('Terrasat', 'tst')
        result = repr(instance)
        self.assertIsInstance(result, str)
        self.assertTrue(result)
|
import asyncio
import logging
import uuid
from base64 import b64decode
from datetime import datetime
from importlib import import_module
import marshmallow as ma
from app.objects.c_ability import Ability, AbilitySchema
from app.objects.secondclass.c_executor import Executor, ExecutorSchema
from app.objects.secondclass.c_fact import Fact, FactSchema, OriginType
from app.objects.secondclass.c_relationship import RelationshipSchema
from app.objects.secondclass.c_visibility import Visibility, VisibilitySchema
from app.utility.base_object import BaseObject
from app.utility.base_parser import PARSER_SIGNALS_FAILURE
from app.utility.base_service import BaseService
# Sentinel distinguishing "status never assigned" from any real status value,
# including falsy ones such as 0 (SUCCESS).
NO_STATUS_SET = object()
class LinkSchema(ma.Schema):
    """Marshmallow schema for (de)serializing Link objects."""
    class Meta:
        unknown = ma.EXCLUDE  # silently drop unrecognized input fields
    id = ma.fields.String(missing='')
    paw = ma.fields.String()
    command = ma.fields.String()
    status = ma.fields.Integer(missing=-3)  # -3 == EXECUTE (see Link.states)
    score = ma.fields.Integer(missing=0)
    jitter = ma.fields.Integer(missing=0)
    decide = ma.fields.DateTime(format='%Y-%m-%d %H:%M:%S')
    pin = ma.fields.Integer(missing=0)
    pid = ma.fields.String()
    facts = ma.fields.List(ma.fields.Nested(FactSchema()))
    relationships = ma.fields.List(ma.fields.Nested(RelationshipSchema()))
    used = ma.fields.List(ma.fields.Nested(FactSchema()))
    unique = ma.fields.String()
    collect = ma.fields.DateTime(format='%Y-%m-%d %H:%M:%S', default='')
    finish = ma.fields.String()
    ability = ma.fields.Nested(AbilitySchema())
    executor = ma.fields.Nested(ExecutorSchema())
    cleanup = ma.fields.Integer(missing=0)
    visibility = ma.fields.Nested(VisibilitySchema())
    host = ma.fields.String(missing=None)
    output = ma.fields.String()
    deadman = ma.fields.Boolean()
    agent_reported_time = ma.fields.DateTime(format='%Y-%m-%d %H:%M:%S', missing=None)
    @ma.pre_load()
    def fix_ability(self, link, **_):
        # Accept an Ability object in the input dict by dumping it back to
        # plain data before normal schema loading.
        if 'ability' in link and isinstance(link['ability'], Ability):
            ability = link.pop('ability')
            link['ability'] = ability.schema.dump(ability)
        return link
    @ma.pre_load()
    def fix_executor(self, link, **_):
        # Same normalization as fix_ability, for Executor instances.
        if 'executor' in link and isinstance(link['executor'], Executor):
            executor = link.pop('executor')
            link['executor'] = executor.schema.dump(executor)
        return link
    @ma.pre_load()
    def remove_properties(self, data, **_):
        # Strip server-managed/derived properties so clients cannot set them.
        data.pop('unique', None)
        data.pop('decide', None)
        data.pop('pid', None)
        data.pop('facts', None)
        data.pop('collect', None)
        data.pop('finish', None)
        data.pop('visibility', None)
        data.pop('output', None)
        data.pop('used.unique', None)
        return data
    @ma.post_load()
    def build_link(self, data, **kwargs):
        # Partial loads only validate; full loads materialize a Link object.
        return None if kwargs.get('partial') is True else Link(**data)
    @ma.post_dump()
    def prepare_dump(self, data, **_):
        # Omit agent_reported_time from output when it was never reported.
        if data.get('agent_reported_time', None) is None:
            data.pop('agent_reported_time', None)
        return data
class Link(BaseObject):
    """A single tasked instance of an ability for a specific agent (paw),
    tracking its command, execution status, timing, and harvested facts."""
    schema = LinkSchema()
    display_schema = LinkSchema(exclude=['jitter'])
    load_schema = LinkSchema(exclude=['decide', 'pid', 'facts', 'unique', 'collect', 'finish', 'visibility',
                                      'output', 'used.unique'])
    # Global variables substituted into commands at execution time.
    RESERVED = dict(origin_link_id='#{origin_link_id}')
    EVENT_EXCHANGE = 'link'
    EVENT_QUEUE_STATUS_CHANGED = 'status_changed'
    @property
    def raw_command(self):
        # The command is stored base64-encoded; decode for display/processing.
        return self.decode_bytes(self.command) if self.command else ''
    @property
    def unique(self):
        return self.hash('%s' % self.id)
    @property
    def pin(self):
        return self._pin
    @pin.setter
    def pin(self, p):
        self._pin = p
    @property
    def states(self):
        # Negative values are pre-execution states; >= 0 are execution results.
        return dict(HIGH_VIZ=-5,
                    UNTRUSTED=-4,
                    EXECUTE=-3,
                    DISCARD=-2,
                    PAUSE=-1,
                    SUCCESS=0,
                    ERROR=1,
                    TIMEOUT=124)
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        # Fire a status-change event, but only for real transitions: the very
        # first assignment (sentinel) and same-value writes are silent.
        previous_status = getattr(self, '_status', NO_STATUS_SET)
        self._status = value
        if previous_status is NO_STATUS_SET:
            return
        if previous_status == value:
            return
        self._emit_status_change_event(
            from_status=previous_status,
            to_status=value
        )
    @classmethod
    def is_global_variable(cls, variable):
        return variable in cls.RESERVED
    def __init__(self, command='', paw='', ability=None, executor=None, status=-3, score=0, jitter=0, cleanup=0, id='',
                 pin=0, host=None, deadman=False, used=None, relationships=None, agent_reported_time=None):
        super().__init__()
        self.id = str(id)
        self.command = command
        self.command_hash = None
        self.paw = paw
        self.host = host
        self.cleanup = cleanup
        self.ability = ability
        self.executor = executor
        self.status = status
        self.score = score
        self.jitter = jitter
        self.decide = datetime.now()
        self.pid = None
        self.collect = None
        self.finish = None
        self.facts = []
        self.relationships = relationships if relationships else []
        self.used = used if used else []
        self.visibility = Visibility()
        self._pin = pin
        self.output = False
        self.deadman = deadman
        self.agent_reported_time = agent_reported_time
    def __eq__(self, other):
        # Links compare equal when the same ability is tasked to the same
        # agent with the same used facts. NOTE(review): no matching __hash__
        # is defined, so instances are unhashable (Python sets __hash__ to
        # None when __eq__ is overridden) — confirm that is intended.
        if isinstance(other, Link):
            return other.paw == self.paw and other.ability.ability_id == self.ability.ability_id \
                and other.used == self.used
        return False
    async def parse(self, operation, result):
        """Run the executor's parsers over *result* (only when SUCCESS),
        creating relationships/facts; a parser-signalled failure flips the
        link status to ERROR instead."""
        if self.status != 0:
            return
        for parser in self.executor.parsers:
            source_facts = operation.source.facts if operation else []
            try:
                relationships = await self._parse_link_result(result, parser, source_facts)
                if len(relationships) > 0 and relationships[0] == PARSER_SIGNALS_FAILURE:
                    logging.getLogger('link').debug(f'link {self.id} (ability id={self.ability.ability_id}) encountered '
                                                    f'an error during execution, which was caught during parsing.')
                    self.status = self.states['ERROR']
                    relationships = []  # we didn't actually get anything out of this, so let's reset
                else:
                    await self._create_relationships(relationships, operation)
                    await update_scores(operation, increment=len(relationships), used=self.used, facts=self.facts)
            except Exception as e:
                logging.getLogger('link').debug('error in %s while parsing ability %s: %s'
                                                % (parser.module, self.ability.ability_id, e))
    def apply_id(self, host):
        # Assign a fresh UUID and substitute it for the origin-link-id global.
        self.id = str(uuid.uuid4())
        self.host = host
        self.replace_origin_link_id()
    def can_ignore(self):
        return self.status in [self.states['DISCARD'], self.states['HIGH_VIZ']]
    def is_finished(self):
        return self.status in [self.states['DISCARD'], self.states['SUCCESS'],
                               self.states['ERROR'], self.states['TIMEOUT']]
    def is_valid_status(self, status):
        return status in self.states.values()
    def replace_origin_link_id(self):
        # The command travels base64-encoded: decode, substitute, re-encode.
        decoded_cmd = self.decode_bytes(self.command)
        self.command = self.encode_string(decoded_cmd.replace(self.RESERVED['origin_link_id'], self.id))
    """ PRIVATE """
    def _emit_status_change_event(self, from_status, to_status):
        # Fire-and-forget on the running loop; the task is returned so
        # callers/tests may await it if they need the event delivered.
        event_svc = BaseService.get_service('event_svc')
        task = asyncio.get_event_loop().create_task(
            event_svc.fire_event(
                exchange=Link.EVENT_EXCHANGE,
                queue=Link.EVENT_QUEUE_STATUS_CHANGED,
                link=self.id,
                from_status=from_status,
                to_status=to_status
            )
        )
        return task
    async def _parse_link_result(self, result, parser, source_facts):
        # Results arrive base64-encoded; hand the decoded blob to a freshly
        # loaded Parser instance built from the parser's module path.
        blob = b64decode(result).decode('utf-8')
        parser_info = dict(module=parser.module, used_facts=self.used, mappers=parser.parserconfigs,
                           source_facts=source_facts)
        p_inst = await self._load_module('Parser', parser_info)
        return p_inst.parse(blob=blob)
    @staticmethod
    async def _load_module(module_type, module_info):
        # Dynamic import: the module path comes from parser configuration.
        module = import_module(module_info['module'])
        return getattr(module, module_type)(module_info)
    async def _create_relationships(self, relationships, operation):
        # Persist both endpoints of each relationship as facts, then store the
        # relationship itself when it has a real edge.
        for relationship in relationships:
            relationship.origin = operation.id if operation else self.id
            await self._save_fact(operation, relationship.source, relationship.score, relationship.shorthand)
            await self._save_fact(operation, relationship.target, relationship.score, relationship.shorthand)
            if all((relationship.source.trait, relationship.edge)):
                knowledge_svc_handle = BaseService.get_service('knowledge_svc')
                await knowledge_svc_handle.add_relationship(relationship)
            self.relationships.append(relationship)
    async def _save_fact(self, operation, fact, score, relationship):
        """Record a parsed fact in the knowledge service: either add it as a
        new LEARNED fact, or link this link to the pre-existing fact."""
        knowledge_svc_handle = BaseService.get_service('knowledge_svc')
        all_facts = await operation.all_facts() if operation else self.facts
        source = operation.id if operation else self.id
        rl = [relationship] if relationship else []
        if all([fact.trait, fact.value]):
            # Attribute the fact to the operation's source when it already
            # holds an identical trait/value pair.
            if operation and operation.source:
                if any([(fact.trait, fact.value) == (x.trait, x.value) for x in
                        await knowledge_svc_handle.get_facts(criteria=dict(source=operation.source.id))]):
                    source = operation.source.id
            fact.source = source  # Manual addition to ensure the check works correctly
            if not await knowledge_svc_handle.check_fact_exists(fact, all_facts):
                f_gen = Fact(trait=fact.trait, value=fact.value, source=source, score=score, collected_by=self.paw,
                             technique_id=self.ability.technique_id, links=[self.id], relationships=rl,
                             origin_type=OriginType.LEARNED)
                self.facts.append(f_gen)
                await knowledge_svc_handle.add_fact(f_gen)
            else:
                # Fact already known: attach this link/relationship to it and
                # mirror the update into the local facts list.
                existing_fact = (await knowledge_svc_handle.get_facts(criteria=dict(trait=fact.trait,
                                                                                    value=fact.value,
                                                                                    source=fact.source)))[0]
                if self.id not in existing_fact.links:
                    existing_fact.links.append(self.id)
                if relationship not in existing_fact.relationships:
                    existing_fact.relationships.append(relationship)
                await knowledge_svc_handle.update_fact(criteria=dict(trait=fact.trait, value=fact.value,
                                                                     source=fact.source),
                                                       updates=dict(links=existing_fact.links,
                                                                    relationships=existing_fact.relationships))
                existing_local_record = [x for x in self.facts if x.trait == fact.trait and x.value == fact.value]
                if existing_local_record:
                    existing_local_record[0].links = existing_fact.links
                else:
                    self.facts.append(existing_fact)
async def update_scores(operation, increment, used, facts):
    """Bump the score of every fact in *used* by *increment*.

    Each used fact is matched (by its ``unique`` key) against the operation's
    facts when an operation is given, otherwise against *facts*; the first
    match is updated locally and in the knowledge service.
    """
    knowledge_svc = BaseService.get_service('knowledge_svc')
    for used_fact in used:
        candidates = await operation.all_facts() if operation else facts
        match = next((f for f in candidates if f.unique == used_fact.unique), None)
        if match is None:
            continue
        match.score += increment
        await knowledge_svc.update_fact(dict(trait=match.trait, value=match.value,
                                             source=match.source), dict(score=match.score))
|
import logging
from athena import cluster
def make_cluster(cluster_settings, processes):
    """Instantiate the cluster backend named by ``cluster_settings.cluster_type``.

    Raises KeyError for unknown cluster types.
    """
    registry = {
        "IPCluster": cluster.IPCluster,
        "local": cluster.LocalCluster,
        "multiprocessing": cluster.MultiprocessingCluster,
    }
    cluster_cls = registry[cluster_settings.cluster_type]
    return cluster_cls(processes, cluster_settings)
class Runner(object):
    """Runs pipeline stages, farming runnable chunks out to a cluster."""
    def __init__(self, options):
        self.options = options
        self.cluster = None
    def run_stage(self, stage, stage_name):
        """Run every chunk of *stage* that still needs to run, then log the
        stage's delivery message, if any."""
        pending = [chunk for chunk in stage.get_steps(self.options)
                   if chunk.needs_to_run()]
        banner = "=" * 30
        logging.info("{} {} {}".format(banner, stage_name, banner))
        if len(pending) > 0:
            logging.info("{} chunks to run. Starting...".format(len(pending)))
            # Never spin up more workers than there are chunks.
            worker_count = min(self.options.cluster_settings.processes, len(pending))
            stage_cluster = make_cluster(self.options.cluster_settings, worker_count)
            stage_cluster.map(_run_chunk, pending)
            logging.info("--> {} completed.\n".format(stage_name))
        else:
            logging.info("--> 0 chunks need to be run. Skipping...\n")
        if stage.deliver_message(self.options):
            logging.info(stage.deliver_message(self.options))
def _run_chunk(chunk):
try:
chunk.start_logging()
except Exception, e:
print "== Error starting logging =="
print(e)
raise
try:
chunk.run()
chunk.finalize()
chunk.stop_logging()
except Exception, e:
chunk.logger.exception(e)
raise
if chunk.needs_to_run():
chunk.logger.error("Step {} failed to produce output.".format(chunk))
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException

# Scrape quote text and author from every page, following "Next" links.
driver = webdriver.Chrome(executable_path='chromedriver.exe')
try:
    driver.get('https://quotes.toscrape.com/page/9/')
    while True:
        # By-based locators replace find_elements_by_* (removed in Selenium 4).
        for quote in driver.find_elements(By.CSS_SELECTOR, '.quote'):
            print(quote.find_element(By.CSS_SELECTOR, '.text').text)
            print(quote.find_element(By.CSS_SELECTOR, '.author').text)
        try:
            # A missing "Next" link means we reached the last page. The bare
            # except is narrowed so unrelated WebDriver errors now surface.
            driver.find_element(By.CSS_SELECTOR, '.next a').click()
        except NoSuchElementException:
            break
finally:
    # Always release the browser, even if scraping fails midway.
    driver.quit()
# modify from sklearn (0.22.1)
import numpy as np
from .utils.multiclass import type_of_target
from .utils.validation import check_consistent_length, column_or_1d, assert_all_finite
from .utils.extmath import stable_cumsum
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds.

    Restricted to the binary classification task. Precision is
    ``tp / (tp + fp)`` and recall is ``tp / (tp + fn)``. The last precision
    and recall values are 1. and 0. respectively and do not have a
    corresponding threshold, so the graph starts on the y axis.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels. If labels are not either {-1, 1} or {0, 1},
        ``pos_label`` should be explicitly given.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int or str, default=None
        The label of the positive class. When None, y_true must be in
        {-1, 1} or {0, 1} and ``pos_label`` is taken to be 1.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Element i is the precision of predictions with score >=
        thresholds[i]; the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing; element i is the recall of predictions with score >=
        thresholds[i]; the last element is 0.
    thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
        Increasing thresholds on the decision function.

    Examples
    --------
    >>> import numpy as np
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision
    array([0.66666667, 0.5       , 1.        , 1.        ])
    >>> recall
    array([1. , 0.5, 0.5, 0. ])
    >>> thresholds
    array([0.35, 0.4 , 0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)
    # Precision is 0/0 (NaN) wherever tp + fp == 0; define it as 0 there.
    precision = tps / (tps + fps)
    precision[np.isnan(precision)] = 0
    recall = tps / tps[-1]
    # Stop at the first index reaching full recall and reverse the outputs so
    # recall is decreasing; append the (precision=1, recall=0) anchor point.
    full_recall_idx = tps.searchsorted(tps[-1])
    reversed_tail = slice(full_recall_idx, None, -1)
    precision_out = np.hstack((precision[reversed_tail], [1]))
    recall_out = np.hstack((recall[reversed_tail], [0]))
    return precision_out, recall_out, thresholds[reversed_tail]
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function
    pos_label : int or str, default=None
        The label of the positive class
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    # Check to make sure y_true is valid
    y_type = type_of_target(y_true)
    if not (y_type == "binary" or
            (y_type == "multiclass" and pos_label is not None)):
        raise ValueError("{0} format is not supported".format(y_type))
    check_consistent_length(y_true, y_score, sample_weight)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    assert_all_finite(y_true)
    assert_all_finite(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    # ensure binary classification if pos_label is not specified
    # classes.dtype.kind in ('O', 'U', 'S') is required to avoid
    # triggering a FutureWarning by calling np.array_equal(a, b)
    # when elements in the two arrays are not comparable.
    classes = np.unique(y_true)
    if (pos_label is None and (
            classes.dtype.kind in ('O', 'U', 'S') or
            not (np.array_equal(classes, [0, 1]) or
                 np.array_equal(classes, [-1, 1]) or
                 np.array_equal(classes, [0]) or
                 np.array_equal(classes, [-1]) or
                 np.array_equal(classes, [1])))):
        classes_repr = ", ".join(repr(c) for c in classes)
        raise ValueError("y_true takes value in {{{classes_repr}}} and "
                         "pos_label is not specified: either make y_true "
                         "take value in {{0, 1}} or {{-1, 1}} or "
                         "pass pos_label explicitly.".format(
                             classes_repr=classes_repr))
    elif pos_label is None:
        # Conventional labelings ({0,1}, {-1,1}, or degenerate single-class
        # sets) default the positive label to 1.
        pos_label = 1.
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # sort scores and corresponding truth values
    # (mergesort is stable, so ties keep a deterministic order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # Scalar 1. broadcasts in the cumsum below, avoiding an ones() array.
        weight = 1.
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    distinct_value_indices = np.where(np.diff(y_score))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = stable_cumsum(y_true * weight)[threshold_idxs]
    if sample_weight is not None:
        # express fps as a cumsum to ensure fps is increasing even in
        # the presence of floating point errors
        fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]
    else:
        # Unweighted case: false positives are simply (samples seen so far)
        # minus true positives.
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
|
from pyrep.robots.mobiles.nonholonomic_base import NonHolonomicBase
class Pioneer_p3dx(NonHolonomicBase):
    """PyRep wrapper for the Pioneer P3-DX differential-drive mobile robot.

    *count* selects among multiple copies of the model in the scene.
    """
    def __init__(self, count: int = 0):
        # Args forwarded to NonHolonomicBase: (count, 2, object name).
        # NOTE(review): the 2 is presumably the driven-wheel count — confirm
        # against NonHolonomicBase's signature.
        super().__init__(count, 2, 'Pioneer_p3dx')
|
"""GitLab collector base classes."""
from abc import ABC
from collections.abc import Sequence
from dateutil.parser import parse
from base_collectors import SourceCollector
from collector_utilities.functions import match_string_or_regular_expression
from collector_utilities.type import URL, Job
from model import Entities, Entity, SourceResponses
class GitLabBase(SourceCollector, ABC):  # pylint: disable=abstract-method
    """Base class for GitLab collectors."""
    async def _get_source_responses(self, *urls: URL, **kwargs) -> SourceResponses:
        """Extend to follow GitLab pagination links, if necessary."""
        # all_responses aliases the first batch; each iteration rebinds
        # `responses` (walrus) to the newest batch and appends it.
        all_responses = responses = await super()._get_source_responses(*urls, **kwargs)
        while next_urls := self.__next_urls(responses):
            # Retrieving consecutive big responses without reading the response hangs the client, see
            # https://github.com/aio-libs/aiohttp/issues/2217
            for response in responses:
                await response.read()
            all_responses.extend(responses := await super()._get_source_responses(*next_urls, **kwargs))
        return all_responses
    def _basic_auth_credentials(self) -> tuple[str, str] | None:
        """Override to return None, as the private token is passed as header."""
        return None
    def _headers(self) -> dict[str, str]:
        """Extend to add the private token, if any, to the headers."""
        headers = super()._headers()
        if private_token := self._parameter("private_token"):
            headers["Private-Token"] = str(private_token)
        return headers
    @staticmethod
    def __next_urls(responses: SourceResponses) -> list[URL]:
        """Return the next (pagination) links from the responses."""
        # GitLab exposes pagination via the Link header, surfaced by aiohttp
        # as response.links.
        return [URL(next_url) for response in responses if (next_url := response.links.get("next", {}).get("url"))]
class GitLabProjectBase(GitLabBase, ABC):  # pylint: disable=abstract-method
    """Base class for GitLab collectors for a specific project."""
    async def _gitlab_api_url(self, api: str) -> URL:
        """Return a GitLab API url for a project, if present in the parameters."""
        base = await super()._api_url()
        project = self._parameter("project", quote=True)
        endpoint = f"{base}/api/v4/projects/{project}"
        if api:
            endpoint = f"{endpoint}/{api}"
        # Request the maximum page size to minimize pagination round trips.
        separator = "&" if "?" in endpoint else "?"
        return URL(f"{endpoint}{separator}per_page=100")
class GitLabJobsBase(GitLabProjectBase):
    """Base class for GitLab job collectors."""
    async def _api_url(self) -> URL:
        """Override to return the jobs API."""
        return await self._gitlab_api_url("jobs")
    async def _parse_entities(self, responses: SourceResponses) -> Entities:
        """Override to parse the jobs from the responses."""
        return Entities(
            [
                Entity(
                    key=job["id"],
                    name=job["name"],
                    url=job["web_url"],
                    build_status=job["status"],
                    branch=job["ref"],
                    stage=job["stage"],
                    # Prefer the finish time; fall back to creation time for
                    # jobs that never finished.
                    build_date=str(parse(job.get("finished_at") or job["created_at"]).date()),
                )
                for job in await self.__jobs(responses)
            ]
        )
    async def __jobs(self, responses: SourceResponses) -> Sequence[Job]:
        """Return the jobs to count."""
        def newer(job1: Job, job2: Job) -> Job:
            """Return the newer of the two jobs."""
            # created_at is an ISO timestamp string, so lexicographic
            # comparison orders chronologically.
            return job1 if job1["created_at"] > job2["created_at"] else job2
        # Deduplicate on (name, stage, ref), keeping only the latest run.
        jobs: dict[tuple[str, str, str], Job] = {}
        for response in responses:
            for job in await response.json():
                key = job["name"], job["stage"], job["ref"]
                jobs[key] = newer(job, jobs.get(key, job))
        return [job for job in jobs.values() if self._count_job(job)]
    def _count_job(self, job: Job) -> bool:
        """Return whether to count the job."""
        # A job is counted unless its name or its ref matches the
        # corresponding ignore-list parameter.
        return not match_string_or_regular_expression(
            job["name"], self._parameter("jobs_to_ignore")
        ) and not match_string_or_regular_expression(job["ref"], self._parameter("refs_to_ignore"))
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This implements an in-memory cache.
We use an LRU to maintain the size of the cache, this is simple and only
tracks number of items in the cache.
We're using a dictionary and moving items to the top of the dictionary
when it's accessed. This relies on Python dictionaries being ordered.
"""
import io
from opteryx.storage import BaseBufferCache
class InMemoryCache(BaseBufferCache):
    """In-memory LRU buffer cache bounded by item count."""
    def __init__(self, **kwargs):
        """
        Parameters:
            size: int (optional)
                The maximum number of items maintained in the cache.
        """
        self._size = int(kwargs.get("size", 50))
        self._cache = {}
    def get(self, key):
        """Return the cached bytes for `key` wrapped in a BytesIO, or None on a miss."""
        # pop() removes the item; re-inserting puts it at the end of the dict,
        # so least-recently-used items drift toward the front (Python dicts
        # preserve insertion order).
        value = self._cache.pop(key, None)
        # BUGFIX: compare against None instead of relying on truthiness, so an
        # empty cached payload (b"") is still treated as a hit.
        if value is None:
            return None
        self._cache[key] = value
        return io.BytesIO(value)
    def set(self, key, value):
        """Cache the contents of readable buffer `value` under `key`; the buffer is rewound."""
        # add the new item to the top of the dict
        self._cache[key] = value.read()
        value.seek(0)
        # BUGFIX: the old `== self._size` check both kept only size-1 items and
        # could be skipped entirely if the cache ever overshot; a `>` loop evicts
        # until at most `size` items remain.
        while len(self._cache) > self._size:
            # Remove the oldest item: the first key in insertion order; using
            # next(iter(...)) avoids materializing a list of keys.
            self._cache.pop(next(iter(self._cache)))
|
import Adafruit_DHT
import time
import json
class DHT_Read():
    """Read temperature and humidity from a DHT11 sensor on a GPIO pin."""
    def __init__(self, pin=17):
        # Last readings; None until a successful read().
        self.humidity = None
        self.temp = None
        self.sensor = Adafruit_DHT.DHT11
        self.pin = pin
    def read(self):
        """Poll the sensor.

        Returns a JSON string with "temperature", "humidity" and "time" keys,
        or None when the sensor could not be read.
        """
        try:
            self.humidity, self.temp = Adafruit_DHT.read_retry(self.sensor, self.pin)
        except Exception:
            # BUGFIX: was a Python 2 `print` statement (syntax error on Python 3)
            # guarded by a bare `except:` that also swallowed KeyboardInterrupt.
            print("Error in reading from Sensor")
        if self.humidity is not None and self.temp is not None:
            data = {
                "temperature": self.temp,
                "humidity": self.humidity,
                "time": time.time(),
            }
            return json.dumps(data)
        # Explicitly signal failure to callers (was an implicit None).
        return None
|
import logging
import os
from unittest.mock import Mock, patch
import pytest
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.utils import temp_dir
from pip._internal.utils.logging import BrokenStdoutLoggingError
from pip._internal.utils.temp_dir import TempDirectory
@pytest.fixture
def fixed_time(utc):
    """Pin time.time() to a fixed instant so log timestamps are deterministic."""
    patcher = patch("time.time", lambda: 1547704837.040001)
    patcher.start()
    try:
        yield
    finally:
        patcher.stop()
class FakeCommand(Command):
    """Minimal pip Command used as a test double."""
    _name = "fake"
    def __init__(self, run_func=None, error=False):
        # error=True overrides any run_func argument with one that exits
        # with status 1.
        if error:
            def run_func():
                raise SystemExit(1)
        self.run_func = run_func
        super().__init__(self._name, self._name)
    def main(self, args):
        # Avoid the outdated-version network check during tests.
        args.append("--disable-pip-version-check")
        return super().main(args)
    def run(self, options, args):
        # Emit a log record so log-file tests have something to assert on.
        logging.getLogger("pip.tests").info("fake")
        # Return SUCCESS from run if run_func is not provided
        if self.run_func:
            return self.run_func()
        else:
            return SUCCESS
class FakeCommandWithUnicode(FakeCommand):
    """Fake command that logs both bytes and str payloads."""
    _name = "fake_unicode"
    def run(self, options, args):
        # Log raw bytes and a decoded non-ASCII string to exercise pip's
        # logging setup with both payload types.
        logging.getLogger("pip.tests").info(b"bytes here \xE9")
        logging.getLogger("pip.tests").info(b"unicode here \xC3\xA9".decode("utf-8"))
class TestCommand:
    """Tests for Command.main()'s handling of a broken stdout pipe."""
    def call_main(self, capsys, args):
        """
        Call command.main(), and return the command's stderr.
        """
        def raise_broken_stdout():
            # Simulates the logging machinery detecting a broken stdout pipe.
            raise BrokenStdoutLoggingError()
        cmd = FakeCommand(run_func=raise_broken_stdout)
        status = cmd.main(args)
        # A broken stdout pipe is reported as exit status 1.
        assert status == 1
        stderr = capsys.readouterr().err
        return stderr
    def test_raise_broken_stdout(self, capsys):
        """
        Test raising BrokenStdoutLoggingError.
        """
        stderr = self.call_main(capsys, [])
        assert stderr.rstrip() == "ERROR: Pipe to stdout was broken"
    def test_raise_broken_stdout__debug_logging(self, capsys):
        """
        Test raising BrokenStdoutLoggingError with debug logging enabled.
        """
        # With -vv a traceback is emitted in addition to the error line.
        stderr = self.call_main(capsys, ["-vv"])
        assert "ERROR: Pipe to stdout was broken" in stderr
        assert "Traceback (most recent call last):" in stderr
@patch("pip._internal.cli.req_command.Command.handle_pip_version_check")
def test_handle_pip_version_check_called(mock_handle_version_check):
    """
    Check that Command.handle_pip_version_check() is called.
    """
    # The patched hook records the call made from within main().
    cmd = FakeCommand()
    cmd.main([])
    mock_handle_version_check.assert_called_once()
def test_log_command_success(fixed_time, tmpdir):
    """Test the --log option logs when command succeeds."""
    log_path = tmpdir.joinpath("log")
    FakeCommand().main(["fake", "--log", log_path])
    with open(log_path) as log_file:
        assert log_file.read().rstrip() == "2019-01-17T06:00:37,040 fake"
def test_log_command_error(fixed_time, tmpdir):
    """Test the --log option logs when command fails."""
    log_path = tmpdir.joinpath("log")
    failing_cmd = FakeCommand(error=True)
    failing_cmd.main(["fake", "--log", log_path])
    with open(log_path) as log_file:
        assert log_file.read().startswith("2019-01-17T06:00:37,040 fake")
def test_log_file_command_error(fixed_time, tmpdir):
    """Test the --log-file option logs (when there's an error)."""
    log_file_path = tmpdir.joinpath("log_file")
    failing_cmd = FakeCommand(error=True)
    failing_cmd.main(["fake", "--log-file", log_file_path])
    with open(log_file_path) as log_file:
        assert log_file.read().startswith("2019-01-17T06:00:37,040 fake")
def test_log_unicode_messages(fixed_time, tmpdir):
    """Tests that logging bytestrings and unicode objects
    don't break logging.
    """
    log_path = tmpdir.joinpath("log")
    unicode_cmd = FakeCommandWithUnicode()
    unicode_cmd.main(["fake_unicode", "--log", log_path])
@pytest.mark.no_auto_tempdir_manager
def test_base_command_provides_tempdir_helpers():
    """main() must install the global tempdir manager and registry for run()."""
    assert temp_dir._tempdir_manager is None
    assert temp_dir._tempdir_registry is None
    def assert_helpers_set(options, args):
        # Executed inside Command.main(); the helpers must be set by then.
        assert temp_dir._tempdir_manager is not None
        assert temp_dir._tempdir_registry is not None
        return SUCCESS
    c = Command("fake", "fake")
    c.run = Mock(side_effect=assert_helpers_set)
    assert c.main(["fake"]) == SUCCESS
    c.run.assert_called_once()
# Tempdir "kind" whose deletion the tests below disable via set_delete().
not_deleted = "not_deleted"
@pytest.mark.parametrize("kind,exists", [(not_deleted, True), ("deleted", False)])
@pytest.mark.no_auto_tempdir_manager
def test_base_command_global_tempdir_cleanup(kind, exists):
    """Globally managed tempdirs are removed after main() unless deletion was
    disabled in the registry for their kind."""
    assert temp_dir._tempdir_manager is None
    assert temp_dir._tempdir_registry is None
    class Holder:
        # Captures the tempdir path created inside the command run.
        value = None
    def create_temp_dirs(options, args):
        # Disable deletion for the `not_deleted` kind only.
        c.tempdir_registry.set_delete(not_deleted, False)
        Holder.value = TempDirectory(kind=kind, globally_managed=True).path
        return SUCCESS
    c = Command("fake", "fake")
    c.run = Mock(side_effect=create_temp_dirs)
    assert c.main(["fake"]) == SUCCESS
    c.run.assert_called_once()
    # The directory survives main() only if its kind's deletion was disabled.
    assert os.path.exists(Holder.value) == exists
@pytest.mark.parametrize("kind,exists", [(not_deleted, True), ("deleted", False)])
@pytest.mark.no_auto_tempdir_manager
def test_base_command_local_tempdir_cleanup(kind, exists):
    """Locally scoped tempdirs honour the registry's delete flag when their
    context manager exits inside run()."""
    assert temp_dir._tempdir_manager is None
    assert temp_dir._tempdir_registry is None
    def create_temp_dirs(options, args):
        # Disable deletion for the `not_deleted` kind only.
        c.tempdir_registry.set_delete(not_deleted, False)
        with TempDirectory(kind=kind) as d:
            path = d.path
            assert os.path.exists(path)
        # After the with-block the dir remains only if deletion was disabled.
        assert os.path.exists(path) == exists
        return SUCCESS
    c = Command("fake", "fake")
    c.run = Mock(side_effect=create_temp_dirs)
    assert c.main(["fake"]) == SUCCESS
    c.run.assert_called_once()
|
import click
from globus_search_cli.config import get_search_client
from globus_search_cli.parsing import globus_cmd, index_argument
from globus_search_cli.printing import format_output
@globus_cmd("delete", help="Delete a role (requires admin or owner)")
@index_argument
@click.argument("ROLE_ID")
def delete_cmd(index_id, role_id):
search_client = get_search_client()
format_output(search_client.delete(f"/v1/index/{index_id}/role/{role_id}").data)
|
from .utils import upload_file, delete_file, get_cloudinary_resource_info
from .permissions import PostUserRateThrottle, SustainedRateThrottle
from .decorators import authentication_required
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, throttle_classes
from django.utils.text import slugify
from cloudinary.utils import api_sign_request
from math import floor
import uuid
from time import time
from cloudinary import config
@api_view(['POST'])
@authentication_required
@throttle_classes([PostUserRateThrottle, SustainedRateThrottle])
def SigGenAPIView(request):
    """
    generate Cloudinary Upload Signature.\n
    Requires Authentication.\n
    ------------------------\n
    request body format:\n
    {\n
        "username":"",\n
        "filename":"",\n
        "upload_preset":""\n
    }\n
    -------------------------\n
    response format:\n
    {\n
        "signature":"",\n
        "timestamp":"",\n
        "public_id":"",\n
        "api_key":""\n
    }\n
    """
    # NOTE(review): time() returns a float; Cloudinary signing examples usually
    # use an integer timestamp — confirm the float form is accepted upstream.
    timestamp = time()
    secret = config().api_secret
    api_key = config().api_key
    # First third of a UUID4 serves as a short random suffix for uniqueness.
    public_id = str(uuid.uuid4())
    public_id = public_id[0: floor(len(public_id)/3)]
    public_id = '{0}-{1}-{2}'.format(slugify(request.data.get("username")),
                                     slugify(request.data.get("filename")), public_id)
    # Sign exactly the parameters the client will send along with the upload.
    signature = api_sign_request({
        "timestamp": timestamp,
        "upload_preset": request.data.get("upload_preset"),
        "public_id": public_id},
        secret)
    return Response({"signature": signature, "timestamp": timestamp, "public_id": public_id, "api_key": api_key})
@api_view(['POST'])
@authentication_required
@throttle_classes([PostUserRateThrottle, SustainedRateThrottle])
def GetCloudinaryResourceInfoAPIView(request):
    """
    This gets the resource information about a video uploaded to cloudinary.\n
    This view is used when a transform is applied to video.
    to know if the transformation is ready so we can update the video url
    to take advantage of the transformation, we make a call to this endpoint.\n
    Requires Authentication.\n
    ------------------------\n
    request body format:\n
    {\n
        "url":"string",\n
    }\n
    -------------------------\n
    response format:\n
    {\n
        "result":{video resource json},\n
    }\n
    """
    try:
        resource_info = get_cloudinary_resource_info(request.data.get("url"))
    except Exception:
        # Upstream failure is surfaced to the client as a bad gateway.
        return Response({'result': 'failed to get cloudinary resource info'},
                        status=status.HTTP_502_BAD_GATEWAY)
    return Response({"result": resource_info}, status=status.HTTP_200_OK)
@api_view(['POST'])
@authentication_required
@throttle_classes([PostUserRateThrottle, SustainedRateThrottle])
def DeleteFileAPIView(request):
    """
    Delete the file with the provided url.\n
    Requires Authentication.\n
    ------------------------\n
    request body format:\n
    {\n
        "url":"string",\n
    }\n
    -------------------------\n
    response format:\n
    {\n
        "result":"ok"},\n
    }\n
    """
    try:
        deletion_result = delete_file(request.data.get("url"))
    except Exception:
        # Upstream failure is surfaced to the client as a bad gateway.
        return Response({'result': 'failed to delete file from media server'},
                        status=status.HTTP_502_BAD_GATEWAY)
    return Response({"result": deletion_result}, status=status.HTTP_200_OK)
@api_view(['POST'])
@authentication_required
@throttle_classes([PostUserRateThrottle, SustainedRateThrottle])
def UploadFileAPIView(request):
    """
    Upload the file under the provided key.\n
    Requires Authentication.\n
    ------------------------\n
    request body format:\n
    {\n
        "key":"string concatenation of folder and filename",\n
        "<filename>":<file being uploaded>,\n
    }\n
    -------------------------\n
    response format:\n
    {\n
        "url":"url of uploaded file"},\n
    }\n
    """
    try:
        key = request.data.get("key")
        # BUGFIX: rsplit tolerates keys with nested folders ("a/b/name");
        # the old two-way split raised ValueError for them.
        __, name = key.rsplit("/", 1)
        file = request.data.get(name)
        url = upload_file(file, key)
        return Response({"url": url}, status=status.HTTP_200_OK)
    except Exception:
        # BUGFIX: the error message (and docstring) previously said "delete";
        # this is the upload endpoint.
        return Response({'result': 'failed to upload file to media server'},
                        status=status.HTTP_502_BAD_GATEWAY)
@api_view(['POST'])
@authentication_required
def MediaSchemaAPIView(request):
    """
    Use SchemaGenerator to generate Zubhub Media Server API Schema instead of get_schema_view.
    this is neccessary because `get_schema_view` somehow ignores
    some api endpoints even when told to generate schema for those.
    Returns Media Server API schema.
    """
    # Imported locally — presumably to avoid paying the import cost on every
    # request to the other endpoints in this module; TODO confirm.
    from rest_framework.schemas.openapi import SchemaGenerator
    from django.urls import path
    # Explicit URL patterns so the generator sees every endpoint in this module.
    schema_url_patterns = [
        path('upload-file/', UploadFileAPIView),
        path('delete-file/', DeleteFileAPIView),
        path('sigen/', SigGenAPIView),
        path('get-cloudinary-resource-info/', GetCloudinaryResourceInfoAPIView),
        path('media-schema/', MediaSchemaAPIView)
    ]
    generator = SchemaGenerator(title='Zubhub Media Server API', patterns=schema_url_patterns)
    return Response(generator.get_schema())
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import warnings
import os
import re
from packaging.version import parse
# import pkg_resources
import sys
import skopt
# Make the local sphinxext/ extensions (github_link) importable.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# __version__ = pkg_resources.get_distribution('skopt').version
# True when building on Read the Docs.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- Project information -----------------------------------------------------
project = 'scikit-optimize'
copyright = '2017 - 2020, The scikit-optimize contributors.'
author = 'The scikit-optimize contributors'
# The short X.Y version
# base_version strips any dev/rc suffix before truncating to major.minor.
version = parse(skopt.__version__).base_version
version = ".".join(version.split(".")[:2])
# The full version, including alpha/beta/rc tags
release = skopt.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
    'numpydoc', 'sphinx.ext.linkcode',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.imgconverter',
    'sphinx_gallery.gen_gallery',
    'sphinx_issues',
    # NOTE(review): 'sphinx.ext.mathjax' is listed here AND appended again in
    # the else-branch below, producing a duplicate entry — confirm intended.
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]
# Keep class members out of the per-class toctree generated by numpydoc.
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get('NO_MATHJAX'):
    extensions.append('sphinx.ext.imgmath')
    imgmath_image_format = 'svg'
    mathjax_path = ''
else:
    extensions.append('sphinx.ext.mathjax')
    mathjax_path = ('https://cdn.jsdelivr.net/npm/mathjax@3/es5/'
                    'tex-chtml.js')
# Document members and inherited members by default in autodoc directives.
autodoc_default_options = {
    'members': True,
    'inherited-members': True
}
# Add any paths that contain templates here, relative to this directory.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx versions warn on language=None and expect 'en';
# confirm the pinned Sphinx version before changing.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'templates', 'includes', 'themes']
# Interpret `single backticks` as literal/code by default.
default_role = 'literal'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'scikit-learn-modern'
# Pass the mathjax_path computed above through to the theme.
html_theme_options = {'google_analytics': False,
                      'mathjax_path': mathjax_path}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_short_title = 'scikit-optimize'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'image/logo.png'
html_favicon = 'image/favicon.ico'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
    'index': 'index.html',
    'documentation': 'documentation.html'}  # redirects to index
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# NOTE: a second, identical `html_theme_path = ['themes']` assignment used to
# appear here; it was redundant and has been removed.
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-optimizedoc'
html_copy_source = True
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
'preamble': r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'scikit-optimize.tex', 'scikit-optimize Documentation',
'The scikit-optimize Contributors.', 'manual'),
]
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'scikit-optimize', 'scikit-optimize Documentation',
[author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'scikit-optimize', 'scikit-optimize Documentation', author,
'scikit-optimize',
'One line description of project.', 'Miscellaneous'),
]
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
'sklearn': ('https://scikit-learn.org/stable/', None),
}
# Branch of the repository that Binder links in the example gallery check out.
binder_branch = 'master'
class SubSectionTitleOrder:
    """Sort example gallery subsections by the title in their README.txt.

    Assumes every subsection has a README.txt whose title is adorned with
    dashes ('---') on the following line; falls back to the directory name.
    """
    def __init__(self, src_dir):
        self.src_dir = src_dir
        self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)
    def __repr__(self):
        return '<%s>' % (self.__class__.__name__,)
    def __call__(self, directory):
        section_path = os.path.normpath(os.path.join(self.src_dir, directory))
        readme_path = os.path.join(section_path, "README.txt")
        try:
            with open(readme_path, 'r') as readme_file:
                text = readme_file.read()
        except FileNotFoundError:
            # No README: sort by the raw directory name instead.
            return directory
        match = self.regex.search(text)
        return match.group(1) if match is not None else directory
sphinx_gallery_conf = {
    'doc_module': 'skopt',
    'backreferences_dir': os.path.join('modules', 'generated'),
    'show_memory': True,
    'reference_url': {
        'skopt': None},
    'examples_dirs': ['../examples'],
    'gallery_dirs': ['auto_examples'],
    'default_thumb_file': 'image/logo.png',
    # Order gallery subsections by their README.txt titles.
    'subsection_order': SubSectionTitleOrder('../examples'),
    # Empty pattern: execute every example when building the gallery.
    'filename_pattern': '',
    'ignore_pattern': 'utils.py',
    'binder': {
        'org': 'scikit-optimize',
        'repo': 'scikit-optimize',
        'binderhub_url': 'https://mybinder.org',
        'branch': binder_branch,
        'dependencies': './binder/requirements.txt',
        'use_jupyter_lab': True
    },
    # avoid generating too many cross links
    'inspect_global_variables': False,
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: gallery image file name
# value: maximum thumbnail width in pixels (passed to scale_image below)
carousel_thumbs = {'sphx_glr_plot_ask-and-tell_002.png': 600,
                   'sphx_glr_bayesian-optimization_004.png': 600,
                   'sphx_glr_strategy-comparison_002.png': 600,
                   'sphx_glr_visualizing-results_008.png': 600}
# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
def make_carousel_thumbs(app, exception):
    """produces the final resized carousel images"""
    # Skip entirely if the build failed.
    if exception is not None:
        return
    print('Preparing carousel images')
    image_dir = os.path.join(app.builder.outdir, '_images')
    for plot_name, max_width in carousel_thumbs.items():
        source = os.path.join(image_dir, plot_name)
        if not os.path.exists(source):
            continue
        target = os.path.join(image_dir, plot_name[:-4] + '_carousel.png')
        sphinx_gallery.gen_rst.scale_image(source, target, max_width, 190)
def filter_search_index(app, exception):
    """Strip __init__/__call__ method entries from the generated search index."""
    if exception is not None:
        return
    # searchindex only exists when generating html
    if app.builder.name != 'html':
        return
    print('Removing methods from search index')
    index_path = os.path.join(app.builder.outdir, 'searchindex.js')
    with open(index_path, 'r') as index_file:
        index_text = index_file.read()
    for pattern in (r'{__init__.+?}', r'{__call__.+?}'):
        index_text = re.sub(pattern, '{}', index_text)
    with open(index_path, 'w') as index_file:
        index_file.write(index_text)
# Config for sphinx_issues
# we use the issues path for PRs since the issues URL will forward
# Repository path used by sphinx_issues' :issue:/:pr: roles.
issues_github_path = 'scikit-optimize/scikit-optimize'
def setup(app):
    """Sphinx extension entry point: register the post-build hooks."""
    # Registration order matches the original: thumbnails first, then
    # search-index filtering.
    for hook in (make_carousel_thumbs, filter_search_index):
        app.connect('build-finished', hook)
# The following is used by sphinx.ext.linkcode to provide links to github.
# The template placeholders ({revision}, {package}, {path}, {lineno}) are
# filled in by make_linkcode_resolve at resolution time.
linkcode_resolve = make_linkcode_resolve('skopt',
                                         'https://github.com/scikit-optimize/'
                                         'scikit-optimize/blob/{revision}/'
                                         '{package}/{path}#L{lineno}')
# Silence the Matplotlib "agg backend" warning emitted while the example
# gallery is executed during the docs build.
warnings.filterwarnings("ignore", category=UserWarning,
                        message='Matplotlib is currently using agg, which is a'
                                ' non-GUI backend, so cannot show the figure.')
# Reduces the output of estimators
# sklearn.set_config(print_changed_only=True)
# -- Extension configuration -------------------------------------------------
|
#
# PySNMP MIB module Nortel-MsCarrier-MscPassport-DisdnNISMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-DisdnNISMIB
# Produced by pysmi-0.3.4 at Wed May 1 14:29:46 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
mscDataSigChan, mscDataSigChanIndex = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChan", "mscDataSigChanIndex")
StorageType, RowStatus, DisplayString, Unsigned32 = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "StorageType", "RowStatus", "DisplayString", "Unsigned32")
NonReplicated, Link = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-TextualConventionsMIB", "NonReplicated", "Link")
mscPassportMIBs, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, TimeTicks, Gauge32, MibIdentifier, Bits, NotificationType, ModuleIdentity, ObjectIdentity, Unsigned32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Counter32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "TimeTicks", "Gauge32", "MibIdentifier", "Bits", "NotificationType", "ModuleIdentity", "ObjectIdentity", "Unsigned32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Counter32", "IpAddress")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# --- MIB subtree roots (auto-generated by pysmi; edits here are lost on
# regeneration). OIDs live under the Nortel enterprise arc 1.3.6.1.4.1.562.
disdnNISMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127))
mscDataSigChanNis = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13))
# RowStatus table: controls addition/deletion of mscDataSigChanNis components.
mscDataSigChanNisRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 1), )
if mibBuilder.loadTexts: mscDataSigChanNisRowStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisRowStatusTable.setDescription('This entry controls the addition and deletion of mscDataSigChanNis components.')
mscDataSigChanNisRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisRowStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisRowStatusEntry.setDescription('A single entry in the table represents a single mscDataSigChanNis component.')
mscDataSigChanNisRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisRowStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisRowStatus.setDescription('This variable is used as the basis for SNMP naming of mscDataSigChanNis components. These components can be added and deleted.')
mscDataSigChanNisComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisComponentName.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisComponentName.setDescription("This variable provides the component's string name for use with the ASCII Console Interface")
mscDataSigChanNisStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisStorageType.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisStorageType.setDescription('This variable represents the storage type value for the mscDataSigChanNis tables.')
mscDataSigChanNisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscDataSigChanNisIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisIndex.setDescription('This variable represents the index for the mscDataSigChanNis tables.')
# L2 table: provisionable Q.931 layer-2 timers and counters.
mscDataSigChanNisL2Table = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11), )
if mibBuilder.loadTexts: mscDataSigChanNisL2Table.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisL2Table.setDescription('This group represents the provisionable Layer 2 attributes of the Q931 CCITT protocol.')
mscDataSigChanNisL2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisL2Entry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisL2Entry.setDescription('An entry in the mscDataSigChanNisL2Table.')
mscDataSigChanNisT23 = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 15)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisT23.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisT23.setDescription('This attribute specifies the layer2 enable request timer.')
mscDataSigChanNisT200 = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 20)).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisT200.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisT200.setDescription('This attribute specifies the maximum time between a layer 2 frame and its acknowledgement')
mscDataSigChanNisN200 = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)).clone(3)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisN200.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisN200.setDescription('This attribute specifies the maximum number of re-transmissions of a layer2 frame.')
mscDataSigChanNisT203 = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(2, 40)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisT203.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisT203.setDescription('This attribute specifies the maximum time that a no layer 2 traffic situation can last. Expiry triggers a check on whether the far end is a live.')
mscDataSigChanNisN201 = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(4, 260)).clone(260)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisN201.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisN201.setDescription('This attribute specifies the maximum number of octets in an information field.')
mscDataSigChanNisCircuitSwitchedK = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 11, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 632)).clone(7)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisCircuitSwitchedK.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisCircuitSwitchedK.setDescription('This attribute specifies the maximum number of frames for B channel use.')
# Prov table: general provisioning options of the D-channel signalling link
# (auto-generated by pysmi; edits here are lost on regeneration).
mscDataSigChanNisProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 13), )
if mibBuilder.loadTexts: mscDataSigChanNisProvTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisProvTable.setDescription('This group defines the general options of the d-channel signalling link.')
mscDataSigChanNisProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 13, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisProvEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisProvEntry.setDescription('An entry in the mscDataSigChanNisProvTable.')
mscDataSigChanNisSide = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 13, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("network", 1), ("user", 2))).clone('user')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisSide.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisSide.setDescription('This attribute specifies whether the layer 2 HDLC interface is the network or user side of the connection.')
# Oper table: read-only operational attributes of the signalling protocol.
mscDataSigChanNisOperTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 15), )
if mibBuilder.loadTexts: mscDataSigChanNisOperTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisOperTable.setDescription('This group provides the operational attributes for the signalling protocol.')
mscDataSigChanNisOperEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 15, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisOperEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisOperEntry.setDescription('An entry in the mscDataSigChanNisOperTable.')
mscDataSigChanNisActiveChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 15, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisActiveChannels.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisActiveChannels.setDescription('This attribute indicates the number of currently active channels. This includes data and voice channels.')
mscDataSigChanNisPeakActiveChannels = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 15, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisPeakActiveChannels.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisPeakActiveChannels.setDescription('This attribute indicates the maximum number of channels that have been active on this signalling channel during the last polling period.')
mscDataSigChanNisDChanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 15, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("outOfService", 0), ("establishing", 1), ("established", 2), ("enabling", 3), ("inService", 4), ("restarting", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisDChanStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisDChanStatus.setDescription('This attribute indicates the state of the D-channel. outOfService means that there is no layer 2 or layer 3 connectivity to the PBX. establishing means that the signalling channel is attempting to stage the layer 2. established means that the layer 2 is enabled. If the signalling channel stays in the established state, then it is waiting for a restart from the PBX. enabling means that the resources for processing calls are being initialized. If the signalling channel stays in the enabling state then it is waiting for a restart acknowledgement from the PBX. inService means that the resources for processing calls are available. restarting means that the resources for call processing are being rei- initialized.')
# Tools table: operational tracing controls for the signalling channel.
mscDataSigChanNisToolsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 16), )
if mibBuilder.loadTexts: mscDataSigChanNisToolsTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisToolsTable.setDescription('This group contains a series of operational attributes which turn on and off several kinds of tracing.')
mscDataSigChanNisToolsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 16, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisToolsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisToolsEntry.setDescription('An entry in the mscDataSigChanNisToolsTable.')
mscDataSigChanNisTracing = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 16, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisTracing.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisTracing.setDescription('This attribute defines which types of tracing are active for this signalling channel. The tracing messages are sent to the debug stream. To see the messages the agentQueue attribute in Col/debug must be greater than 0 and a Telnet NMIS session must list the debug stream in in its data streams (ex. set nmis telnet session/1 dataStreams debug). Different types of tracing can be enabled simultaneously. Note that tracing consumes additional CPU resources and will slow down call processing on a heavily loaded card. If there is message block exhaustion tracing will be suspended for a period and then automatically reenabled. An alarm is generated on tracing suspension and resumption. This mechanism protects the function processor against excessive numbers of tracing messages. Types of tracing include: protocolErrors - get details of any protocol errors which are occuring. Protocol errors are also reported in summary form as alarms. q931Summary - Summary of the Q.931 messages on the signalling link, including certain call details (calling number, called number, release codes). q931Hex - Q.931 messages displayed in hex format. Useful to determine protocol compliance in case of errors reported on local or remote ends. q931Symbolic - Q.931 messages parsed to give maximum detail. Useful for understanding content of messages flowing on the link. portHex - Messages in hex format being sent and received on the link. Description of bits: protocolErrors(0) q931Summary(1) q931Hex(2) q931Symbolic(3) portHex(4)')
# Framer sub-component: link-level framing for the signalling channel
# (auto-generated by pysmi; edits here are lost on regeneration).
mscDataSigChanNisFramer = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2))
mscDataSigChanNisFramerRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 1), )
if mibBuilder.loadTexts: mscDataSigChanNisFramerRowStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerRowStatusTable.setDescription('This entry controls the addition and deletion of mscDataSigChanNisFramer components.')
mscDataSigChanNisFramerRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 1, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisFramerIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisFramerRowStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerRowStatusEntry.setDescription('A single entry in the table represents a single mscDataSigChanNisFramer component.')
mscDataSigChanNisFramerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerRowStatus.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerRowStatus.setDescription('This variable is used as the basis for SNMP naming of mscDataSigChanNisFramer components. These components cannot be added nor deleted.')
mscDataSigChanNisFramerComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerComponentName.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerComponentName.setDescription("This variable provides the component's string name for use with the ASCII Console Interface")
mscDataSigChanNisFramerStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerStorageType.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerStorageType.setDescription('This variable represents the storage type value for the mscDataSigChanNisFramer tables.')
mscDataSigChanNisFramerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: mscDataSigChanNisFramerIndex.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerIndex.setDescription('This variable represents the index for the mscDataSigChanNisFramer tables.')
# Framer provisioning: the hardware link this framer is bound to.
mscDataSigChanNisFramerProvTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 10), )
if mibBuilder.loadTexts: mscDataSigChanNisFramerProvTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerProvTable.setDescription('This group contains the base provisioning data for the Framer component. Application or hardware interface specific provisioning data is contained in other provisionable Framer groups.')
mscDataSigChanNisFramerProvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 10, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisFramerIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisFramerProvEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerProvEntry.setDescription('An entry in the mscDataSigChanNisFramerProvTable.')
mscDataSigChanNisFramerInterfaceName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 10, 1, 1), Link()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mscDataSigChanNisFramerInterfaceName.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerInterfaceName.setDescription("This attribute contains a hardware component name. The attribute associates the application with a specific link. This defines the module processor on which Framer's parent component (as well as Framer itself) will run.")
# Framer OSI state attributes (administrative / operational / usage).
mscDataSigChanNisFramerStateTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 12), )
if mibBuilder.loadTexts: mscDataSigChanNisFramerStateTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerStateTable.setDescription('This group contains the three OSI State attributes. The descriptions generically indicate what each state attribute implies about the component. Note that not all the values and state combinations described here are supported by every component which uses this group. For component-specific information and the valid state combinations, refer to NTP 241-7001-150, Passport Operations and Maintenance Guide.')
mscDataSigChanNisFramerStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 12, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisFramerIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisFramerStateEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerStateEntry.setDescription('An entry in the mscDataSigChanNisFramerStateTable.')
mscDataSigChanNisFramerAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("locked", 0), ("unlocked", 1), ("shuttingDown", 2))).clone('unlocked')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerAdminState.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerAdminState.setDescription('This attribute indicates the OSI Administrative State of the component. The value locked indicates that the component is administratively prohibited from providing services for its users. A Lock or Lock - force command has been previously issued for this component. When the value is locked, the value of usageState must be idle. The value shuttingDown indicates that the component is administratively permitted to provide service to its existing users only. A Lock command was issued against the component and it is in the process of shutting down. The value unlocked indicates that the component is administratively permitted to provide services for its users. To enter this state, issue an Unlock command to this component.')
mscDataSigChanNisFramerOperationalState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerOperationalState.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerOperationalState.setDescription('This attribute indicates the OSI Operational State of the component. The value enabled indicates that the component is available for operation. Note that if adminState is locked, it would still not be providing service. The value disabled indicates that the component is not available for operation. For example, something is wrong with the component itself, or with another component on which this one depends. If the value is disabled, the usageState must be idle.')
mscDataSigChanNisFramerUsageState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 12, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("idle", 0), ("active", 1), ("busy", 2))).clone('idle')).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerUsageState.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerUsageState.setDescription('This attribute indicates the OSI Usage State of the component. The value idle indicates that the component is not currently in use. The value active indicates that the component is in use and has spare capacity to provide for additional users. The value busy indicates that the component is in use and has no spare operating capacity for additional users at this time.')
# Framer statistics counters; per the MIB descriptions each wraps to zero
# after reaching its maximum value (range 0..4294967295).
# (Auto-generated by pysmi; edits here are lost on regeneration.)
mscDataSigChanNisFramerStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13), )
if mibBuilder.loadTexts: mscDataSigChanNisFramerStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerStatsTable.setDescription('This group contains the operational statistics data for a Framer component.')
mscDataSigChanNisFramerStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1), ).setIndexNames((0, "Nortel-MsCarrier-MscPassport-DataIsdnMIB", "mscDataSigChanIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisIndex"), (0, "Nortel-MsCarrier-MscPassport-DisdnNISMIB", "mscDataSigChanNisFramerIndex"))
if mibBuilder.loadTexts: mscDataSigChanNisFramerStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerStatsEntry.setDescription('An entry in the mscDataSigChanNisFramerStatsTable.')
mscDataSigChanNisFramerFrmToIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerFrmToIf.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerFrmToIf.setDescription('This attribute counts the number of frames transmitted to the link interface by Framer. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerFrmFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerFrmFromIf.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerFrmFromIf.setDescription('This attribute counts the number of frames received from the link interface by Framer. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerOctetFromIf = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerOctetFromIf.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerOctetFromIf.setDescription('The number of bytes received from the link interface by Framer.')
mscDataSigChanNisFramerAborts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerAborts.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerAborts.setDescription('This attribute counts the total number of aborts received. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerCrcErrors.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerCrcErrors.setDescription('This attribute counts the total number of frames with CRC errors. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerLrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerLrcErrors.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerLrcErrors.setDescription('This attribute counts the total number of frames with LRC errors. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerNonOctetErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerNonOctetErrors.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerNonOctetErrors.setDescription('This attribute counts the total number of frames that were non octet aligned. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerOverruns = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerOverruns.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerOverruns.setDescription('This attribute counts the total number of frames received from the link for which overruns occurred. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerUnderruns = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerUnderruns.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerUnderruns.setDescription('This attribute counts the total number of frames transmitted to the link for which underruns occurred. This count wraps to zero after reaching its maximum value.')
mscDataSigChanNisFramerLargeFrmErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 120, 13, 2, 13, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscDataSigChanNisFramerLargeFrmErrors.setStatus('mandatory')
if mibBuilder.loadTexts: mscDataSigChanNisFramerLargeFrmErrors.setDescription('This attribute counts the total number of frames received which were too large. The frame was longer than 500 bytes. This count wraps to zero after reaching its maximum value.')
# Group and capability OID registrations for this MIB module
# (auto-generated by pysmi; edits here are lost on regeneration).
disdnNISGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 1))
disdnNISGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 1, 1))
disdnNISGroupCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 1, 1, 3))
disdnNISGroupCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 1, 1, 3, 2))
disdnNISCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 3))
disdnNISCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 3, 1))
disdnNISCapabilitiesCA02 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 3, 1, 3))
disdnNISCapabilitiesCA02A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 127, 3, 1, 3, 2))
# Register every symbol defined above with the MIB builder so importers of
# this module can resolve them.
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-DisdnNISMIB", mscDataSigChanNisOperTable=mscDataSigChanNisOperTable, mscDataSigChanNisFramerInterfaceName=mscDataSigChanNisFramerInterfaceName, mscDataSigChanNisDChanStatus=mscDataSigChanNisDChanStatus, mscDataSigChanNis=mscDataSigChanNis, mscDataSigChanNisFramerStatsTable=mscDataSigChanNisFramerStatsTable, mscDataSigChanNisFramer=mscDataSigChanNisFramer, mscDataSigChanNisProvTable=mscDataSigChanNisProvTable, mscDataSigChanNisFramerLargeFrmErrors=mscDataSigChanNisFramerLargeFrmErrors, disdnNISGroup=disdnNISGroup, disdnNISCapabilitiesCA02A=disdnNISCapabilitiesCA02A, mscDataSigChanNisRowStatus=mscDataSigChanNisRowStatus, mscDataSigChanNisFramerIndex=mscDataSigChanNisFramerIndex, mscDataSigChanNisL2Table=mscDataSigChanNisL2Table, mscDataSigChanNisT203=mscDataSigChanNisT203, mscDataSigChanNisFramerComponentName=mscDataSigChanNisFramerComponentName, mscDataSigChanNisFramerAdminState=mscDataSigChanNisFramerAdminState, mscDataSigChanNisFramerOperationalState=mscDataSigChanNisFramerOperationalState, mscDataSigChanNisFramerOctetFromIf=mscDataSigChanNisFramerOctetFromIf, mscDataSigChanNisFramerNonOctetErrors=mscDataSigChanNisFramerNonOctetErrors, mscDataSigChanNisTracing=mscDataSigChanNisTracing, mscDataSigChanNisActiveChannels=mscDataSigChanNisActiveChannels, mscDataSigChanNisCircuitSwitchedK=mscDataSigChanNisCircuitSwitchedK, mscDataSigChanNisFramerStateEntry=mscDataSigChanNisFramerStateEntry, mscDataSigChanNisFramerFrmFromIf=mscDataSigChanNisFramerFrmFromIf, disdnNISGroupCA=disdnNISGroupCA, mscDataSigChanNisFramerProvEntry=mscDataSigChanNisFramerProvEntry, mscDataSigChanNisFramerProvTable=mscDataSigChanNisFramerProvTable, mscDataSigChanNisT200=mscDataSigChanNisT200, disdnNISGroupCA02=disdnNISGroupCA02, mscDataSigChanNisFramerOverruns=mscDataSigChanNisFramerOverruns, mscDataSigChanNisProvEntry=mscDataSigChanNisProvEntry, mscDataSigChanNisFramerCrcErrors=mscDataSigChanNisFramerCrcErrors, 
mscDataSigChanNisRowStatusTable=mscDataSigChanNisRowStatusTable, mscDataSigChanNisFramerUsageState=mscDataSigChanNisFramerUsageState, mscDataSigChanNisStorageType=mscDataSigChanNisStorageType, mscDataSigChanNisN201=mscDataSigChanNisN201, mscDataSigChanNisFramerFrmToIf=mscDataSigChanNisFramerFrmToIf, mscDataSigChanNisFramerRowStatusEntry=mscDataSigChanNisFramerRowStatusEntry, mscDataSigChanNisFramerUnderruns=mscDataSigChanNisFramerUnderruns, mscDataSigChanNisOperEntry=mscDataSigChanNisOperEntry, mscDataSigChanNisFramerStorageType=mscDataSigChanNisFramerStorageType, mscDataSigChanNisRowStatusEntry=mscDataSigChanNisRowStatusEntry, mscDataSigChanNisL2Entry=mscDataSigChanNisL2Entry, disdnNISCapabilities=disdnNISCapabilities, mscDataSigChanNisN200=mscDataSigChanNisN200, mscDataSigChanNisT23=mscDataSigChanNisT23, mscDataSigChanNisIndex=mscDataSigChanNisIndex, mscDataSigChanNisToolsEntry=mscDataSigChanNisToolsEntry, mscDataSigChanNisFramerAborts=mscDataSigChanNisFramerAborts, disdnNISCapabilitiesCA02=disdnNISCapabilitiesCA02, disdnNISCapabilitiesCA=disdnNISCapabilitiesCA, mscDataSigChanNisFramerStateTable=mscDataSigChanNisFramerStateTable, disdnNISGroupCA02A=disdnNISGroupCA02A, mscDataSigChanNisFramerStatsEntry=mscDataSigChanNisFramerStatsEntry, mscDataSigChanNisFramerLrcErrors=mscDataSigChanNisFramerLrcErrors, mscDataSigChanNisFramerRowStatusTable=mscDataSigChanNisFramerRowStatusTable, mscDataSigChanNisFramerRowStatus=mscDataSigChanNisFramerRowStatus, mscDataSigChanNisSide=mscDataSigChanNisSide, mscDataSigChanNisComponentName=mscDataSigChanNisComponentName, disdnNISMIB=disdnNISMIB, mscDataSigChanNisPeakActiveChannels=mscDataSigChanNisPeakActiveChannels, mscDataSigChanNisToolsTable=mscDataSigChanNisToolsTable)
|
import features
import transition
import conll
# Feature-set id: selects which feature template the features module uses.
featSet = 3
# Pre-trained classifier plus the matching feature names, vectorizer and
# the class-index <-> transition-label mappings.
classifier = features.getClassifier(str(featSet))
feature_names = features.featNames(featSet)
vec, (y, dict_classes, inv_dict_classes) = features.getStuff(featSet)
# CoNLL 2006 test corpus and its column layouts (full vs. test-only columns).
test_file = './swedish_talbanken05_test.conll'
column_names_2006 = ['id', 'form', 'lemma', 'cpostag', 'postag', 'feats', 'head', 'deprel', 'phead', 'pdeprel']
column_names_2006_test = ['id', 'form', 'lemma', 'cpostag', 'postag', 'feats']
def parse_ml(stack, queue, graph, trans):
    """Execute one predicted transition on the parser configuration.

    Args:
        stack, queue, graph: current parser state.
        trans (str): predicted transition label; the first two characters
            encode the action ('ra', 'la', 're', 'sh'), and for arcs the
            dependency label follows from index 3 (e.g. 'ra.SS').

    Returns:
        (stack, queue, graph, action) where action is the 2-letter code of
        the transition actually applied.
    """
    if stack and trans[:2] == 'ra':
        stack, queue, graph = transition.right_arc(stack, queue, graph, trans[3:])
        return stack, queue, graph, 'ra'
    if stack and trans[:2] == 'la':
        stack, queue, graph = transition.left_arc(stack, queue, graph, trans[3:])
        return stack, queue, graph, 'la'
    if trans == 're':
        stack, queue, graph = transition.reduce(stack, queue, graph)
        return stack, queue, graph, 're'
    if trans == 'sh':
        stack, queue, graph = transition.shift(stack, queue, graph)
        return stack, queue, graph, 'sh'
    # BUG FIX: the original returned None here, which crashed the caller's
    # 4-value unpack. Fall back to shift so parsing always makes progress.
    print(trans, "is not a valid action")
    stack, queue, graph = transition.shift(stack, queue, graph)
    return stack, queue, graph, 'sh'
def parse(sentence):
    """Greedily parse one sentence with the trained transition classifier.

    Repeatedly extracts features for the current configuration, predicts
    the next transition and applies it until the queue is exhausted.
    Returns the resulting dependency graph.
    """
    stack, queue, graph = features.initialStructures(sentence)
    while queue:
        feature_map = features.extract(stack, queue, graph, feature_names, sentence, featSet)
        encoded = vec.transform(feature_map)
        predicted_class = classifier.predict(encoded)[0]
        action = dict_classes[predicted_class]
        stack, queue, graph, action = parse_ml(stack, queue, graph, action)
    return graph
def forms(sentence):
    """Return the surface forms of a sentence, each followed by a space.

    Args:
        sentence: iterable of CoNLL word dicts with a 'form' key.

    Returns:
        str: e.g. "En katt ." -> "En katt . " (note the trailing space,
        preserved from the original implementation).
    """
    # join() replaces the original quadratic += string concatenation.
    return "".join(word['form'] + " " for word in sentence)
if __name__ == "__main__":
    # Parse every test sentence and write the predicted heads/deprels back
    # into the CoNLL rows, then save the annotated corpus.
    test_sentences = conll.read_sentences(test_file)
    test_sentences = conll.split_rows(test_sentences, column_names_2006)
    for sentence in test_sentences:
        graph = parse(sentence)
        for word in sentence:
            if word['id'] in graph['heads'].keys():
                word['head'] = graph['heads'][word['id']]
                word['deprel'] = graph['deprels'][word['id']]
            else:
                # Words the parser never attached get placeholder values.
                word['head'] = '_'
                word['deprel'] = '_'
    conll.save("parsedTestSentences", test_sentences, column_names_2006)
|
from baconian.test.tests.set_up.setup import TestWithAll
from baconian.algo.dynamics.reward_func.reward_func import CostFunc
from baconian.envs.gym_env import make
import numpy as np
from baconian.core.core import EnvSpec
from baconian.algo.dynamics.dynamics_model import GlobalDynamicsModel
from baconian.algo.policy.ilqr_policy import iLQRPolicy
from baconian.algo.dynamics.dynamics_model import DynamicsEnvWrapper
from baconian.algo.dynamics.terminal_func.terminal_func import RandomTerminalFunc
class DebugDynamics(GlobalDynamicsModel):
    # Deterministic toy dynamics used to debug iLQR: the next state is the
    # current state nudged by a tiny fraction of the action.
    flag = 0.5
    st = None
    def _state_transit(self, state, action, **kwargs) -> np.ndarray:
        return state + 0.0001 * action
        # self.flag *= -1.0
        # return np.ones_like(self.env_spec.obs_space.sample()) * self.flag
        # return self.env_spec.obs_space.sample()
class DebuggingCostFunc(CostFunc):
    # Simple analytic cost used to debug iLQR: c(a) = sum(a + a^2);
    # state and new_state are ignored.
    def __call__(self, state=None, action=None, new_state=None, **kwargs) -> float:
        # return float(np.sum(action * action) + np.sum(state * state))
        return float(np.sum(action + action * action))
class TestiLQRPolicy(TestWithAll):
    def test_correctness(self):
        """Smoke-test iLQR on toy debug dynamics over 10 Pendulum-v0 steps.

        The debug cost c(a) = a + a^2 has its analytical optimum at
        a = -0.5 with cost -0.25 (printed for manual inspection).
        """
        env_id = 'Pendulum-v0'
        env = make(env_id)
        env_spec = EnvSpec(obs_space=env.observation_space,
                           action_space=env.action_space)
        dyna = DebugDynamics(env_spec=env_spec)
        dyna = DynamicsEnvWrapper(dynamics=dyna)
        dyna.set_terminal_reward_func(terminal_func=RandomTerminalFunc(),
                                      reward_func=DebuggingCostFunc())
        policy = iLQRPolicy(env_spec=env_spec,
                            T=10,
                            delta=0.05,
                            iteration=2,
                            dynamics=dyna,
                            dynamics_model_train_iter=10,
                            cost_fn=DebuggingCostFunc())
        st = env.reset()
        dyna.st = np.zeros_like(st)
        for i in range(10):
            ac = policy.forward(st)
            # BUG FIX: step the environment with the policy's action `ac`;
            # the original passed the state `st` as the action.
            st, _, _, _ = env.step(ac)
            # st = dyna.step(action=ac, state=st)
            print("analytical optimal action -0.5, cost -0.25")
            print('state: {}, action: {}, cost {}'.format(
                st, ac,
                policy.iLqr_instance.cost_fn(state=st, action=ac, new_state=None)))
|
from django.conf import settings
from django.test.signals import setting_changed
from rest_framework.settings import APISettings
# User overrides read from Django settings; None falls back to DEFAULTS.
USER_SETTINGS = getattr(settings, "REST_JWT_SSO", None)
# Default configuration for the JWT SSO package, including the JWT claim
# names used in the token header and payload.
DEFAULTS = {
    "SIGN_SALT": None,
    "SALT_METHOD": "rest_framework_jwt_sso.edjwt.algorithm.DefaultSalt",
    "AUTH_TOKEN_PREFIX": "JWT_SSO",
    # JWT Claim Settings
    # --Header
    "JWT_CLAIM_ALGORITHM": "alg",
    "JWT_CLAIM_TYPE": "typ",
    "JWT_CLAIM_VALIDATE_KEY": "key",
    # --Payload
    "JWT_CLAIM_ISSUER": "iss",
    "JWT_CLAIM_SUBJECT": "sub",
    "JWT_CLAIM_AUDIENCE": "aud",
    "JWT_CLAIM_EXPIRATION_TIME": "exp",
    "JWT_CLAIM_NOT_BEFORE": "nbf",
    "JWT_CLAIM_ISSUED_AT": "iat",
    "JWT_CLAIM_JWT_ID": "jti",
    "JWT_CLAIM_USER_ID": "uid",
    "JWT_CLAIM_EMAIL": "eml",
}
# Settings whose string values are dotted paths imported lazily by APISettings.
IMPORT_STRINGS = (
    "SALT_METHOD",
)
# Module-level settings singleton used throughout the package.
jwt_sso_api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
def reload_api_settings(*args, **kwargs):
    """Rebuild the settings singleton when REST_JWT_SSO changes at runtime.

    Connected to Django's ``setting_changed`` signal so tests that use
    ``override_settings`` pick up the new configuration.
    """
    global jwt_sso_api_settings
    changed_setting = kwargs["setting"]
    new_value = kwargs["value"]
    if changed_setting == "REST_JWT_SSO":
        jwt_sso_api_settings = APISettings(new_value, DEFAULTS, IMPORT_STRINGS)
setting_changed.connect(reload_api_settings)
|
from selenium import webdriver
from datetime import date
from openpyxl import load_workbook
# Date stamp written next to today's quotes.
data_atual1 = date.today()

# definir navegador e abrir o chrome
navegador = webdriver.Chrome("C:/Users/TEMP/Desktop/DolarPTAX/chromedriver.exe")

# XPaths of the PTAX buy (compra, td[2]) and sell (venda, td[3]) quotes on
# the Banco Central landing page.
XPATH_PTAX_COMPRA = '/html/body/app-root/app-root/main/dynamic-comp/div/div/div/div[1]/div[1]/div/cotacao/table[1]/tbody/tr[1]/td[2]/span'
XPATH_PTAX_VENDA = '/html/body/app-root/app-root/main/dynamic-comp/div/div/div/div[1]/div[1]/div/cotacao/table[1]/tbody/tr[1]/td[3]/span'

# Workbook that accumulates one row (date, buy, sell) per run.
ARQUIVO_PLANILHA = "G:/Outros computadores/Meu modelo Laptop (1)/DolarPTAX/DOLARPTAX.xlsx"


def _extrair_valor(elemento):
    # The quote is the text between the first '>' and the following '<'
    # of the element's outer HTML, e.g. "<span ...>5,1234</span>".
    # (Replaces the original no-op loops that computed the same thing.)
    return elemento.get_attribute('outerHTML').split('>')[1].split('<')[0]


try:
    # entrar no site do banco central
    navegador.get("https://www.bcb.gov.br/")
    # Each value was previously located twice; one lookup per quote suffices.
    precoff = _extrair_valor(navegador.find_element_by_xpath(XPATH_PTAX_COMPRA))
    precoff2 = _extrair_valor(navegador.find_element_by_xpath(XPATH_PTAX_VENDA))

    # "abre" a planilha do excel
    workbook = load_workbook(filename=ARQUIVO_PLANILHA)
    sheet = workbook.active
    # acha a última célula vazia no excel (first empty row of column A)
    row = 1
    while sheet["A%d" % row].value is not None:
        row = row + 1
    sheet["A%d" % row].value = data_atual1
    sheet["B%d" % row].value = precoff
    sheet["C%d" % row].value = precoff2
    workbook.save(filename=ARQUIVO_PLANILHA)
finally:
    # Always release the browser, even if scraping or saving fails.
    navegador.quit()
|
# -*- coding: utf-8 -*-
#
# Objects for sequence management.
#
# ------------------------------------
# imports
# -------
from cached_property import cached_property as cached
from . import metrics
from . import distance
from . import utils
from . import conversion
# classes
# -------
class Sequence(object):
    """
    Object for managing sequence structure and operating
    on sequences (i.e. getting amino acid sequence, reverse
    complement, gc content, etc ...).
    Args:
        sequence (str): Nucleotide sequence.
    Examples:
        >>> seq = sequtils.Sequence('ACGTACGT')
        >>> seq.gc_percent
        0.25
        >>> seq.revcomplement
        ACGTACGT
        >>> seq.dna_weight
        3895.59
    """
    def __init__(self, sequence):
        # Store the sequence as a plain string; all wrappers below
        # delegate to the metrics/distance/conversion modules.
        self.sequence = str(sequence)
        return
    def __str__(self):
        return self.sequence
    def __len__(self):
        return len(self.sequence)
    def __add__(self, other):
        # Concatenation accepts either another Sequence or a raw string.
        if isinstance(other, Sequence):
            return Sequence(self.sequence + other.sequence)
        else:
            return Sequence(self.sequence + other)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — confirm this is intended.
        if isinstance(other, Sequence):
            return self.sequence == other.sequence
        else:
            return self.sequence == other
    def __contains__(self, other):
        if isinstance(other, Sequence):
            return other.sequence in self.sequence
        else:
            return other in self.sequence
    @cached
    def revcomplement(self):
        """
        Wrapper around :func:`sequtils.revcomplement`
        for the :class:`sequtils.Sequence` object.
        """
        return conversion.revcomplement(self.sequence)
    @cached
    def complement(self):
        """
        Wrapper around :func:`sequtils.complement`
        for the :class:`sequtils.Sequence` object.
        """
        return conversion.complement(self.sequence)
    @cached
    def aa(self):
        """
        Wrapper around :func:`sequtils.aa`
        for the :class:`sequtils.Sequence` object.
        """
        return conversion.aa(self.sequence)
    @cached
    def polydict(self):
        """
        Wrapper around :func:`sequtils.polydict`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.polydict(self.sequence)
    @cached
    def polylength(self):
        """
        Wrapper around :func:`sequtils.polylength`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.polylength(self.sequence)
    @cached
    def entropy(self):
        """
        Wrapper around :func:`sequtils.entropy`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.entropy(self.sequence)
    @cached
    def gc_percent(self):
        """
        Wrapper around :func:`sequtils.gc_percent`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.gc_percent(self.sequence)
    @cached
    def gc_skew(self):
        """
        Wrapper around :func:`sequtils.gc_skew`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.gc_skew(self.sequence)
    @cached
    def gc_shift(self):
        """
        Wrapper around :func:`sequtils.gc_shift`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.gc_shift(self.sequence)
    @cached
    def dna_weight(self):
        """
        Wrapper around :func:`sequtils.dna_weight`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.dna_weight(self.sequence)
    @cached
    def rna_weight(self):
        """
        Wrapper around :func:`sequtils.rna_weight`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.rna_weight(self.sequence)
    @cached
    def aa_weight(self):
        """
        Wrapper around :func:`sequtils.aa_weight`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.aa_weight(self.sequence)
    @cached
    def zipsize(self):
        """
        Wrapper around :func:`sequtils.zipsize`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.zipsize(self.sequence)
    @cached
    def tm(self):
        """
        Wrapper around :func:`sequtils.tm`
        for the :class:`sequtils.Sequence` object.
        """
        return metrics.tm(self.sequence)
    def wrap(self, bases=60):
        """
        Wrapper around :func:`sequtils.wrap`
        for the :class:`sequtils.Sequence` object.
        Args:
            bases (int): Number of bases to include in line.
        """
        return utils.wrap(self.sequence, bases=bases)
    def hamming(self, other):
        """
        Wrapper around :func:`sequtils.hamming`
        for the :class:`sequtils.Sequence` object.
        Args:
            other (str, Sequence): Sequence to compare.
        """
        if isinstance(other, Sequence):
            return distance.hamming(self.sequence, other.sequence)
        else:
            return distance.hamming(self.sequence, other)
    def edit(self, other):
        """
        Wrapper around :func:`sequtils.edit`
        for the :class:`sequtils.Sequence` object.
        Args:
            other (str, Sequence): Sequence to compare.
        """
        if isinstance(other, Sequence):
            return distance.edit(self.sequence, other.sequence)
        else:
            return distance.edit(self.sequence, other)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ..sparse import SparseArray
from .._sparse import merge_sparse_arrays
# Array shapes (1-D, 2-D, 3-D) shared by the parametrized tests below.
test_params = [
    (8,),
    (8, 16),
    (8, 16, 32),
]
@pytest.mark.parametrize(('shape'), test_params)
def test_sparse_init(shape):
    """A fresh SparseArray is empty; from_array round-trips dense data."""
    empty = SparseArray(shape)
    assert (empty.shape == shape)
    assert (empty.size == 0)
    dense = np.ones(shape)
    filled = SparseArray.from_array(dense)
    assert_allclose(filled[...], dense)
def test_sparse_getitem():
    """SparseArray indexing matches dense ndarray indexing for a variety
    of keys: ellipsis, slices, scalars, and fancy (array) indices."""
    shape = (8, 16, 32)
    data = np.random.poisson(np.ones(shape)).astype(float)
    v = SparseArray.from_array(data)

    def check(index):
        # Identical key objects are passed to both __getitem__ calls.
        assert_allclose(v[index], data[index])

    full = slice(None)
    check(Ellipsis)
    check(full)
    check((full, full))
    check((full, full, full))
    check((1, 3, 10))
    check((full, 3, 10))
    check((1, full, 10))
    check((1, 3, full))
    check((1, np.arange(4), full))
    check((np.arange(4), np.arange(4), full))
    check((full, np.arange(4), np.arange(4)))
@pytest.mark.parametrize(('shape'), test_params)
def test_sparse_setitem(shape):
    """Setting values via __setitem__, set(), and set(..., fill=True)."""
    data = np.random.poisson(np.ones(shape)).astype(float)
    # __setitem__ assigns values at the given indices.
    v = SparseArray(shape)
    idx = np.where(data > 0)
    v[idx] = data[idx]
    assert_allclose(v[...], data)
    # set() without fill behaves like __setitem__.
    v = SparseArray(shape)
    v.set(idx, data[idx])
    assert_allclose(v[...], data)
    # With fill=True, repeated sets accumulate instead of overwrite.
    v = SparseArray(shape)
    v.set(idx, data[idx], fill=True)
    v.set(idx, data[idx], fill=True)
    assert_allclose(v[...], 2.0 * data)
    # fill=True also accumulates duplicate indices within a single call.
    v = SparseArray(shape)
    data0 = np.random.poisson(np.ones(shape)).astype(float)
    data1 = np.random.poisson(np.ones(shape)).astype(float)
    data = data0 + data1
    idx0 = np.where(data0 > 0)
    idx1 = np.where(data1 > 0)
    idx_in = tuple([np.concatenate((x, y)) for x, y in zip(idx0, idx1)])
    data_in = np.concatenate((data0[idx0], data1[idx1]))
    v.set(idx_in, data_in, fill=True)
    assert_allclose(v[...], data)
# BUG FIX: the parametrize list contained (np.int32, np.float64) twice;
# the duplicate entry has been removed.
@pytest.mark.parametrize(('dtype_idx', 'dtype_val'),
                         [(np.int64, np.float64),
                          (np.int32, np.float64),
                          (np.int32, np.float32),
                          ])
def test_merge_sparse_arrays(dtype_idx, dtype_val):
    """Merge two sparse (indices, values) pairs across index/value dtypes.

    With ``fill=True`` (last positional argument) values at duplicate
    indices are accumulated, as shown by the expected arrays below.
    """
    idx0 = np.array([0, 0, 1, 4], dtype=dtype_idx)
    val0 = np.array([1.0, 2.0, 3.0, 7.0], dtype=dtype_val)
    idx1 = np.array([0, 1, 2], dtype=dtype_idx)
    val1 = np.array([1.0, 1.0, 1.0], dtype=dtype_val)
    idx, val = merge_sparse_arrays(idx0, val0, idx1, val1)
    assert_allclose(idx, np.unique(np.concatenate((idx0, idx1))))
    assert_allclose(val, np.array([2.0, 3.0, 1.0, 7.0]))
    idx, val = merge_sparse_arrays(idx0, val0, idx1, val1, True)
    assert_allclose(idx, np.unique(np.concatenate((idx0, idx1))))
    assert_allclose(val, np.array([4.0, 4.0, 1.0, 7.0]))
|
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import time as timer
import os
import KratosMultiphysics as Kratos
from Kratos import Logger
import KratosMultiphysics.DEMApplication as Dem
Logger.Print("Running under OpenMP........", label="DEM")
import KratosMultiphysics.StructuralMechanicsApplication as Structural
import KratosMultiphysics.DemStructuresCouplingApplication as DemFem
from KratosMultiphysics.DemStructuresCouplingApplication.dem_fem_coupling_algorithm import Algorithm
class SPAlgorithm(Algorithm):
    """DEM-FEM coupled sand-production algorithm configured by SPParameters.json."""
    def __init__(self):
        super(SPAlgorithm,self).__init__()
        # Read and validate the sand-production specific settings.
        sp_parameters_file_name = "SPParameters.json"
        with open(sp_parameters_file_name,'r') as parameter_file:
            self.sp_parameters = Kratos.Parameters(parameter_file.read())
        self.ValidateSettings()
        self.test_number = self.sp_parameters["problem_data"]["test_number"].GetInt()
        # Test types (4 different options):
        # Test number 0: no test simulation
        # Test number 1: CTW16 specimen
        # Test number 2: CTW10 specimen
        # Test number 3: Blind test specimen
        # Counters controlling how often the SP post-process tool writes data
        # (see IsPostProcessWriteStep).
        self.post_process_step_count = 0
        self.post_process_frequency = self.sp_parameters["post_process_tool"]["output_frequency"].GetInt()
        self.post_process_write_count = self.post_process_frequency
    @classmethod
    def GetDefaultSettings(cls):
        """This function returns the default-settings used by this class
        """
        return Kratos.Parameters("""{
            "problem_data" : {
                "test_number" : 1,
                "center" : [0.0,0.0,0.0],
                "axis" : [0.0,0.0,1.0]
            },
            "post_process_tool":{
                "output_frequency": 0
            }
        }""")
    def ValidateSettings(self):
        """This function validates the settings of the solver
        """
        default_settings = self.GetDefaultSettings()
        self.sp_parameters.ValidateAndAssignDefaults(default_settings)
    def Initialize(self):
        """Set up coupling utilities; the SP-specific tools are only built
        when a test case (test_number != 0) is configured."""
        super(SPAlgorithm,self).Initialize()
        self.InitializeAdditionalProcessInfoVars()
        if self.test_number:
            from KratosMultiphysics.DemStructuresCouplingApplication.control_module_fem_dem_utility import ControlModuleFemDemUtility
            self.control_module_fem_dem_utility = ControlModuleFemDemUtility(self.model, self.dem_solution.spheres_model_part, self.test_number)
            self.control_module_fem_dem_utility.ExecuteInitialize()
            # Create Postprocess tool for SP
            from KratosMultiphysics.DemStructuresCouplingApplication.sand_production_post_process_tool import SandProductionPostProcessTool
            self.sp_post_process_tool = SandProductionPostProcessTool(self.structural_solution._GetSolver().GetComputingModelPart(),
                                                                      self.dem_solution.spheres_model_part,
                                                                      self.test_number)
            from KratosMultiphysics.DemStructuresCouplingApplication import stress_failure_check_utility
            self.stress_failure_check_utility = stress_failure_check_utility.StressFailureCheckUtility(self.dem_solution.spheres_model_part, self.test_number)
    def InitializeAdditionalProcessInfoVars(self):
        # Seed the average-sigma-3 process variable used by the DEM solver.
        self.dem_solution.spheres_model_part.ProcessInfo.SetValue(Dem.SIGMA_3_AVERAGE, 0.0)
    def RunSolutionLoop(self):
        """Main coupled loop: one structural (FEM) step followed by DEM
        sub-stepping with interpolated structural solutions."""
        self.dem_solution.step = 0
        self.dem_solution.time = 0.0
        self.dem_solution.time_old_print = 0.0
        self.time_dem = 0.0
        self.Dt_structural = self.structural_solution._GetSolver().settings["time_stepping"]["time_step"].GetDouble()
        while self.structural_solution.time < self.structural_solution.end_time:
            # Blend the DEM reaction into the FEM load gradually.
            portion_of_the_force_which_is_new = 0.4
            DemFem.DemStructuresCouplingUtilities().SmoothLoadTrasferredToFem(self.dem_solution.rigid_face_model_part, portion_of_the_force_which_is_new)
            # --- structural (FEM) step ---
            self.structural_solution.time = self.structural_solution._GetSolver().AdvanceInTime(self.structural_solution.time)
            self.structural_solution.InitializeSolutionStep()
            if self.test_number:
                self.control_module_fem_dem_utility.ExecuteInitializeSolutionStep()
            self.structural_solution._GetSolver().Predict()
            self.structural_solution._GetSolver().SolveSolutionStep()
            self.structural_solution.FinalizeSolutionStep()
            self.structural_solution.OutputSolutionStep()
            time_final_DEM_substepping = self.structural_solution.time
            self.Dt_DEM = self.dem_solution.spheres_model_part.ProcessInfo.GetValue(Kratos.DELTA_TIME)
            DemFem.InterpolateStructuralSolutionForDEM().SaveStructuralSolution(self.structural_mp)
            DemFem.ComputeDEMFaceLoadUtility().ClearDEMFaceLoads(self.skin_mp)
            # Sand-production accounting depends on the configured test case.
            if self.test_number == 1 or self.test_number == 2:
                self.outer_walls_model_part = self.model["Structure.SurfacePressure3D_lateral_pressure"]
                #DemFem.DemStructuresCouplingUtilities().ComputeSandProductionWithDepthFirstSearch(self.dem_solution.spheres_model_part, self.outer_walls_model_part, self.structural_solution.time)
                DemFem.DemStructuresCouplingUtilities().ComputeSandProductionWithDepthFirstSearchNonRecursiveImplementation(self.dem_solution.spheres_model_part, self.outer_walls_model_part, self.structural_solution.time)
                DemFem.DemStructuresCouplingUtilities().ComputeSandProduction(self.dem_solution.spheres_model_part, self.outer_walls_model_part, self.structural_solution.time)
            elif self.test_number == 3:
                self.outer_walls_model_part_1 = self.model["Structure.SurfacePressure3D_sigmaXpos"]
                self.outer_walls_model_part_2 = self.model["Structure.SurfacePressure3D_sigmaYpos"]
                DemFem.DemStructuresCouplingUtilities().ComputeTriaxialSandProduction(self.dem_solution.spheres_model_part, self.outer_walls_model_part_1, self.outer_walls_model_part_2, self.structural_solution.time)
            # --- DEM sub-stepping up to the new structural time ---
            for self.dem_solution.time_dem in self.yield_DEM_time(self.dem_solution.time, time_final_DEM_substepping, self.Dt_DEM):
                self.dem_solution.time = self.dem_solution.time + self.dem_solution._GetSolver().dt
                self.dem_solution.step += 1
                self.dem_solution.DEMFEMProcedures.UpdateTimeInModelParts(self.dem_solution.all_model_parts, self.dem_solution.time, self.dem_solution._GetSolver().dt, self.dem_solution.step)
                self.dem_solution.InitializeSolutionStep()
                self.dem_solution._GetSolver().Predict()
                DemFem.InterpolateStructuralSolutionForDEM().InterpolateStructuralSolution(self.structural_mp, self.Dt_structural, self.structural_solution.time, self.dem_solution._GetSolver().dt, self.dem_solution.time)
                self.dem_solution.SolverSolve()
                DemFem.DemStructuresCouplingUtilities().MarkBrokenSpheres(self.dem_solution.spheres_model_part)
                # Erase particles inside the borehole cylinder of the test geometry.
                center = Kratos.Array3()
                center[0] = self.sp_parameters["problem_data"]["center"][0].GetDouble()
                center[1] = self.sp_parameters["problem_data"]["center"][1].GetDouble()
                center[2] = self.sp_parameters["problem_data"]["center"][2].GetDouble()
                axis = Kratos.Array3()
                axis[0] = self.sp_parameters["problem_data"]["axis"][0].GetDouble()
                axis[1] = self.sp_parameters["problem_data"]["axis"][1].GetDouble()
                axis[2] = self.sp_parameters["problem_data"]["axis"][2].GetDouble()
                radius = 0
                if self.test_number == 1:
                    radius = 0.0036195; #95% of the real hole. CTW16 specimen
                elif self.test_number == 2:
                    radius = 0.012065; #95% of the real hole. CTW10 specimen
                elif self.test_number == 3:
                    radius = 0.036195; #95% of the real hole. Blind Test
                self.dem_solution.creator_destructor.MarkParticlesForErasingGivenCylinder(self.dem_solution.spheres_model_part, center, axis, radius)
                self.dem_solution.FinalizeSolutionStep()
                DemFem.ComputeDEMFaceLoadUtility().CalculateDEMFaceLoads(self.skin_mp, self.dem_solution._GetSolver().dt, self.Dt_structural)
                #### PRINTING GRAPHS ####
                os.chdir(self.dem_solution.graphs_path)
                self.dem_solution.post_utils.ComputeMeanVelocitiesInTrap("Average_Velocity.txt", self.dem_solution.time, self.dem_solution.graphs_path)
                self.dem_solution.materialTest.MeasureForcesAndPressure()
                self.dem_solution.materialTest.PrintGraph(self.dem_solution.time)
                self.dem_solution.DEMFEMProcedures.PrintGraph(self.dem_solution.time)
                self.dem_solution.DEMFEMProcedures.PrintBallsGraph(self.dem_solution.time)
                self.dem_solution.DEMEnergyCalculator.CalculateEnergyAndPlot(self.dem_solution.time)
                self.dem_solution.BeforePrintingOperations(self.dem_solution.time)
                #### GiD IO ##########################################
                if self.dem_solution.IsTimeToPrintPostProcess():
                    self.dem_solution._GetSolver().PrepareElementsForPrinting()
                    if self.dem_solution.DEM_parameters["ContactMeshOption"].GetBool():
                        self.dem_solution._GetSolver().PrepareContactElementsForPrinting()
                    self.dem_solution.PrintResultsForGid(self.dem_solution.time)
                    self.dem_solution.demio.PrintMultifileLists(self.dem_solution.time, self.dem_solution.post_path)
                    self.dem_solution.time_old_print = self.dem_solution.time
                if self.test_number:
                    self.stress_failure_check_utility.ExecuteFinalizeSolutionStep()
                self.dem_solution.FinalizeTimeStep(self.dem_solution.time)
            DemFem.InterpolateStructuralSolutionForDEM().RestoreStructuralSolution(self.structural_mp)
            if self.test_number:
                self.control_module_fem_dem_utility.ExecuteFinalizeSolutionStep()
            # Write SP data
            if self.IsPostProcessWriteStep():
                self.sp_post_process_tool.WriteData()
    def IsPostProcessWriteStep(self):
        """Return True every ``post_process_frequency`` calls (write cadence)."""
        self.post_process_step_count += 1
        if self.post_process_step_count == self.post_process_write_count:
            self.post_process_write_count += self.post_process_frequency
            return True
        else:
            return False
# Script entry point: build and run the coupled sand-production simulation.
if __name__ == "__main__":
    SPAlgorithm().Run()
|
# Generated by Django 3.0.2 on 2020-02-27 22:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required 'playbook_content' text field to the Playbook model.
    # preserve_default=False: the '' default only back-fills existing rows
    # during this migration and is not kept on the model.
    dependencies = [
        ('playbooks_parser', '0002_auto_20200225_1713'),
    ]
    operations = [
        migrations.AddField(
            model_name='playbook',
            name='playbook_content',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]
|
"""
grib_bbox.py
Copyright (C) 2020-2021 Andreas Motl
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import dataclasses
import json
import logging
import os
import tempfile
from pathlib import Path
from typing import List
import click
from click_option_group import RequiredMutuallyExclusiveOptionGroup, optgroup
from gribmagic.smith.util import FileProcessor, ProcessingResult, json_serializer
from gribmagic.util import setup_logging
logger = logging.getLogger(__name__)
"""
This programs supports the topic "Area of interest from GRIB files".
See https://github.com/earthobservations/gribmagic/blob/main/docs/area_of_interest.rst.
"""
@dataclasses.dataclass
class BBox:
    """
    This holds bounding box information.
    It has two factory methods to create a bounding box
    - ``from_country`` uses an ISO 2-letter country code
    - ``from_coordinates`` uses a 4-tuple (lat_min, lat_max, lon_min, lon_max)
    """
    latitude_min: float
    latitude_max: float
    longitude_min: float
    longitude_max: float
    @staticmethod
    def from_country(country_iso: str):
        """
        Create bounding box using 2-letter country code.
        :param country_iso: 2-letter country code
        :return: BBox instance
        :raises ValueError: If the country code is unknown.
        """
        from country_bounding_boxes import country_subunits_by_iso_code
        # Lookup using "country_bounding_boxes"
        # responds with (lon1, lat1, lon2, lat2) tuple.
        countries = list(country_subunits_by_iso_code(country_iso))
        if not countries:
            raise ValueError(f"Unknown country iso code: {country_iso}")
        # NOTE(review): a country can have several subunits; only the first
        # subunit's bbox is used here — confirm this is intended.
        bbox = countries[0].bbox
        bbox = BBox(
            latitude_min=bbox[1],
            latitude_max=bbox[3],
            longitude_min=bbox[0],
            longitude_max=bbox[2],
        )
        return bbox
    @staticmethod
    def from_coordinates(bbox_tuple: tuple):
        """
        Create bounding box using 4-tuple.
        :param bbox_tuple: 4-tuple (lat_min, lat_max, lon_min, lon_max)
        :return: BBox instance
        """
        #
        bbox = BBox(*bbox_tuple)
        return bbox
    def to_tuple(self, lonlat: bool = False) -> tuple:
        """
        Return bounding box as 4-tuple, optionally swaps to longitude/latitude.
        :param lonlat: Whether to swap to lon/lat.
        :return: 4-tuple
        """
        if lonlat:
            # Return tuple like (lon_min, lon_max, lat_min, lat_max)
            # This is needed for CDO.
            bbox_tuple = (
                self.longitude_min,
                self.longitude_max,
                self.latitude_min,
                self.latitude_max,
            )
        else:
            # Return tuple like (lat_min, lat_max, lon_min, lon_max)
            bbox_tuple = dataclasses.astuple(self)
        return bbox_tuple
    def to_string(self, separator: str, lonlat: bool = False) -> str:
        """
        Return bounding box as 4-tuple, serialized to a string using given separator.
        Optionally swaps to longitude/latitude.
        :param separator: Separator character to use when joining tuple elements.
        :param lonlat: Whether to swap to lon/lat.
        :return: The four coordinates joined by ``separator``.
        """
        bbox_tuple = self.to_tuple(lonlat=lonlat)
        return separator.join(map(str, bbox_tuple))
class GRIBSubset:
"""
The main workhorse to read a number of GRIB files and
extract a subset by applying a bounding box.
It can use different methods like
- cdo-shellout
- cdo-python
- xarray
As of today, Xarray's cfgrib backend (version 0.9.8.5) can
not properly write GRIB output, so there is an option to work
around that by using netCDF.
"""
    def __init__(
        self,
        input: List[Path],
        output: Path,
        bbox: BBox,
        method: str,
        use_netcdf: bool,
        plot: bool,
        dry_run: bool = False,
    ):
        """
        Create a new GRIBSubset instance.
        :param input: List of input filenames.
        :param output: Output directory. If this doesn't exist, it will be created beforehand.
        :param bbox: The BBox instance describing the area of interest.
        :param method: One of the methods how bbox'ing will take place.
        :param use_netcdf: Whether to process into netCDF.
        :param plot: Whether to also render each output file to PNG.
        :param dry_run: When True, only compute output paths without processing.
        """
        self.input = input
        self.output = output
        self.bbox = bbox
        self.method = method
        self.use_netcdf = use_netcdf
        self.do_plot = plot
        self.dry_run = dry_run
        # Compute output folder.
        # Results land in a subdirectory named after the bounding box, e.g.
        # "bbox_47.3_54.9_5.9_15.0".
        subdirectory = f'bbox_{self.bbox.to_string("_")}'
        self.outfolder = Path(self.output).joinpath(subdirectory)
    def process(self) -> List[ProcessingResult]:
        """
        Process all input files.
        Delegates per-file work to :meth:`step` via a ``FileProcessor``.
        :return: List of ``ProcessingResult`` instances
        """
        processor = FileProcessor(input=self.input, method=self.step)
        return processor.resolve().run()
    def step(self, item: ProcessingResult) -> None:
        """
        Process a single input item: extract the area of interest and,
        when plotting is enabled, also render a PNG.
        :param item: ``ProcessingResult`` mutated in place (output/plot paths).
        :return:
        """
        # Render GRIB.
        gribfile_subgrid = self.extract_area(item.input)
        item.output = gribfile_subgrid
        # Render PNG.
        if self.do_plot:
            try:
                pngfile = self.plot(gribfile_subgrid)
                item.plot = pngfile
            except Exception as ex:
                logger.exception(f"Plotting failed: {ex}")
                # TODO: Raise exception conditionally.
                raise
def extract_area(self, infile: Path) -> Path:
"""
Main area subsetting method.
:param infile: Path to input file
:return: Path to output file
"""
# Prepare information about output file.
if self.use_netcdf:
folder = "netcdf"
suffix = ".nc"
else:
folder = "grib"
suffix = None
# Compute output file location.
outfolder = self.outfolder.joinpath(folder)
outfolder.mkdir(parents=True, exist_ok=True)
outfile = outfolder.joinpath(infile.name)
if suffix:
outfile = outfile.with_suffix(suffix)
if self.dry_run:
return outfile
# Apply bounding box to GRIB file.
if self.method == "cdo-shellout":
payload = self.bbox_cdo_shellout(infile)
elif self.method == "cdo-python":
payload = self.bbox_cdo_python(infile)
elif self.method == "xarray":
payload = self.bbox_xarray(infile)
# Write output file.
open(outfile, "wb").write(payload)
return outfile
    def bbox_cdo_shellout(self, infile: Path) -> bytes:
        """
        Apply bounding box using "cdo".
        Here, we build the command ourselves.
        - https://code.mpimet.mpg.de/projects/cdo/wiki/Tutorial
        - https://github.com/mhaberler/docker-dwd-open-data-downloader/blob/003ab3f/extract/Makefile#L53-L62
        :param infile: Path to input file
        :return: Content of output file
        """
        # cdo -sellonlatbox,-180,180,0,90 <infile> <outfile>
        # CDO expects the box as lon_min,lon_max,lat_min,lat_max.
        bbox_string = self.bbox.to_string(",", lonlat=True)
        tmpfile = tempfile.NamedTemporaryFile()
        # Compute output format.
        output_format = ""
        # FIXME: That would yield a netCDF file with parameter "2t" instead of "t2m".
        """
        if self.use_netcdf:
            output_format = "--format=nc4"
        """
        command = f"cdo --eccodes --cmor {output_format} sellonlatbox,{bbox_string} '{infile}' '{tmpfile.name}'"
        exitcode = os.system(command)
        assert exitcode == 0, f"Invoking `cdo` failed. command={command}"
        # netCDF conversion (if requested) happens in to_grib_or_netcdf.
        return self.to_grib_or_netcdf(tmpfile.name)
def bbox_cdo_python(self, infile: Path) -> bytes:
"""
Apply bounding box using "cdo".
Here, we use the Python wrapper.
- https://pypi.org/project/cdo/
- https://code.mpimet.mpg.de/boards/1/topics/6392
:param infile: Path to input file
:return: Content of output file
"""
import cdo
bbox_string = self.bbox.to_string(",", lonlat=True)
cdo = cdo.Cdo(logging=True, debug=False)
tmpfile = tempfile.NamedTemporaryFile()
cdo.sellonlatbox(bbox_string, input=str(infile), output=tmpfile.name)
return self.to_grib_or_netcdf(tmpfile.name)
def to_grib_or_netcdf(self, gribfile: str) -> bytes:
"""
Depending on the configuration of GRIBSubset,
either return content of GRIB file or convert
to netCDF-4 format with compression.
This is needed because the ``--format=nc4`` option of ``cdo``
would produce a netCDF-4 file with parameter "2t" instead of "t2m".
:param gribfile: Path to input GRIB file
:return: Content of output file
"""
if self.use_netcdf:
tmpfile_netcdf = tempfile.NamedTemporaryFile()
command = f"grib_to_netcdf -k 4 -d 6 -o '{tmpfile_netcdf.name}' '{gribfile}'"
os.system(command)
outfile = tmpfile_netcdf.name
else:
outfile = gribfile
return open(outfile, "rb").read()
def bbox_xarray(self, infile: Path) -> bytes:
    """
    Apply bounding box using Xarray.

    - https://xarray.pydata.org/en/stable/generated/xarray.Dataset.where.html
    - https://stackoverflow.com/a/62209490

    FIXME: Needs a patch.
           Currently, Xarray will croak on indexing the Pandas datetime field
           when operating on GRIB2 files.

    :param infile: Path to input file
    :return: Content of output file
    """
    import xarray as xr
    from cfgrib.xarray_to_grib import to_grib

    ds = xr.open_dataset(infile, engine="cfgrib")
    # Keep only cells inside the bounding box, dropping everything else.
    result: xr.Dataset = ds.where(
        (ds.latitude >= self.bbox.latitude_min)
        & (ds.latitude <= self.bbox.latitude_max)
        & (ds.longitude >= self.bbox.longitude_min)
        & (ds.longitude <= self.bbox.longitude_max),
        drop=True,
    )
    tmpfile = tempfile.NamedTemporaryFile()
    if self.use_netcdf:
        result.to_netcdf(tmpfile.name)
    else:
        to_grib(result, tmpfile.name)
    # Use a context manager so the file handle does not leak.
    with open(tmpfile.name, "rb") as f:
        return f.read()
def plot(self, infile: Path) -> Path:
    """
    Plot the outcome using ECMWF Magics.

    TODO: Use a custom ``magics.mmap()`` with coordinates derived from
          ``self.bbox`` instead of ``subpage_map_area_name="central_europe"``
          for better zooming into the area of interest.

    :param infile: Path to input file
    :return: Path to output file
    """
    # Suppress banner output on STDOUT.
    os.environ["MAGPLUS_QUIET"] = "true"
    from Magics import macro as magics

    # Compute outfile location.
    outfolder = self.outfolder.joinpath("png")
    outfolder.mkdir(parents=True, exist_ok=True)
    outfile = outfolder.joinpath(infile.name)
    # Magics appends the ".png" suffix itself; this is the real result path.
    outfile_real = outfile.with_suffix(".png")
    if self.dry_run:
        return outfile_real

    # Setting of the output file name.
    output = magics.output(
        output_name=str(outfile), output_formats=["png"], output_name_first_page_number="off"
    )

    # Import the data.
    if self.use_netcdf:
        # When plotting netCDF, the variable name has to be given.
        netcdf_variable = get_netcdf_main_variable(infile)
        data = magics.mnetcdf(
            netcdf_filename=str(infile), netcdf_value_variable=netcdf_variable
        )
    else:
        data = magics.mgrib(grib_input_file_name=str(infile))

    # Apply an automatic styling.
    contour = magics.mcont(contour_automatic_setting="ecmwf")
    coast = magics.mcoast()

    # Select area by predefined library name.
    # https://github.com/ecmwf/notebook-examples/blob/master/visualisation/tutorials/Subpage-Projections.ipynb
    projection = magics.mmap(
        subpage_map_library_area="on",
        subpage_map_area_name="central_europe",
        page_id_line="off",
    )

    magics.plot(output, projection, data, contour, coast)
    return outfile_real
def get_netcdf_main_variable(filename: str) -> str:
    """
    Return the name of the first variable in a netCDF file.

    This is usually what you want.

    Examples:

    >>> f.variables.keys()
    dict_keys(['t2m', 'time', 'step', 'heightAboveGround', 'latitude', 'longitude', 'valid_time'])

    >>> f.variables.keys()
    dict_keys(['u', 'time', 'step', 'isobaricInhPa', 'latitude', 'longitude', 'valid_time'])

    :param filename: Path to the netCDF file
    :return: Name of its first variable
    """
    import netCDF4

    dataset = netCDF4.Dataset(filename)
    try:
        # Mapping iteration yields keys in insertion order, so this is
        # the first declared variable.
        return next(iter(dataset.variables))
    finally:
        dataset.close()
# CLI entry point: wires the command line to the `GRIBSubset` machinery.
@click.command(
    help="""
Extract area of interest from GRIB files using a bounding box.
INPUT can be a single file or a list of files.
For specifying the area of interest, either use "--country" or "--bbox".
"""
)
@click.argument("input", type=click.Path(file_okay=True, dir_okay=True), required=True, nargs=-1)
@click.option(
    "--output",
    envvar="GM_DATA_PATH",
    type=click.Path(exists=False, file_okay=False, dir_okay=True),
    help="The output directory",
    required=True,
)
# Exactly one of "--country" / "--bbox" must be supplied (mutually exclusive group).
@optgroup.group("area", cls=RequiredMutuallyExclusiveOptionGroup, help="The area of interest")
@optgroup.option("--country", type=str, help="The country ISO code to derive a bounding box")
@optgroup.option(
    "--bbox",
    type=click.Tuple([float, float, float, float]),
    nargs=4,
    help="The bounding box. Use a space-separated list of 'lat_min lat_max lon_min lon_max'",
    default=(None, None, None, None),
)
@click.option(
    "--method",
    type=click.Choice(["cdo-shellout", "cdo-python", "xarray"], case_sensitive=False),
    help="Which bbox method to use, defaults to cdo-shellout",
    required=False,
    default="cdo-shellout",
)
@click.option("--use-netcdf", is_flag=True, help="Whether to use netCDF", required=False)
@click.option("--plot", is_flag=True, help="Whether to produce png plots", required=False)
@click.option(
    "--dry-run", is_flag=True, help="Whether to simulate processing", required=False, default=False
)
def main(
    input: List[Path],
    output: Path,
    country: str,
    bbox: tuple,
    method: str,
    use_netcdf: bool,
    plot: bool,
    dry_run: bool,
):
    """Run the GRIB subsetting pipeline and report the outcome as JSON."""
    # Setup logging.
    setup_logging(level=logging.INFO)

    # Create bounding box from selected area of interest.
    # NOTE(review): with no "--country", the "--bbox" default tuple of Nones is
    # truthy, so `BBox.from_coordinates` would receive it — the required
    # mutually-exclusive option group is what prevents that in practice.
    if country:
        bbox = BBox.from_country(country)
    elif bbox:
        bbox = BBox.from_coordinates(bbox)
    logger.info(f"Using bounding box {bbox}")

    # Invoke the machinery.
    subgrid = GRIBSubset(
        input=input,
        output=output,
        bbox=bbox,
        method=method,
        use_netcdf=use_netcdf,
        plot=plot,
        dry_run=dry_run,
    )
    results = subgrid.process()

    # Report about the outcome.
    print(json.dumps(results, default=json_serializer, indent=4))


if __name__ == "__main__":  # pragma: nocover
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-26 12:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
import speeches.models
class Migration(migrations.Migration):
    """Initial migration for the speeches app.

    Creates the Link, Mandate, Organization, Person, Session and Speech
    models, then attaches the Link foreign keys afterwards (they point at
    models created later in the same migration).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Generic URL attached to an organization, person or session.
        migrations.CreateModel(
            name='Link',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='creation time')),
                ('updated_at', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='last modification time')),
                ('url', models.URLField(help_text='A URL', max_length=350, verbose_name='url')),
                ('note', models.CharField(blank=True, help_text="A note, e.g. 'Wikipedia page'", max_length=256, null=True, verbose_name='note')),
                ('name', models.TextField(blank=True, null=True)),
                ('date', models.DateField(blank=True, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        # A legislative term/mandate.
        migrations.CreateModel(
            name='Mandate',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(blank=True, null=True)),
            ],
        ),
        # Parliamentary body, party or committee (self-referential via `parent`).
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='creation time')),
                ('updated_at', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='last modification time')),
                ('name', models.TextField(help_text='A primary name, e.g. a legally recognized name', verbose_name='name')),
                ('name_parser', models.CharField(blank=True, help_text='Name for parser.', max_length=500, null=True)),
                ('acronym', models.CharField(blank=True, help_text='Organization acronym', max_length=128, null=True, verbose_name='acronym')),
                ('gov_id', models.TextField(blank=True, help_text='Gov website ID', null=True, verbose_name='Gov website ID')),
                ('classification', models.CharField(blank=True, help_text='An organization category, e.g. committee', max_length=128, null=True, verbose_name='classification')),
                ('dissolution_date', speeches.models.PopoloDateTimeField(blank=True, help_text='A date of dissolution', null=True)),
                ('founding_date', speeches.models.PopoloDateTimeField(blank=True, help_text='A date of founding', null=True)),
                ('description', models.TextField(blank=True, help_text='Organization description', null=True)),
                ('is_coalition', models.IntegerField(blank=True, help_text='1 if coalition, -1 if not, 0 if it does not apply', null=True)),
                ('parent', models.ForeignKey(blank=True, help_text='The organization that contains this organization', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='speeches.Organization')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Popolo-style person record with parliament-specific extras.
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='creation time')),
                ('updated_at', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='last modification time')),
                ('name', models.CharField(help_text="A person's preferred full name", max_length=128, verbose_name='name')),
                ('name_parser', models.CharField(blank=True, help_text='Name for parser.', max_length=500, null=True)),
                ('classification', models.CharField(blank=True, help_text='Classification for sorting purposes.', max_length=128, null=True, verbose_name='classification')),
                ('family_name', models.CharField(blank=True, help_text='One or more family names', max_length=128, null=True, verbose_name='family name')),
                ('given_name', models.CharField(blank=True, help_text='One or more primary given names', max_length=128, null=True, verbose_name='given name')),
                ('additional_name', models.CharField(blank=True, help_text='One or more secondary given names', max_length=128, null=True, verbose_name='additional name')),
                ('honorific_prefix', models.CharField(blank=True, help_text="One or more honorifics preceding a person's name", max_length=128, null=True, verbose_name='honorific prefix')),
                ('honorific_suffix', models.CharField(blank=True, help_text="One or more honorifics following a person's name", max_length=128, null=True, verbose_name='honorific suffix')),
                ('patronymic_name', models.CharField(blank=True, help_text='One or more patronymic names', max_length=128, null=True, verbose_name='patronymic name')),
                ('sort_name', models.CharField(blank=True, help_text='A name to use in an lexicographically ordered list', max_length=128, null=True, verbose_name='sort name')),
                ('previous_occupation', models.TextField(blank=True, help_text="The person's previous occupation", null=True, verbose_name='previous occupation')),
                ('education', models.TextField(blank=True, help_text="The person's education", null=True, verbose_name='education')),
                ('education_level', models.TextField(blank=True, help_text="The person's education level", null=True, verbose_name='education level')),
                ('mandates', models.IntegerField(blank=True, help_text="Person's number of mandates, including the current one", null=True, verbose_name='mandates')),
                ('email', models.EmailField(blank=True, help_text='A preferred email address', max_length=254, null=True, verbose_name='email')),
                ('gender', models.CharField(blank=True, help_text='A gender', max_length=128, null=True, verbose_name='gender')),
                ('birth_date', speeches.models.PopoloDateTimeField(blank=True, help_text='A date of birth', null=True, verbose_name='date of birth')),
                ('death_date', speeches.models.PopoloDateTimeField(blank=True, help_text='A date of death', null=True, verbose_name='date of death')),
                ('summary', models.CharField(blank=True, help_text="A one-line account of a person's life", max_length=512, null=True, verbose_name='summary')),
                ('biography', models.TextField(blank=True, help_text="An extended account of a person's life", null=True, verbose_name='biography')),
                ('image', models.URLField(blank=True, help_text='A URL of a head shot', null=True, verbose_name='image')),
                ('gov_id', models.CharField(blank=True, help_text='gov website id for the scraper', max_length=255, null=True, verbose_name='gov_id')),
                ('gov_picture_url', models.URLField(blank=True, help_text='URL to gov website pic', null=True, verbose_name='gov image url')),
                ('voters', models.IntegerField(blank=True, help_text='number of votes cast for this person in their district', null=True, verbose_name='voters')),
                ('active', models.BooleanField(default=True, help_text='a generic active or not toggle', verbose_name='active')),
                ('gov_url', models.ForeignKey(blank=True, help_text='URL to gov website profile', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='gov_link', to='speeches.Link')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Sitting of an organization; speeches belong to a session.
        migrations.CreateModel(
            name='Session',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='creation time')),
                ('updated_at', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='last modification time')),
                ('name', models.CharField(blank=True, help_text='Session name', max_length=255, null=True)),
                ('gov_id', models.CharField(blank=True, help_text='Gov website ID.', max_length=255, null=True)),
                ('start_time', speeches.models.PopoloDateTimeField(blank=True, help_text='Start time', null=True)),
                ('end_time', speeches.models.PopoloDateTimeField(blank=True, help_text='End time', null=True)),
                ('classification', models.CharField(blank=True, help_text='Session classification', max_length=128, null=True)),
                ('in_review', models.BooleanField(default=False, help_text='Is session in review?')),
                ('mandate', models.ForeignKey(blank=True, help_text='The mandate of this milestone.', null=True, on_delete=django.db.models.deletion.CASCADE, to='speeches.Mandate')),
                ('organization', models.ForeignKey(blank=True, help_text='The organization in session', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='session', to='speeches.Organization')),
            ],
            options={
                'abstract': False,
            },
        ),
        # A single utterance, with temporal validity (valid_from/valid_to).
        migrations.CreateModel(
            name='Speech',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='creation time')),
                ('updated_at', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='last modification time')),
                ('valid_from', models.DateTimeField(blank=True, default=None, help_text='row valid from', null=True)),
                ('valid_to', models.DateTimeField(blank=True, default=None, help_text='row valid to', null=True)),
                ('content', models.TextField(help_text='Words spoken')),
                ('order', models.IntegerField(blank=True, help_text='Order of speech', null=True)),
                ('start_time', speeches.models.PopoloDateTimeField(blank=True, help_text='Start time', null=True)),
                ('end_time', speeches.models.PopoloDateTimeField(blank=True, help_text='End time', null=True)),
                # NOTE(review): default=2 hard-codes an Organization primary key
                # as the default party — verify that row exists in target DBs.
                ('party', models.ForeignKey(blank=True, default=2, help_text='The party of the person making the speech', null=True, on_delete=django.db.models.deletion.CASCADE, to='speeches.Organization')),
                ('session', models.ForeignKey(blank=True, help_text='Speech session', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='speeches', to='speeches.Session')),
                ('speaker', models.ForeignKey(help_text='Person making the speech', on_delete=django.db.models.deletion.CASCADE, to='speeches.Person')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Link foreign keys are added last because their targets are
        # created above in this same migration.
        migrations.AddField(
            model_name='link',
            name='organization',
            field=models.ForeignKey(blank=True, help_text='The organization of this link.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='links', to='speeches.Organization'),
        ),
        migrations.AddField(
            model_name='link',
            name='person',
            field=models.ForeignKey(blank=True, help_text='The person of this link.', null=True, on_delete=django.db.models.deletion.CASCADE, to='speeches.Person'),
        ),
        migrations.AddField(
            model_name='link',
            name='session',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='speeches.Session'),
        ),
    ]
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This script defines TensorflowCheckpointDumper class.
This class takes a tensorflow checkpoint file and writes all of the variables in the
checkpoint to a directory which deeplearnjs can take as input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import iteritems
import argparse
import json
import os
import re
import tensorflow as tf
from checkpoint_dumper import CheckpointDumper
class TensorflowCheckpointDumper(CheckpointDumper):
    """Class for dumping Tensorflow Checkpoints.

    Reads every variable from a checkpoint and writes it, together with a
    manifest, to a directory which deeplearnjs can take as input.

    Attributes
    ----------
    reader : NewCheckpointReader
        Reader for given tensorflow checkpoint
    """

    def __init__(self, checkpoint_file, output_dir, remove_variables_regex):
        """Constructs object for Tensorflow Checkpoint Dumper.

        Parameters
        ----------
        checkpoint_file : str
            Path to the model checkpoint
        output_dir : str
            Output directory path
        remove_variables_regex : str
            Regex expression for variables to be ignored
        """
        super(TensorflowCheckpointDumper, self).__init__(
            checkpoint_file, output_dir, remove_variables_regex)
        self.reader = tf.train.NewCheckpointReader(self.checkpoint_file)

    def var_name_to_filename(self, var_name):
        """Converts variable names to standard file names.

        Characters outside ``CheckpointDumper.FILENAME_CHARS`` are dropped,
        except ``/`` which becomes ``_``.

        Parameters
        ----------
        var_name : str
            Variable name to be converted

        Returns
        -------
        str
            Standardized file name
        """
        allowed = CheckpointDumper.FILENAME_CHARS
        return ''.join(
            c if c in allowed else '_'
            for c in var_name
            if c in allowed or c == '/')

    def build_and_dump_vars(self):
        """Builds and dumps variables and a manifest file.
        """
        var_to_shape_map = self.reader.get_variable_to_shape_map()
        for var_name, var_shape in iteritems(var_to_shape_map):
            # Skip the training bookkeeping variable and anything matching
            # the user-supplied ignore regex.
            if self.should_ignore(var_name) or var_name == 'global_step':
                print('Ignoring ' + var_name)
                continue

            var_filename = self.var_name_to_filename(var_name)
            self.manifest[var_name] = {'filename': var_filename, 'shape': var_shape}
            self.dump_weights(
                var_name, var_filename, var_shape,
                self.reader.get_tensor(var_name))

        self.dump_manifest()
|
import glob
import os
import numpy as np
from astropy.io import fits
from tqdm import tqdm
from lcc.db_tier.base_query import LightCurvesDb
from lcc.entities.exceptions import InvalidFilesPath, InvalidFile
from lcc.entities.light_curve import LightCurve
from lcc.entities.star import Star
from lcc.utils.output_process_modules import loadFromFile
# TODO: This class needs to be upgraded
class FileManager(LightCurvesDb):
    """
    This class is responsible for managing light curve files.

    Attributes
    -----------
    path : list, iterable
        Path key of folder of light curves.

    star_class : str
        Name of the loaded star-like type (e.g. Cepheids)

    suffix : str
        Suffix of light curve files in the folder. If suffix is "fits",
        files are loaded as fits files, otherwise files are considered
        as .dat files of light curve such as:

            #time mag err
            12 13.45 0.38

    files_limit : int, str
        Number of files which will be loaded

    db_ident : str
        Name of the database to which the file name will be assigned.

        EXAMPLE:
            For the file "my_macho_star.dat" and given db_ident as "macho"
            makes Star object:

            star.ident["macho"] --> my_macho_star

    files_to_load : iterable of str
        List of file names which should be loaded from the given folder.
        If it is not specified all files will be loaded

    object_file_name : str
        Name of the pickle file which contains list of star objects
    """

    SUFFIXES = ["dat", "txt", "fits", "FITS"]
    DEFAULT_STARCLASS = "star"

    # FITS primary header keywords carrying star metadata.
    FITS_RA = "RA"
    FITS_DEC = "DEC"
    FITS_RA_UNIT = "RA_UN"
    FITS_DEC_UNIT = "DEC_UN"
    FITS_NAME = "IDENT"
    FITS_CLASS = "CLASS"
    DB_ORIGIN = "DB_ORIGIN"
    FITS_SUFFIX = ("fits", "FITS")

    # NOTE(review): values read via `np.loadtxt` are floats, so comparing them
    # against these mostly-string sentinels rarely matches — verify whether
    # this filter is still effective for the intended input files.
    BAD_VALUES = ("-99", "-99.0", "99", None, "N/A", np.NaN)

    TIME_COL = 0  # Order of columns in the light curve file
    MAG_COL = 1
    ERR_COL = 2

    ROUND_DIGITS = 3

    QUERY_OPTIONS = ["path"]

    def __init__(self, obtain_params):
        """
        Parameters
        ----------
        obtain_params : dict
            Query dictionary (see class Attributes doc above)
        """
        if isinstance(obtain_params, list) and len(obtain_params) == 1:
            obtain_params = obtain_params[0]

        path = obtain_params.get("path", None)
        if not path:
            raise IOError("Path %s was not found" % path)
        # Normalize a single path string to a list of paths.
        if isinstance(path, str):
            path = [path]
        self.path = path

        self.star_class = obtain_params.get(
            "star_class", self.DEFAULT_STARCLASS)
        self.suffix = obtain_params.get("suffix", None)

        file_lim = obtain_params.get("files_limit")
        if file_lim:
            self.files_limit = int(file_lim)
        else:
            self.files_limit = None

        self.db_ident = obtain_params.get("db_ident")
        self.files_to_load = obtain_params.get("files_to_load")
        self.object_file_name = obtain_params.get("object_file_name")

    def getStars(self, load_lc=True):
        """
        Common method for all stars provider.

        If there is object_file_name in the query dictionary, the object file
        of list of stars is loaded. In other case files from the given folder
        paths are loaded into star objects.

        Returns
        --------
        list of `Star` objects
            Star objects with light curves
        """
        if self.object_file_name:
            return self._load_stars_object()
        stars = []
        for path in self.path:
            stars += self._load_stars_from_folder(path)
        return stars

    def _load_stars_from_folder(self, path):
        """Load all files with a certain suffix as light curves"""
        if not path.endswith("/"):
            path += "/"

        # Get all light curve files (all files which end with a known suffix).
        if not self.suffix:
            stars_list = []
            for suffix in self.SUFFIXES:
                stars_list += glob.glob("{}*{}".format(path, suffix))
        else:
            stars_list = glob.glob("{}*{}".format(path, self.suffix))

        found_files = len(stars_list)
        if found_files == 0:
            if self.suffix:
                raise InvalidFilesPath(
                    "There are no stars in %s with %s suffix" % (path, self.suffix))
            raise InvalidFilesPath(
                "There are no stars in %s with any of supported suffix: %s" % (path, self.SUFFIXES))

        files_limit = self.files_limit or found_files

        if self.suffix in self.FITS_SUFFIX:
            return self._loadFromFITS(stars_list, files_limit)

        stars = self._loadDatFiles(
            [s for s in stars_list if s.endswith("dat")], files_limit)
        stars += self._loadFromFITS(
            [s for s in stars_list if s.endswith("fits")], files_limit)
        return stars

    def _loadDatFiles(self, star_paths, numberOfFiles):
        """Load dat light curve files into `Star` objects."""
        if not star_paths:
            return []
        stars = []
        # Load every light curve and put it into a star object.
        for singleFile in tqdm(star_paths[:numberOfFiles], desc="Loading dat files:"):
            if self.files_to_load and os.path.basename(singleFile) not in self.files_to_load:
                # BUGFIX: was `break`, which aborted the whole loop at the
                # first non-requested file; skip just this file instead.
                continue
            lc = LightCurve(self._loadLcFromDat(singleFile))
            # Check that the light curve is not empty.
            if len(lc.mag) >= 1:
                db_ident = self.parseFileName(singleFile)
                if self.db_ident:
                    ident = {self.db_ident: {"name": db_ident}}
                else:
                    ident = {"file": {"name": db_ident}}
                star = Star(ident=ident)
                star.starClass = self.star_class
                star.putLightCurve(lc)
                stars.append(star)
        return stars

    @classmethod
    def _loadLcFromDat(cls, file_name):
        """
        Load light curve from a dat file.

        Parameters
        -----------
        file_name : str
            Name of the light curve file with its path

        Returns
        --------
        list
            List of [time, mag, err] rows

        Raises
        ------
        InvalidFilesPath
            If the file cannot be opened.
        InvalidFile
            If the columns have inconsistent lengths.
        """
        try:
            dat = np.loadtxt(file_name, usecols=(
                cls.TIME_COL, cls.MAG_COL, cls.ERR_COL), skiprows=0)
        except IndexError:
            # Retry, skipping a possible two-line header.
            dat = np.loadtxt(file_name, usecols=(
                cls.TIME_COL, cls.MAG_COL, cls.ERR_COL), skiprows=2)
        except IOError as err:
            raise InvalidFilesPath(
                "\nCannot open light curve file\n %s" % err)

        # Columns are ordered per TIME_COL/MAG_COL/ERR_COL (fixes the
        # previously swapped `mag, time` variable names).
        time, mag, err = dat.T
        if not (len(mag) == len(time) == len(err)):
            raise InvalidFile(
                "Length of columns in light curve file is not the same")

        clean_dat = []
        for t, m, e in zip(time, mag, err):
            if (t not in cls.BAD_VALUES and m not in cls.BAD_VALUES and
                    e not in cls.BAD_VALUES):
                clean_dat.append([round(t, cls.ROUND_DIGITS),
                                  round(m, cls.ROUND_DIGITS),
                                  round(e, cls.ROUND_DIGITS)])
        return clean_dat

    def _load_stars_object(self):
        """Load object (pickle) file containing a list of stars"""
        # BUGFIX: `self.path` is normalized to a list in the constructor, so
        # join the object file name with its first entry, not with the list.
        stars = loadFromFile(os.path.join(self.path[0], self.object_file_name))

        if len(stars) == 0:
            raise InvalidFile("There are no stars in object file")
        if stars[0].__class__.__name__ != "Star":
            raise InvalidFile("It is not list of stars")
        return stars

    @staticmethod
    def parseFileName(file_path):
        """Return cleaned name of the star without path and suffix"""
        end = None
        if file_path.rfind(".") != -1:
            end = file_path.rfind(".")
        return file_path[file_path.rfind("/") + 1:end]

    def _loadFromFITS(self, star_paths, files_lim=None):
        """Load FITS light curve files into `Star` objects."""
        if not star_paths:
            return []
        if files_lim and isinstance(files_lim, int):
            star_paths = star_paths[:files_lim]

        stars = []
        for path in tqdm(star_paths, desc="Loading FITS files:"):
            try:
                fits_file = fits.open(path)
            except Exception as e:
                # BUGFIX: report the offending file, not the whole path list.
                raise InvalidFile("Invalid fits file or path: {}\n{}".format(path, e))
            stars.append(self._createStarFromFITS(fits_file))
        return stars

    @classmethod
    def _createStarFromFITS(cls, fits_file):
        """Build a `Star` from an opened FITS file, then close the file."""
        DB_NAME_END = "_name"
        DB_IDENT_SEP = "_id_"

        prim_hdu = fits_file[0].header
        ra = prim_hdu.get(cls.FITS_RA)
        dec = prim_hdu.get(cls.FITS_DEC)
        ra_unit = prim_hdu.get(cls.FITS_RA_UNIT)
        dec_unit = prim_hdu.get(cls.FITS_DEC_UNIT)

        star = Star(name=prim_hdu.get(cls.FITS_NAME),
                    coo=(ra, dec, (ra_unit, dec_unit)),
                    starClass=prim_hdu.get(cls.FITS_CLASS))

        ident = {}
        more = {}
        for db_name_key in list(prim_hdu.keys()):
            if db_name_key.endswith(DB_NAME_END):
                db_name = db_name_key[:-len(DB_NAME_END)]
                # ROBUSTNESS: setdefault keeps any "db_ident" entry collected
                # before the "<db>_name" card was reached.
                ident.setdefault(db_name, {})["name"] = prim_hdu[db_name_key]
            elif DB_IDENT_SEP in db_name_key:
                db_name, ident_key = db_name_key.split(DB_IDENT_SEP)
                # ROBUSTNESS: do not assume the "<db>_name" card came first
                # (previously raised KeyError in that case).
                entry = ident.setdefault(db_name, {})
                if not entry.get("db_ident"):
                    entry["db_ident"] = {}
                entry["db_ident"][ident_key] = prim_hdu[db_name_key]
            elif db_name_key not in ["SIMPLE", "BITPIX", "NAXIS", "EXTEND", cls.FITS_RA, cls.FITS_DEC, cls.FITS_RA_UNIT, cls.FITS_DEC_UNIT, cls.FITS_NAME, cls.FITS_CLASS]:
                # Anything that is not structural or star metadata goes into
                # the free-form "more" mapping.
                more[db_name_key.lower()] = prim_hdu[db_name_key]

        star.ident = ident
        star.more = more

        # Every binary table extension is one light curve.
        for lc_hdu in fits_file[1:]:
            star.putLightCurve(cls._createLcFromFits(lc_hdu))
        fits_file.close()
        return star

    @classmethod
    def _createLcFromFits(cls, fits_hdu):
        """Build a `LightCurve` from a FITS binary table extension."""
        time = []
        mag = []
        err = []
        for line in fits_hdu.data:
            if len(line) == 3:
                t, m, e = line
            elif len(line) == 2:
                # No error column; default the uncertainty to zero.
                t, m = line
                e = 0
            else:
                n = len(line) if hasattr(line, "__iter__") else None
                raise InvalidFile(
                    "Light curve binary extension of the fits couldn't be parsed\nbecause of line {1}\n type: {0}, length: {2}".format(type(line), line, n))
            time.append(t)
            mag.append(m)
            err.append(e)

        meta = {"xlabel": fits_hdu.header.get("TTYPE1", None),
                "xlabel_unit": fits_hdu.header.get("TUNIT1", None),
                "ylabel": fits_hdu.header.get("TTYPE2", None),
                "ylabel_unit": fits_hdu.header.get("TUNIT2", None),
                "color": fits_hdu.header.get("FILTER", None),
                "origin": fits_hdu.header.get(cls.DB_ORIGIN, None)
                }
        return LightCurve([time, mag, err], meta)

    @classmethod
    def writeToFITS(cls, file_name, star, clobber=True):
        """Serialize a `Star` (header metadata + light curves) to a FITS file."""
        prim_hdu = fits.PrimaryHDU()
        prim_hdu.header["IDENT"] = star.name
        try:
            prim_hdu.header[cls.FITS_RA] = star.coo.ra.degree
            prim_hdu.header[cls.FITS_RA_UNIT] = "deg"
            prim_hdu.header[cls.FITS_DEC] = star.coo.dec.degree
            prim_hdu.header[cls.FITS_DEC_UNIT] = "deg"
            prim_hdu.header[cls.FITS_CLASS] = star.starClass
        except AttributeError:
            # Stars without parsed coordinates are written without them.
            pass

        for db, ident in star.ident.items():
            prim_hdu.header["HIERARCH " + db + "_name"] = ident["name"]
            identifiers = ident.get("db_ident") or {}
            for key, value in identifiers.items():
                prim_hdu.header["HIERARCH " + db + "_id_" + key] = value

        for it, value in star.more.items():
            # Keywords longer than 8 chars need the HIERARCH convention.
            if len(it) > 8:
                it = "HIERARCH " + it
            prim_hdu.header[it] = value

        hdu_list = fits.HDUList(prim_hdu)
        for lc in star.light_curves:
            col1 = fits.Column(name=lc.meta.get("xlabel", "hjd"),
                               unit=lc.meta.get("xlabel_unit", "days"),
                               format='E', array=lc.time)
            col2 = fits.Column(name=lc.meta.get("ylabel", "magnitude"),
                               unit=lc.meta.get("ylabel_unit", "mag"),
                               format='E', array=lc.mag)
            col3 = fits.Column(name="error",
                               unit=lc.meta.get("ylabel_unit", "mag"),
                               format='E', array=lc.err)
            lc_hdu = fits.BinTableHDU.from_columns([col1, col2, col3])
            lc_hdu.header["FILTER"] = lc.meta.get("color", "")
            lc_hdu.header["HIERARCH " + cls.DB_ORIGIN] = lc.meta.get("origin", "")
            hdu_list.append(lc_hdu)

        hdu_list.writeto(file_name, overwrite=clobber)
|
"""
This package is dedicated to AXI Load/Store Units (LSU) and other similar components.
Other LSU implementations:
* https://github.com/riscv-boom/riscv-boom - has RISC-V LSU in chisel3, read kill, MSHRs
* https://github.com/rsd-devel/rsd - has N issue RISC-V LSU
* https://github.com/bluespec/Toooba - has RISC-V LSU in BlueSpecVerilog
* https://github.com/openhwgroup/cv32e40p - has RISC-V LSU for in order pipeline
""" |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import kv_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import slot_creator
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.FtrlOptimizer"])
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
See this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
This version has support for both online L2 (the L2 penalty given in the paper
above) and shrinkage-type L2 (which is the addition of an L2 penalty to the
loss function).
"""
def __init__(self,
             learning_rate,
             learning_rate_power=-0.5,
             initial_accumulator_value=0.1,
             l1_regularization_strength=0.0,
             l2_regularization_strength=0.0,
             use_locking=False,
             name="Ftrl",
             accum_name=None,
             linear_name=None,
             l2_shrinkage_regularization_strength=0.0):
    r"""Construct a new FTRL optimizer.

    Args:
      learning_rate: A float value or a constant float `Tensor`.
      learning_rate_power: A float value, must be less or equal to zero.
        Controls how the learning rate decreases during training. Use zero for
        a fixed learning rate. See section 3.1 in the
        [paper](https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
      initial_accumulator_value: The starting value for accumulators.
        Only zero or positive values are allowed.
      l1_regularization_strength: A float value, must be greater than or
        equal to zero.
      l2_regularization_strength: A float value, must be greater than or
        equal to zero.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Ftrl".
      accum_name: The suffix for the variable that keeps the gradient squared
        accumulator. If not present, defaults to name.
      linear_name: The suffix for the variable that keeps the linear gradient
        accumulator. If not present, defaults to name + "_1".
      l2_shrinkage_regularization_strength: A float value, must be greater than
        or equal to zero. This differs from L2 above in that the L2 above is a
        stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
        The FTRL formulation can be written as:
        w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
        \hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
        function w.r.t. the weights w.
        Specifically, in the absence of L1 regularization, it is equivalent to
        the following update rule:
        w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
                  2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
        where lr_t is the learning rate at t.
        When input is sparse shrinkage will only happen on the active weights.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    super(FtrlOptimizer, self).__init__(use_locking, name)
    # Validate hyperparameters eagerly so misconfiguration fails at
    # construction time rather than when ops are built.
    if initial_accumulator_value < 0.0:
        raise ValueError(
            "initial_accumulator_value %f needs to be positive or zero" %
            initial_accumulator_value)
    if learning_rate_power > 0.0:
        raise ValueError("learning_rate_power %f needs to be negative or zero" %
                         learning_rate_power)
    if l1_regularization_strength < 0.0:
        raise ValueError(
            "l1_regularization_strength %f needs to be positive or zero" %
            l1_regularization_strength)
    if l2_regularization_strength < 0.0:
        raise ValueError(
            "l2_regularization_strength %f needs to be positive or zero" %
            l2_regularization_strength)
    if l2_shrinkage_regularization_strength < 0.0:
        raise ValueError(
            "l2_shrinkage_regularization_strength %f needs to be positive"
            " or zero" % l2_shrinkage_regularization_strength)
    # Raw (Python-number or Tensor) hyperparameters; tensor versions are
    # materialized later in _prepare().
    self._learning_rate = learning_rate
    self._learning_rate_power = learning_rate_power
    self._initial_accumulator_value = initial_accumulator_value
    self._l1_regularization_strength = l1_regularization_strength
    self._l2_regularization_strength = l2_regularization_strength
    self._l2_shrinkage_regularization_strength = (
        l2_shrinkage_regularization_strength)
    self._learning_rate_tensor = None
    self._learning_rate_power_tensor = None
    self._l1_regularization_strength_tensor = None
    self._l2_regularization_strength_tensor = None
    self._l2_shrinkage_regularization_strength_tensor = None
    self._accum_name = accum_name
    self._linear_name = linear_name
def _create_slots(self, var_list):
    """Create the per-variable "accum" and "linear" FTRL state slots.

    The accumulator starts at `initial_accumulator_value`; the linear
    term starts at zero.  Slots are colocated with their variable.
    """
    # Create the "accum" and "linear" slots.
    for v in var_list:
        with ops.colocate_with(v):
            val = constant_op.constant(
                self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape())
            self._get_or_make_slot(v, val, "accum", self._accum_name or self._name,
                                   slot_config=slot_creator.SlotConfig(slot_index=1, slot_num=2))
            self._zeros_slot(v, "linear", self._linear_name or self._name,
                             slot_config=slot_creator.SlotConfig(slot_index=2, slot_num=2))
def _prepare(self):
    """Materialize the hyperparameters as named tensors.

    Runs once before the apply ops are built; each Python-number (or
    tensor) hyperparameter is passed through `ops.convert_to_tensor`
    under a stable name.
    """
    convert = ops.convert_to_tensor
    self._learning_rate_tensor = convert(
        self._learning_rate, name="learning_rate")
    self._l1_regularization_strength_tensor = convert(
        self._l1_regularization_strength, name="l1_regularization_strength")
    self._l2_regularization_strength_tensor = convert(
        self._l2_regularization_strength, name="l2_regularization_strength")
    self._l2_shrinkage_regularization_strength_tensor = convert(
        self._l2_shrinkage_regularization_strength,
        name="l2_shrinkage_regularization_strength")
    self._learning_rate_power_tensor = convert(
        self._learning_rate_power, name="learning_rate_power")
def _apply_dense(self, grad, var):
    """Build the dense FTRL update op for `var`.

    Dispatches to the `apply_ftrl_v2` kernel when an L2 shrinkage
    penalty is configured, otherwise to the plain `apply_ftrl` kernel.
    All hyperparameter tensors are cast to the variable's base dtype.
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.apply_ftrl(
            var,
            accum,
            linear,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
    else:
        return training_ops.apply_ftrl_v2(
            var,
            accum,
            linear,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
    """Build the dense FTRL update op for a resource variable `var`.

    Same dispatch as `_apply_dense` (v2 kernel iff L2 shrinkage is set),
    but operates on variable handles instead of ref variables.
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.resource_apply_ftrl(
            var.handle,
            accum.handle,
            linear.handle,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
    else:
        return training_ops.resource_apply_ftrl_v2(
            var.handle,
            accum.handle,
            linear.handle,
            grad,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
    """Build the sparse FTRL update op for `var`.

    Args:
      grad: An `IndexedSlices` carrying `values` and `indices`.
      var: The variable to update.

    Returns:
      The created training op (`sparse_apply_ftrl` or, when an L2
      shrinkage penalty is configured, `sparse_apply_ftrl_v2`).
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        return training_ops.sparse_apply_ftrl(
            var,
            accum,
            linear,
            grad.values,
            grad.indices,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
    else:
        return training_ops.sparse_apply_ftrl_v2(
            var,
            accum,
            linear,
            grad.values,
            grad.indices,
            math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
            math_ops.cast(self._l1_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._l2_regularization_strength_tensor,
                          var.dtype.base_dtype),
            # BUG FIX: this cast used grad.dtype.base_dtype while every
            # other hyperparameter here (and in the dense/resource paths)
            # is cast to the variable's dtype; use var.dtype for
            # consistency and to avoid a dtype mismatch in the kernel.
            math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                          var.dtype.base_dtype),
            math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
            use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
    """Build the sparse FTRL update op for a resource variable `var`.

    Two-way dispatch: the v2 kernels are used iff an L2 shrinkage
    penalty is configured, and the `kv_resource_*` kernels are used when
    `var` is an `EmbeddingVariable` (a key-value embedding store).
    Hyperparameters are cast to the gradient's dtype in this path.
    """
    accum = self.get_slot(var, "accum")
    linear = self.get_slot(var, "linear")
    if self._l2_shrinkage_regularization_strength <= 0.0:
        if isinstance(var, kv_variable_ops.EmbeddingVariable):
            return training_ops.kv_resource_sparse_apply_ftrl(
                var.handle,
                accum.handle,
                linear.handle,
                grad,
                indices,
                math_ops.cast(self._learning_rate_tensor, grad.dtype),
                math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
                use_locking=self._use_locking)
        else:
            return training_ops.resource_sparse_apply_ftrl(
                var.handle,
                accum.handle,
                linear.handle,
                grad,
                indices,
                math_ops.cast(self._learning_rate_tensor, grad.dtype),
                math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
                use_locking=self._use_locking)
    else:
        if isinstance(var, kv_variable_ops.EmbeddingVariable):
            return training_ops.kv_resource_sparse_apply_ftrl_v2(
                var.handle,
                accum.handle,
                linear.handle,
                grad,
                indices,
                math_ops.cast(self._learning_rate_tensor, grad.dtype),
                math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                              grad.dtype),
                math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
                use_locking=self._use_locking)
        else:
            return training_ops.resource_sparse_apply_ftrl_v2(
                var.handle,
                accum.handle,
                linear.handle,
                grad,
                indices,
                math_ops.cast(self._learning_rate_tensor, grad.dtype),
                math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
                math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
                              grad.dtype),
                math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
                use_locking=self._use_locking)
|
from typing import Any, Callable, Generic, List, TypeVar
from mlprogram import logging
logger = logging.Logger(__name__)
V = TypeVar("V")
class Identity(Generic[V]):
    """Callable that returns its argument unchanged."""

    def __call__(self, value: V) -> V:
        """Return *value* as-is."""
        return value
class Flatten(Generic[V]):
    """Callable that concatenates a list of lists into one flat list."""

    def __call__(self, values: List[List[V]]) -> List[V]:
        """Return the elements of every sublist of *values*, in order."""
        return [item for sub in values for item in sub]
class Threshold(object):
    """Compare a value against a fixed threshold and cast the result.

    Calling an instance returns ``dtype(value >= threshold)`` where
    ``dtype`` is one of ``bool``, ``int`` or ``float``.
    """

    # Mapping from the dtype name to the cast applied to the comparison.
    _CASTS = {"bool": bool, "int": int, "float": float}

    def __init__(self, threshold: float, dtype: str = "bool"):
        self.threshold = threshold
        assert dtype in {"bool", "int", "float"}
        # Resolve the cast once at construction time.
        self.dtype: Callable[[bool], Any] = self._CASTS[dtype]

    def __call__(self, value: float) -> Any:
        """Return whether *value* reaches the threshold, cast to dtype."""
        return self.dtype(value >= self.threshold)
|
from .score import Score
from .partlist import PartList
from .scorepart import ScorePart
from .part import Part
from .measure import Measure
from .note import Note
from .rest import Rest
from .chord import Chord
from .harmony import Harmony
from .rehearsal import Rehearsal
from .barline import Barline
|
# Package author metadata.
__author__ = 'mosquito'
"""Initial Migration
Revision ID: 7ebae7278158
Revises: c984bd5ccad2
Create Date: 2019-02-28 12:40:21.734374
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7ebae7278158'
down_revision = 'c984bd5ccad2'
branch_labels = None
depends_on = None
def upgrade():
    """Rename `pitches.Description` to lowercase `description`.

    Implemented as add-new-column + drop-old-column (the auto-generated
    form), so any existing data in `Description` is NOT copied over.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('pitches', sa.Column('description', sa.String(length=255), nullable=True))
    op.drop_column('pitches', 'Description')
    # ### end Alembic commands ###
def downgrade():
    """Revert to the capitalized `Description` column (data not copied)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('pitches', sa.Column('Description', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
    op.drop_column('pitches', 'description')
    # ### end Alembic commands ###
|
import pytest
import numpy as np
from os import path
from astropy.io import fits
from astropy import wcs
from astropy.coordinates import SkyCoord
import astropy.units as u
from .utils_for_test import create_test_ffis
from ..make_cube import CubeFactory
from ..cube_cut import CutoutFactory
from ..exceptions import InvalidQueryError, InputWarning
def checkcutout(cutfile, pixcrd, world, csize, ecube, eps=1.e-7):
"""Check FITS cutout for correctness
Checks RA_OBJ/DEC_OBJ in primary header, and TIME, FLUX, and
FLUX_ERR in table.
Inputs:
cutfile Name of FITS cutout file
pixcrd [2] pixel coordinates for cutout center [cy,cx]
world [2] RA, Dec in degrees for cutout center
csize Integer size of cutout (probably should be odd)
ecube Simulated data cube
eps Maximum allowed distance offset in degrees
Returns True on success, False on failure
"""
ix = int(pixcrd[1])
iy = int(pixcrd[0])
x1 = ix - csize//2
x2 = x1 + csize - 1
y1 = iy - csize//2
y2 = y1 + csize - 1
hdulist = fits.open(cutfile)
ra_obj = hdulist[0].header['RA_OBJ']
dec_obj = hdulist[0].header['DEC_OBJ']
pinput = SkyCoord(world[0], world[1], frame='icrs', unit='deg')
poutput = SkyCoord(ra_obj, dec_obj, frame='icrs', unit='deg')
dist = pinput.separation(poutput).degree
assert dist <= eps, "{} separation in primary header {} too large".format(cutfile, dist)
ntimes = ecube.shape[2]
tab = hdulist[1].data
assert len(tab) == ntimes, "{} expected {} entries, found {}".format(cutfile, ntimes, len(tab))
assert (tab['TIME'] == (np.arange(ntimes)+0.5)).all(), "{} some time values are incorrect".format(cutfile)
check1(tab['FLUX'], x1, x2, y1, y2, ecube[:, :, :, 0], 'FLUX', cutfile)
check1(tab['FLUX_ERR'], x1, x2, y1, y2, ecube[:, :, :, 1], 'FLUX_ERR', cutfile)
# Regression test for PR #6
assert hdulist[2].data.dtype.type == np.int32
return
def check1(flux, x1, x2, y1, y2, ecube, label, cutfile):
    """Test one of flux or error"""
    cx, cy = ecube.shape[0], ecube.shape[1]
    # Any part of the requested cutout that falls off an edge of the
    # cube must be NaN-padded in the returned flux array.
    if x1 < 0:
        assert np.isnan(flux[:, :-x1, :]).all(), "{} {} x1 NaN failure".format(cutfile, label)
    if y1 < 0:
        assert np.isnan(flux[:, :, :-y1]).all(), "{} {} y1 NaN failure".format(cutfile, label)
    if x2 >= cx:
        assert np.isnan(flux[:, -(x2-cx+1):, :]).all(), "{} {} x2 NaN failure".format(cutfile, label)
    if y2 >= cy:
        assert np.isnan(flux[:, :, -(y2-cy+1):]).all(), "{} {} y2 NaN failure".format(cutfile, label)
    # Clip the cutout bounds to the cube, then compare the overlap.
    xlo, ylo = max(x1, 0), max(y1, 0)
    xhi, yhi = min(x2, cx - 1), min(y2, cy - 1)
    expected = ecube[xlo:xhi, ylo:yhi, :]
    actual = np.moveaxis(flux[:, xlo - x1:xhi - x1, ylo - y1:yhi - y1], 0, -1)
    assert (expected == actual).all(), "{} {} comparison failure".format(cutfile, label)
    return
def test_cube_cutout(tmpdir):
    """
    Testing the cube cutout functionality.

    Builds a small simulated FFI cube, cuts out a grid of positions
    (including one cutout larger than the image), and checks each
    result with `checkcutout`.
    """
    # Making the test cube
    cube_maker = CubeFactory()
    img_sz = 10
    num_im = 100
    ffi_files = create_test_ffis(img_sz, num_im, dir_name=tmpdir)
    cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
    # Read one of the input images to get the WCS
    img_header = fits.getheader(ffi_files[0], 1)
    cube_wcs = wcs.WCS(img_header)
    # get pixel positions at edges and center of image
    # somewhat cryptic one-liner to get the grid of points
    # FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `float` is the documented replacement.
    pval = np.array([0, img_sz//2, img_sz-1], dtype=float)
    pixcrd = pval[np.transpose(np.reshape(np.mgrid[0:3, 0:3], (2, 9)))]
    # add one more giant cutout that goes off all 4 edges
    pixcrd = np.append(pixcrd, pixcrd[4].reshape(1, 2), axis=0)
    # getting the world coordinates
    world_coords = cube_wcs.all_pix2world(pixcrd, 0)
    # Getting the cutouts
    cutbasename = 'make_cube_cutout_{}.fits'
    cutlist = [path.join(tmpdir, cutbasename.format(i)) for i in range(len(world_coords))]
    csize = [img_sz//2]*len(world_coords)
    csize[-1] = img_sz+5
    for i, v in enumerate(world_coords):
        coord = SkyCoord(v[0], v[1], frame='icrs', unit='deg')
        CutoutFactory().cube_cut(cube_file, coord, csize[i], target_pixel_file=cutlist[i],
                                 output_path=tmpdir, verbose=False)
    # expected values for cube: plane 0 is -FLUX, plane 1 is FLUX_ERR
    ecube = np.zeros((img_sz, img_sz, num_im, 2))
    plane = np.arange(img_sz*img_sz, dtype=np.float32).reshape((img_sz, img_sz))
    for i in range(num_im):
        ecube[:, :, i, 0] = -plane
        ecube[:, :, i, 1] = plane
        plane += img_sz*img_sz
    # Doing the actual checking
    for i, cutfile in enumerate(cutlist):
        checkcutout(cutfile, pixcrd[i], world_coords[i], csize[i], ecube)
def test_cutout_extras(tmpdir):
    """Exercise CutoutFactory internals: table parsing, cutout limits,
    cutout WCS construction/fitting, and the target-pixel-file format."""
    # Making the test cube
    cube_maker = CubeFactory()
    img_sz = 10
    num_im = 100
    ffi_files = create_test_ffis(img_sz, num_im)
    cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
    # Making the cutout
    myfactory = CutoutFactory()
    coord = "256.88 6.38"
    ###########################
    # Test _parse_table_info #
    ###########################
    cutout_size = [5, 3]
    out_file = myfactory.cube_cut(cube_file, coord, cutout_size,
                                  output_path=path.join(tmpdir, "out_dir"), verbose=False)
    assert "256.880000_6.380000_5x3_astrocut.fits" in out_file
    assert isinstance(myfactory.cube_wcs, wcs.WCS)
    ra, dec = myfactory.cube_wcs.wcs.crval
    assert round(ra, 4) == 250.3497
    assert round(dec, 4) == 2.2809
    # checking on the center coordinate too
    coord = SkyCoord(256.88, 6.38, frame='icrs', unit='deg')
    assert myfactory.center_coord.separation(coord) == 0
    ############################
    # Test _get_cutout_limits #
    ############################
    # Unitless sizes are interpreted as pixels.
    xmin, xmax = myfactory.cutout_lims[0]
    ymin, ymax = myfactory.cutout_lims[1]
    assert (xmax-xmin) == cutout_size[0]
    assert (ymax-ymin) == cutout_size[1]
    cutout_size = [5*u.pixel, 7*u.pixel]
    out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
    assert "256.880000_6.380000_5x7_astrocut.fits" in out_file
    xmin, xmax = myfactory.cutout_lims[0]
    ymin, ymax = myfactory.cutout_lims[1]
    assert (xmax-xmin) == cutout_size[0].value
    assert (ymax-ymin) == cutout_size[1].value
    # Angular sizes get converted to a pixel count.
    cutout_size = [3*u.arcmin, 5*u.arcmin]
    out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
    assert "256.880000_6.380000_8x15_astrocut.fits" in out_file
    xmin, xmax = myfactory.cutout_lims[0]
    ymin, ymax = myfactory.cutout_lims[1]
    assert (xmax-xmin) == 8
    assert (ymax-ymin) == 15
    # Sub-pixel angular sizes still produce at least a 1x1 cutout.
    cutout_size = [1*u.arcsec, 5*u.arcsec]
    out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
    assert "256.880000_6.380000_1x1_astrocut.fits" in out_file
    xmin, xmax = myfactory.cutout_lims[0]
    ymin, ymax = myfactory.cutout_lims[1]
    assert (xmax-xmin) == 1
    assert (ymax-ymin) == 1
    #############################
    # Test _get_full_cutout_wcs #
    #############################
    cutout_size = [5, 3]
    out_file = myfactory.cube_cut(cube_file, coord, cutout_size, verbose=False)
    cutout_wcs_full = myfactory._get_full_cutout_wcs(fits.getheader(cube_file, 2))
    # The reference pixel must shift by the cutout's lower-left corner.
    assert (cutout_wcs_full.wcs.crpix == [1045 - myfactory.cutout_lims[0, 0],
                                          1001 - myfactory.cutout_lims[1, 0]]).all()
    ########################
    # Test _fit_cutout_wcs #
    ########################
    max_dist, sigma = myfactory._fit_cutout_wcs(cutout_wcs_full, (3, 5))
    assert max_dist.deg < 1e-05
    assert sigma < 1e-05
    cry, crx = myfactory.cutout_wcs.wcs.crpix
    assert round(cry) == 3
    assert round(crx) == 2
    ##########################
    # Test target pixel file #
    ##########################
    # Testing the cutout content is in test_cube_cutout
    # this tests that the format of the tpf is what it should be
    tpf = fits.open(out_file)
    assert tpf[0].header["ORIGIN"] == 'STScI/MAST'
    tpf_table = tpf[1].data
    assert len(tpf_table.columns) == 12
    assert "TIME" in tpf_table.columns.names
    assert "FLUX" in tpf_table.columns.names
    assert "FLUX_ERR" in tpf_table.columns.names
    assert "FFI_FILE" in tpf_table.columns.names
    cutout_img = tpf_table[0]['FLUX']
    assert cutout_img.shape == (3, 5)
    assert cutout_img.dtype.name == 'float32'
    aperture = tpf[2].data
    assert aperture.shape == (3, 5)
    assert aperture.dtype.name == 'int32'
    tpf.close()
def test_exceptions(tmpdir):
    """
    Testing various error conditions.
    """
    # Making the test cube
    cube_maker = CubeFactory()
    img_sz = 10
    num_im = 100
    ffi_files = create_test_ffis(img_sz, num_im)
    cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
    # Setting up
    myfactory = CutoutFactory()
    hdu = fits.open(cube_file)
    cube_table = hdu[2].data
    # Testing when none of the FFIs have good wcs info
    cube_table["WCSAXES"] = 0
    with pytest.raises(Exception, match='No FFI rows contain valid WCS keywords.') as e:
        myfactory._parse_table_info(cube_table)
    assert e.type is wcs.NoWcsKeywordsFoundError
    cube_table["WCSAXES"] = 2
    # Testing when nans are present: a NaN BARYCORR should not change
    # which row's WCS gets picked.
    myfactory._parse_table_info(cube_table)
    wcs_orig = myfactory.cube_wcs
    cube_table["BARYCORR"] = np.nan
    myfactory._parse_table_info(cube_table)
    assert wcs_orig.to_header_string() == myfactory.cube_wcs.to_header_string()
    hdu.close()
    # Testing various off the cube inputs
    myfactory.center_coord = SkyCoord("50.91092264 6.40588255", unit='deg')
    with pytest.raises(Exception, match='Cutout location is not in cube footprint!') as e:
        myfactory._get_cutout_limits(np.array([5, 5]))
    assert e.type is InvalidQueryError
    myfactory.center_coord = SkyCoord("257.91092264 6.40588255", unit='deg')
    with pytest.raises(Exception, match='Cutout location is not in cube footprint!') as e:
        myfactory._get_cutout_limits(np.array([5, 5]))
    assert e.type is InvalidQueryError
    # Testing the WCS fitting function on progressively thinner cutouts
    distmax, sigma = myfactory._fit_cutout_wcs(myfactory.cube_wcs, (100, 100))
    assert distmax.deg < 0.003
    assert sigma < 0.03
    distmax, sigma = myfactory._fit_cutout_wcs(myfactory.cube_wcs, (1, 100))
    assert distmax.deg < 0.003
    assert sigma < 0.03
    distmax, sigma = myfactory._fit_cutout_wcs(myfactory.cube_wcs, (100, 2))
    assert distmax.deg < 0.03
    assert sigma < 0.03
    # And on a cutout WCS derived from a (partially off-cube) cutout.
    myfactory.center_coord = SkyCoord("256.38994124 4.88986771", unit='deg')
    myfactory._get_cutout_limits(np.array([5, 500]))
    hdu = fits.open(cube_file)
    cutout_wcs = myfactory._get_full_cutout_wcs(hdu[2].header)
    hdu.close()
    distmax, sigma = myfactory._fit_cutout_wcs(cutout_wcs, (200, 200))
    assert distmax.deg < 0.004
    assert sigma < 0.2
    distmax, sigma = myfactory._fit_cutout_wcs(cutout_wcs, (100, 5))
    assert distmax.deg < 0.003
    assert sigma < 0.003
    distmax, sigma = myfactory._fit_cutout_wcs(cutout_wcs, (3, 100))
    assert distmax.deg < 0.003
    assert sigma < 0.003
def test_inputs(tmpdir, capsys):
    """
    Testing with different user input types/combos. And verbose.
    """
    # Making the test cube
    cube_maker = CubeFactory()
    img_sz = 10
    num_im = 100
    ffi_files = create_test_ffis(img_sz, num_im)
    cube_file = cube_maker.make_cube(ffi_files, path.join(tmpdir, "test_cube.fits"), verbose=False)
    # Setting up
    myfactory = CutoutFactory()
    coord = "256.88 6.38"
    # Pixel-unit Quantity size; verbose=True output is captured and checked.
    cutout_size = [5, 3]*u.pixel
    cutout_file = myfactory.cube_cut(cube_file, coord, cutout_size, output_path=tmpdir, verbose=True)
    captured = capsys.readouterr()
    assert "Image cutout cube shape: (100, 3, 5)" in captured.out
    assert "Using WCS from row 50 out of 100" in captured.out
    assert "Cutout center coordinate: 256.88,6.38" in captured.out
    assert "5x3" in cutout_file
    # Angular-unit Quantity size.
    cutout_size = [5, 3]*u.arcmin
    cutout_file = myfactory.cube_cut(cube_file, coord, cutout_size, output_path=tmpdir, verbose=False)
    assert "14x9" in cutout_file
    # Extra dimensions beyond 2 should warn and be ignored.
    cutout_size = [5, 3, 9]*u.pixel
    with pytest.warns(InputWarning):
        cutout_file = myfactory.cube_cut(cube_file, coord, cutout_size, output_path=tmpdir, verbose=False)
    assert "5x3" in cutout_file
    assert "x9" not in cutout_file
|
import sys, pygame, math
from Ball import Ball
class PlayerBall(Ball):
    """The player-controlled ball.

    Speeds are set directly from input commands via `go()`, capped by
    the per-axis `maxSpeed` pair given at construction; the collide*
    methods handle screen-edge bounces and ball-vs-ball hit tests.
    """

    def __init__(self, images, maxSpeed, pos=None):
        """Create the ball with per-axis speed caps and an optional start position."""
        # FIX: `pos=[0,0]` was a shared mutable default; use None sentinel.
        if pos is None:
            pos = [0, 0]
        Ball.__init__(self, images, [0, 0], pos)
        self.maxSpeedx = maxSpeed[0]
        self.maxSpeedy = maxSpeed[1]

    def collideScreen(self, size):
        """Bounce off the window edges, at most once per axis per frame."""
        width = size[0]
        height = size[1]
        if not self.didBounceX:
            if self.rect.left < 0 or self.rect.right > width:
                self.speedx = -self.speedx
                self.didBounceX = True
                self.move()
                self.speedx = 0
        if not self.didBounceY:
            if self.rect.top < 0 or self.rect.bottom > height:
                self.speedy = -self.speedy
                # BUG FIX: was `selfdidBounceY = True` (missing dot), which
                # assigned a throwaway local instead of setting the flag,
                # allowing repeated Y bounces in one frame.
                self.didBounceY = True
                self.move()
                self.speedy = 0

    def collideBall(self, other):
        """Return True if this ball's bounding circle overlaps `other`'s."""
        # (removed stray debug print)
        if self.rect.right > other.rect.left and self.rect.left < other.rect.right:
            if self.rect.bottom > other.rect.top and self.rect.top < other.rect.bottom:
                if self.radius + other.radius > self.distanceTo(other.rect.center):
                    return True
        return False

    def go(self, direction):
        """Set the ball's speed from a direction/stop command string."""
        if direction == "up":
            self.speedy = -self.maxSpeedy
        elif direction == "down":
            self.speedy = self.maxSpeedy
        if direction == "right":
            self.speedx = self.maxSpeedx
        elif direction == "left":
            self.speedx = -self.maxSpeedx
        if direction == "stop up":
            self.speedy = 0
        elif direction == "stop down":
            self.speedy = 0
        if direction == "stop right":
            self.speedx = 0
        elif direction == "stop left":
            self.speedx = 0
|
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Configuration utility for distla_core."""
# TODO (mganahl): move this config file one folder up
# and rename to global_config
import jax
import numpy as np
from distla_core.utils import misc
AXIS_NAME = 'i'


def get_axis_name() -> str:
    """Return the name of the pmapped axis (hard coded to 'i')."""
    return AXIS_NAME
# Factor the host count and per-host device count into primes, then split
# the factors alternately between columns and rows to form near-square
# processor grids.
n_hosts_primes = misc.prime_factors(jax.process_count())
n_local_device_primes = misc.prime_factors(jax.local_device_count())
NHCOLS = int(np.prod(n_hosts_primes[::2]))          # host-grid columns
NHROWS = int(np.prod(n_hosts_primes[1::2]))         # host-grid rows
NDROWS = int(np.prod(n_local_device_primes[0::2]))  # device rows per host
NDCOLS = int(np.prod(n_local_device_primes[1::2]))  # device cols per host
NROWS = NHROWS * NDROWS   # total processor rows
NCOLS = NHCOLS * NDCOLS   # total processor columns
NDPROCS = NDCOLS * NDROWS             # devices per host
NPROCS = NHROWS * NHCOLS * NDPROCS    # total devices
GRID = (NROWS, NCOLS)     # global processor grid shape
DGRID = (NDROWS, NDCOLS)  # per-host device grid shape
HGRID = (NHROWS, NHCOLS)  # host grid shape
def get_processor_grid():
    """ Returns an array of shape GRID whose (i, j)'th entry is the pmap index
    of the processor at the (i, j)'th prow/pcol.
    """
    n_local = jax.local_device_count()
    # Per-host device block, filled column-major.
    node_block = np.reshape(np.arange(n_local), (NDROWS, NDCOLS), order='F')
    # Stack host blocks vertically; each host offsets indices by n_local.
    host_col = np.concatenate(
        [node_block + n_local * row for row in range(NHROWS)],
        axis=0).astype(np.int32)
    # Then tile the columns of hosts, offsetting by the column size.
    offset = np.max(host_col.ravel()) + 1
    return np.concatenate([host_col + offset * col for col in range(NHCOLS)],
                          axis=1)
|
import pandas as pd
import os
from IPython import display
def get_status(eid, assignment):
    """Return the furthest nbgrader stage reached for (eid, assignment).

    Checks 'feedback', then 'autograded', then 'submitted' (most to
    least advanced) and returns the first stage whose directory exists
    under ../nbgrader/, or '' if none do.
    """
    stages = ['submitted', 'autograded', 'feedback']
    for status in reversed(stages):
        if os.path.isdir(f'../nbgrader/{status}/{eid}/{assignment}'):
            return status
    # BUG FIX: the '' fallback used to be an `else` on the `if` inside the
    # loop, so the function returned '' on the first non-matching stage
    # ('feedback') without ever checking 'autograded' or 'submitted'.
    return ''
def get_link(eid, assignment):
    """Build the formgrader HTML link(s) for a student's assignment status.

    Returns '' when there is no submission; adds a second link to the
    generated feedback when the status is 'feedback'.
    """
    status = get_status(eid, assignment)
    if not status:
        return status
    link = f'<a href="../../formgrader/manage_students/{eid}/{assignment}" target=_blank>{status}</a>'
    if status == 'feedback':
        link += f'<br><a href="../../tree/nbgrader/feedback/{eid}/{assignment}" target=_blank>(generated)</a>'
    return link
def load_students(assignment, csv, sections=None, include_test_student=False):
    """Load the student roster and render per-student assignment links.

    Args:
        assignment: assignment name used to build formgrader links.
        csv: path of the roster CSV file to read.
        sections: optional list of section values to keep.
        include_test_student: also append a 'test-student' row.

    Returns:
        The filtered DataFrame, with an extra column (named after the
        assignment) of status links; also renders it as HTML.
    """
    # BUG FIX: the roster path was hard-coded to 'cs5483.csv', ignoring
    # the `csv` parameter entirely.
    df = pd.read_csv(csv)
    if sections:
        df = df.loc[df['section'].isin(sections)]
    if include_test_student:
        # DataFrame.append was deprecated in pandas 1.4 and removed in
        # 2.0; pd.concat is the supported equivalent.
        df = pd.concat([df, pd.DataFrame([{'eid': 'test-student'}])],
                       ignore_index=True)
    df[assignment] = [get_link(row["eid"], assignment) for (index, row) in df.iterrows()]
    display.display(display.HTML(f'<a href="../../formgrader/manage_submissions/{assignment}" target=_blank>See all submissions</a>'))
    display.display(display.HTML(df.to_html(escape=False)))
    # display.display(display.HTML('test-student:<br>'+get_link('test-student',assignment)))
    return df
|
from ray.rllib.agents.ars.ars import (ARSAgent, DEFAULT_CONFIG)
__all__ = ["ARSAgent", "DEFAULT_CONFIG"]
|
import torch
import pickle
import os
class ProcessedDataset(torch.utils.data.Dataset):
    """Dataset wrapping pickled samples plus one fixed class index.

    Every item shares the same label tensor, built from `class_index`.
    """

    def __init__(self, data_path, class_index):
        """Load the pickled samples at `data_path` as a float tensor."""
        with open(data_path, "rb") as handle:
            raw = pickle.load(handle)
        self.dataset = torch.tensor(raw).float()
        self.class_index = torch.tensor(class_index).float()

    def get_dataset(self):
        """Return self (kept for compatibility with older callers)."""
        return self

    def __len__(self):
        """Number of samples loaded from the pickle."""
        return len(self.dataset)

    def __getitem__(self, idx):
        """Return the (sample, class_index) pair at position `idx`."""
        return self.dataset[idx], self.class_index
def load_datasets_processed(cfg, datasets, dataset_dir=None):
    """
    load preprocessed datasets from a list, return train/test datasets, dataset index and dataset name
    """
    if dataset_dir is None:
        dataset_dir = cfg["proc_dataset_dir"]
    loaded = []
    next_index = 0
    for name in datasets:
        train_path = os.path.join(dataset_dir, name + "_train")
        test_path = os.path.join(dataset_dir, name + "_test")
        try:
            train_set = ProcessedDataset(train_path, next_index)
            test_set = ProcessedDataset(test_path, next_index)
        except Exception as err:
            # Skip datasets whose files are missing or unreadable; note
            # that skipped entries do not consume a class index.
            print(err)
            continue
        loaded.append((train_set, test_set, name, next_index))
        next_index += 1
    return loaded
|
# Generated by Django 3.0.6 on 2020-07-14 02:47
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import thenewboston.utils.validators
import uuid
class Migration(migrations.Migration):
    """Initial migration: create the `BankTransaction` model."""

    initial = True

    dependencies = [
        ('blocks', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='BankTransaction',
            fields=[
                # Client-side generated UUID primary key.
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                # 16 decimal places; minimum representable amount is 1e-16.
                ('amount', models.DecimalField(decimal_places=16, default=1e-16, max_digits=32, validators=[django.core.validators.MinValueValidator(1e-16), thenewboston.utils.validators.validate_is_real_number])),
                ('recipient', models.CharField(max_length=64)),
                # Deleting the parent block cascades to its transactions.
                ('block', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bank_transactions', to='blocks.Block')),
            ],
            options={
                'default_related_name': 'bank_transactions',
            },
        ),
    ]
|
# Sample module-level value (not referenced by the definitions below).
myval = 3
def mymultiply(num1, num2):
    """Return the product of ``num1`` and ``num2``."""
    product = num1 * num2
    return product
class multiply1:
    # Placeholder class -- no behavior implemented yet.
    pass
|
#!/usr/bin/env python
from codetalker import pgm
from codetalker.pgm.tokens import STRING, ID, NUMBER, WHITE, NEWLINE, ReToken, re, CharToken, StringToken
from codetalker.pgm.special import star, plus, _or, binop
from codetalker.pgm.grammar import ParseError
## TOKENS
class OP(ReToken):
    # Regex-based operator token: '**' or any single char of -+*/%.
    rx = re.compile('\\*\\*|[-+*/%]')
class OP(StringToken):
    # NOTE(review): redefines OP above -- this StringToken version wins at
    # import time and the ReToken one becomes dead code (presumably kept
    # as the slower reference implementation; confirm before deleting).
    # NOTE(review): '[' looks out of place among arithmetic operators and
    # is not matched by the regex version -- verify it is intentional.
    strings = ['**', '[', '-', '+', '*', '/', '%']
    num = 7
class SYMBOL(ReToken):
    # Regex-based parenthesis token.
    rx = re.compile('[()]')
class SYMBOL(CharToken):
    # NOTE(review): redefines SYMBOL above -- this CharToken version wins
    # at import time; the ReToken one is dead code.
    chars = '()'
    num = 2
## RULES
'''order of operations:
&& || ; not using
+-
*/%
**
()
'''
# Build the arithmetic expression grammar: binary operators with +- at
# the lowest precedence, then */%, then '**', with parentheses enabled
# and NUMBER as the leaf value.
expression = binop(list('-+'), list('*/%'), ['**'], value=NUMBER, ops_token=OP, name='BinOp', paren=True)
grammar = pgm.Grammar(start=expression, tokens = [SYMBOL, OP], ignore = [WHITE, NEWLINE], ast_tokens=[NUMBER])
# Translator that evaluates the AST; handlers are registered below.
m = pgm.Translator(grammar)
ast = grammar.ast_classes
import operator
# Map operator lexemes to float binary functions.  FIX: `operator.div`
# exists only on Python 2; `truediv` is behavior-identical here because
# both operands are always floats (NUMBER leaves are translated via
# float()) and it also works on Python 3.
ops = {'**': operator.pow, '*': operator.mul, '/': operator.truediv, '%': operator.mod, '+': operator.add, '-': operator.sub}
@m.translates(ast.BinOp)
def binop(node):
    """Evaluate a BinOp AST node by folding its operator chain."""
    # Start from the left operand and apply each (operator, operand)
    # pair in turn.  NOTE(review): every operator, including '**', is
    # folded left-to-right here; exponentiation is conventionally
    # right-associative -- confirm this is intended.
    value = m.translate(node.left)
    for op, right in zip(node.ops, node.values):
        nv = m.translate(right)
        value = ops[op.value](value, nv)
    return value
@m.translates(NUMBER)
def number(node):
    # Leaf translation: NUMBER tokens evaluate to Python floats.
    return float(node.value)
# Public entry point: evaluate(source) parses and evaluates a string.
evaluate = m.from_string
# vim: et sw=4 sts=4
|
from factorization import Factorization
from sequence import Sequence, seq
from unique_representation import UniqueRepresentation
from sage_object import SageObject
from element import (\
is_AdditiveGroupElement,
is_AlgebraElement,
is_CommutativeAlgebraElement,
is_CommutativeRingElement,
is_DedekindDomainElement,
is_EuclideanDomainElement,
is_FieldElement,
is_InfinityElement,
is_IntegralDomainElement,
is_Element,
is_Matrix,
is_MonoidElement,
is_ModuleElement,
is_MultiplicativeGroupElement,
is_PrincipalIdealDomainElement,
is_RingElement,
is_Vector,
canonical_coercion,
get_coercion_model,
coercion_traceback
)
from parent import Parent, is_Parent
from parent_base import ParentWithBase, is_ParentWithBase
from parent_gens import (ParentWithGens,
is_ParentWithGens,
ParentWithAdditiveAbelianGens,
is_ParentWithAdditiveAbelianGens,
ParentWithMultiplicativeAbelianGens,
is_ParentWithMultiplicativeAbelianGens,
localvars)
import proof.all as proof
from formal_sum import FormalSums, FormalSum
from mutability import Mutability
from element_wrapper import ElementWrapper
from list_clone import (ClonableElement, ClonableArray, ClonableIntArray)
|
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum over all contiguous subarrays of `nums`.

        Kadane's algorithm: `running` is the best sum of a subarray
        ending at the current element; `best` tracks the overall max.
        O(n) time, O(1) space.  (Removed a redundant reset-to-zero
        branch and commented-out debug prints from the original.)
        """
        best = running = nums[0]
        for x in nums[1:]:
            # Either extend the previous subarray or start fresh at x.
            running = max(running + x, x)
            best = max(best, running)
        return best
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in WideNDeep model classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.experimental.WideDeepModel')
class WideDeepModel(training.Model):
  r"""Wide & Deep Model for regression and classification problems.

  This model jointly trains a linear and a dnn model.

  Example:

  ```python
  linear_model = LinearModel()
  dnn_model = keras.Sequential([keras.layers.Dense(units=64),
                                keras.layers.Dense(units=1)])
  combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
  # define dnn_inputs and linear_inputs as separate numpy arrays or
  # a single numpy array if dnn_inputs is same as linear_inputs.
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
  # or define a single `tf.data.Dataset` that contains a single tensor or
  # separate tensors for dnn_inputs and linear_inputs.
  dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y))
  combined_model.fit(dataset, epochs)
  ```

  Both linear and dnn model can be pre-compiled and trained separately
  before jointly training:

  Example:
  ```python
  linear_model = LinearModel()
  linear_model.compile('adagrad', 'mse')
  linear_model.fit(linear_inputs, y, epochs)
  dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
  dnn_model.compile('rmsprop', 'mse')
  dnn_model.fit(dnn_inputs, y, epochs)
  combined_model = WideDeepModel(linear_model, dnn_model)
  combined_model.compile(optimizer=['sgd', 'adam'], loss='mse', metrics=['mse'])
  combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
  ```
  """
  def __init__(self, linear_model, dnn_model, activation=None, **kwargs):
    """Create a Wide & Deep Model.

    Args:
      linear_model: a premade LinearModel, its output must match the output of
        the dnn model.
      dnn_model: a `tf.keras.Model`, its output must match the output of the
        linear model.
      activation: Activation function. Set it to None to maintain a linear
        activation.
      **kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
        Allowed keyword arguments include `name`.
    """
    super(WideDeepModel, self).__init__(**kwargs)
    self.linear_model = linear_model
    self.dnn_model = dnn_model
    self.activation = activations.get(activation)
  def call(self, inputs):
    # A single (non-pair) input is fed to both submodels; a 2-element
    # tuple/list is split as (linear_inputs, dnn_inputs).
    if not isinstance(inputs, (tuple, list)) or len(inputs) != 2:
      linear_inputs = dnn_inputs = inputs
    else:
      linear_inputs, dnn_inputs = inputs
    linear_output = self.linear_model(linear_inputs)
    dnn_output = self.dnn_model(dnn_inputs)
    # Combine the submodels by averaging their outputs element-wise; the
    # structures must match (see __init__ docstring).
    output = nest.map_structure(lambda x, y: 0.5 * (x + y), linear_output,
                                dnn_output)
    if self.activation:
      return nest.map_structure(self.activation, output)
    return output
  def _get_optimizers(self):
    # compile() may receive a single optimizer (shared by both submodels) or
    # a [linear_optimizer, dnn_optimizer] pair.
    if isinstance(self.optimizer, (tuple, list)):
      return (self.optimizer[0], self.optimizer[1])
    else:
      return (self.optimizer, self.optimizer)
  # This does not support gradient scaling and LossScaleOptimizer.
  def _backwards(self, tape, loss):
    # Compute gradients for both submodels in one tape pass, then apply each
    # submodel's gradients with its own optimizer.
    linear_vars = self.linear_model.trainable_weights  # pylint: disable=protected-access
    dnn_vars = self.dnn_model.trainable_weights  # pylint: disable=protected-access
    linear_grads, dnn_grads = tape.gradient(loss, (linear_vars, dnn_vars))
    linear_optimizer, dnn_optimizer = self._get_optimizers()
    linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
    dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
    return
  def _make_train_function(self):
    # TODO(tanzheny): This is a direct copy from super to make it work
    # refactor it so that common logic can be shared.
    has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
    self._check_trainable_weights_consistency()
    # If we have re-compiled the loss/weighted metric sub-graphs then create
    # train function even if one exists already. This is because
    # `_feed_sample_weights` list has been updated on re-compile.
    if getattr(self, 'train_function', None) is None or has_recompiled:
      # Restore the compiled trainable state.
      current_trainable_state = self._get_trainable_state()
      self._set_trainable_state(self._compiled_trainable_state)
      inputs = (
          self._feed_inputs + self._feed_targets + self._feed_sample_weights)
      if not isinstance(K.symbolic_learning_phase(), int):
        inputs += [K.symbolic_learning_phase()]
      # Unlike the super implementation, collect updates from BOTH optimizers
      # so each submodel trains with its own optimizer in graph mode.
      linear_optimizer, dnn_optimizer = self._get_optimizers()
      with K.get_graph().as_default():
        with K.name_scope('training'):
          # Training updates
          updates = []
          linear_updates = linear_optimizer.get_updates(
              params=self.linear_model.trainable_weights,  # pylint: disable=protected-access
              loss=self.total_loss)
          updates += linear_updates
          dnn_updates = dnn_optimizer.get_updates(
              params=self.dnn_model.trainable_weights,  # pylint: disable=protected-access
              loss=self.total_loss)
          updates += dnn_updates
          # Unconditional updates
          updates += self.get_updates_for(None)
          # Conditional updates relevant to this model
          updates += self.get_updates_for(self.inputs)
        metrics = self._get_training_eval_metrics()
        metrics_tensors = [
            m._call_result for m in metrics if hasattr(m, '_call_result')  # pylint: disable=protected-access
        ]
      with K.name_scope('training'):
        # Gets loss and metrics. Updates weights at each call.
        fn = K.function(
            inputs, [self.total_loss] + metrics_tensors,
            updates=updates,
            name='train_function',
            **self._function_kwargs)
        setattr(self, 'train_function', fn)
      # Restore the current trainable state
      self._set_trainable_state(current_trainable_state)
  def get_config(self):
    # Serialize both submodels and the activation so the combined model can
    # be reconstructed by from_config().
    linear_config = generic_utils.serialize_keras_object(self.linear_model)
    dnn_config = generic_utils.serialize_keras_object(self.dnn_model)
    config = {
        'linear_model': linear_config,
        'dnn_model': dnn_config,
        'activation': activations.serialize(self.activation),
    }
    base_config = base_layer.Layer.get_config(self)
    return dict(list(base_config.items()) + list(config.items()))
  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Pop the submodel configs first so the remaining keys can be forwarded
    # to __init__ as plain kwargs.
    linear_config = config.pop('linear_model')
    linear_model = layer_module.deserialize(linear_config, custom_objects)
    dnn_config = config.pop('dnn_model')
    dnn_model = layer_module.deserialize(dnn_config, custom_objects)
    activation = activations.deserialize(
        config.pop('activation', None), custom_objects=custom_objects)
    return cls(
        linear_model=linear_model,
        dnn_model=dnn_model,
        activation=activation,
        **config)
|
# -*- coding: utf-8-*-
import re
import requests
from bs4 import BeautifulSoup
from openpyxl import Workbook
LOGINURL = 'https://expert.nadzor-info.ru/login'
USERNAME = ''
PASSWORD = ''
URL = ''
# openpyxl writes the OOXML (.xlsx) format; the previous '.xls' extension
# produced a file whose contents did not match its name.
SAVE_FILE = 'exp1.xlsx'
session = requests.Session()
# Extract the CSRF token from the login page's Set-Cookie header.
# Raw string avoids the invalid-escape-sequence warning for '\w'.
CSRF = re.search(
    r'(?<=csrftoken=)\w+',
    session.get(LOGINURL).headers['Set-Cookie']
)
req_headers = {
    'content-type': 'application/x-www-form-urlencoded',
    'accept': 'text/html, application/xhtml+xml',
    'accept-encoding': 'gzip, deflate, br',
    'accept-language': 'ru-RU, ru;q=0.8,en-US;q=0.6,en;q=0.4',
    'cache-control': 'max-age=0',
    'host': 'expert.nadzor-info.ru',
    'upgrade-insecure-requests': '1'
}
form_data = {
    'username': USERNAME,
    'password': PASSWORD,
    'remember_me': 'on',
    'csrfmiddlewaretoken': CSRF.group(0)
}
def get_url(url, first_question, last_question, questions_count=0):
    """Scrape the question pages first_question..last_question (inclusive)
    under `url` and write number/question/answer/document rows to SAVE_FILE.

    Args:
        url: base URL of the question pages ('/<id>/' is appended per page).
        first_question: first question id to fetch.
        last_question: last question id to fetch (inclusive).
        questions_count: unused; kept for backward compatibility with callers.
    """
    # Authenticate; only the session cookies matter, the response is unused.
    session.post(LOGINURL, data=form_data, headers=req_headers, allow_redirects=True)
    wb = Workbook()
    ws = wb.active
    for i in range(first_question, last_question + 1):
        resp = session.get('%s/%d/' % (url, i))
        # progress: running counter plus the actual question id
        print('#%d, %d' % (i - first_question + 1, i))
        print(resp.status_code)
        soup = BeautifulSoup(resp.text, 'html.parser')
        answer_list = soup.find_all('div', {'class': 'question'})
        for tag in answer_list:
            q_num = tag.find('h2', {'class': 'question__question'}).text
            question = tag.find('div', {'class': 'question__text'}).text
            answer = tag.find('div', {'class': 'answers__preparation-answer-text'})
            document = tag.find('div', {'class': 'answers__answer-ntd'})
            # both the answer and the referenced document are optional
            if answer is None:
                answer = ''
                print("no answer")
            else:
                answer = answer.text
            if document is None:
                document = ''
                print("no ntd")
            else:
                document = document.text
            ws.append([
                re.search(r'\d+', q_num).group(),
                question.strip(), answer.strip(), document.strip()
            ])
        # saved every page so the data survives a crash mid-run
        wb.save(SAVE_FILE)
get_url(URL, 133927, 134152)
|
from hypothesis import given
from tests.integration_tests.hints import BoundPortedVerticesPair
from tests.utils import equivalence
from . import strategies
@given(strategies.vertices_pairs)
def test_basic(pair: BoundPortedVerticesPair) -> None:
    """The bound and ported vertex lists must agree on degeneracy."""
    bound_vertices, ported_vertices = pair
    assert equivalence(bound_vertices.is_degenerate,
                       ported_vertices.is_degenerate)
|
import wx
import wx.grid
import wx.lib.gizmos as gizmos
import CustomGridRenderer as cgr
import wx.propgrid as wxpg
import wx.lib.scrolledpanel as scrolled
import sqlite3
import time
import os
import typing
from typing import List
from typing import Union
from typing import Tuple
from typing import Dict
from typing import NewType
import Utilities as util
from Debug import debug
import QuestsTab as q
import Links as link
wxTreeListItem = NewType('wxTreeListItem', None)
class QuestsTab:
	"""The "Quests" page of the main notebook.

	Shows a filterable grid of quests on the left (per category: assigned,
	optional, event, special, arena) and a detail view on the right with the
	selected quest's stats, monsters and rewards, loaded from the mhw.db
	SQLite database.
	"""

	def __init__(self, root, mainNotebook, link):
		"""Store references to the app root, notebook and link helper, set
		defaults, and build all widgets for the tab."""
		self.root = root
		self.mainNotebook = mainNotebook
		self.link = link
		# True while the quest grid is (re)loading so that the grid's
		# selection events fired during population are ignored.
		self.init = True
		self.currentlySelectedQuestID = 101
		self.currentQuestTree = "optional"
		self.testIcon = wx.Bitmap("images/unknown.png", wx.BITMAP_TYPE_ANY)
		# star-row background colours per rank: low/high/master
		self.rankColors = {
			"lr": "#7cd3de",
			"hr": "#ffae47",
			"mr": "#ffef66",
		}
		self.initQuestsTab()

	def initQuestsTab(self):
		"""Create the tab panel, sizers and detail notebook, then build the
		category buttons, search boxes, quest grid and detail widgets."""
		self.questPanel = wx.Panel(self.mainNotebook)
		self.mainNotebook.AddPage(self.questPanel, "Quests")
		self.questSizer = wx.BoxSizer(wx.HORIZONTAL)
		self.questTreeSizer = wx.BoxSizer(wx.VERTICAL)
		self.questDetailedSizer = wx.BoxSizer(wx.VERTICAL)
		questImage = wx.Bitmap("images/hunter-rank-160.png", wx.BITMAP_TYPE_ANY)
		self.questImageLabel = wx.StaticBitmap(self.questPanel, bitmap=questImage, size=(160, 160))
		self.questDetailsNotebook = wx.Notebook(self.questPanel)
		self.questDetailPanel = wx.ScrolledWindow(self.questDetailsNotebook)
		self.questDetailSizer = wx.BoxSizer(wx.VERTICAL)
		self.questDetailsNotebook.AddPage(self.questDetailPanel, "Detail")
		self.questDetailPanel.SetSizer(self.questDetailSizer)
		self.questDetailedImagesSizer = wx.BoxSizer(wx.HORIZONTAL)
		self.questDetailedImagesSizer.Add(self.questImageLabel, 1, wx.ALIGN_CENTER)
		self.questDetailedSizer.Add(self.questDetailedImagesSizer, 1, wx.EXPAND)
		self.questDetailedSizer.Add(self.questDetailsNotebook, 5, wx.EXPAND)
		self.questSizer.Add(self.questTreeSizer, 0, wx.EXPAND)
		self.questSizer.Add(self.questDetailedSizer, 1, wx.EXPAND)
		self.questPanel.SetSizer(self.questSizer)
		self.initQuestButtons()
		self.initSearch()
		self.initQuestTree()
		self.initQuestDetail()
		self.questDetailList.Bind(wx.EVT_SIZE, self.onSize)
		self.questDetailPanel.SetScrollRate(20, 20)

	def initQuestButtons(self):
		"""Create one button per quest category above the quest grid."""
		quests = ["assigned", "optional", "event", "special", "arena"]
		self.questButtonsSizer = wx.BoxSizer(wx.HORIZONTAL)
		for i, item in enumerate(quests):
			# the button name carries the category key used by the SQL filter
			button = wx.Button(self.questPanel, label=item.capitalize(), name=item)
			self.questButtonsSizer.Add(button)
			button.Bind(wx.EVT_BUTTON, self.onQuestTypeSelection)
		self.questTreeSizer.Add(self.questButtonsSizer)

	def initSearch(self):
		"""Create the by-name and by-monster search boxes; typing in either
		reloads the quest grid filtered by that box."""
		self.searchName = wx.TextCtrl(self.questPanel, name="byName")
		self.searchName.SetHint(" search by name")
		self.searchName.Bind(wx.EVT_TEXT, self.onSearchTextEnter)
		self.searchMonster = wx.TextCtrl(self.questPanel, name="byMonster")
		self.searchMonster.SetHint(" search by monster")
		self.searchMonster.Bind(wx.EVT_TEXT, self.onSearchTextEnter)
		self.currentSearch = self.searchName
		self.questButtonsSizer.Add(self.searchName, 0, wx.ALIGN_CENTER_VERTICAL)
		self.questButtonsSizer.Add(self.searchMonster, 0, wx.ALIGN_CENTER_VERTICAL)

	def onSearchTextEnter(self, event):
		"""Remember which search box was last edited and reload the grid."""
		self.currentSearch = event.GetEventObject()
		self.loadQuestTree()

	def initQuestTree(self):
		"""Create and configure the quest grid (read-only, fixed row size)
		and load it with the current category."""
		self.questTree = cgr.HeaderBitmapGrid(self.questPanel)
		self.questTree.EnableEditing(False)
		self.questTree.EnableDragRowSize(False)
		self.questTree.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.onQuestSelection)
		self.questTreeSizer.Add(self.questTree, 1, wx.EXPAND)
		# column -> [width, header bitmap or None]; the zero-width "id"
		# column stores the quest id for selection lookups.
		questTreeColumns = {
			"Name": [405, None],
			"Location": [175, None],
			"Zenny": [60, wx.Bitmap("images/zenny.png")],
			"id": [0, None],
		}
		self.questTree.CreateGrid(1, len(questTreeColumns))
		self.questTree.SetDefaultRowSize(27, resizeExistingRows=True)
		self.questTree.SetRowLabelSize(1)
		self.questTree.SetDefaultCellAlignment(wx.ALIGN_CENTER, wx.ALIGN_CENTER)
		for col, (k, v) in enumerate(questTreeColumns.items()):
			if v[1] == None:
				self.questTree.SetColLabelValue(col, k)
			else:
				self.questTree.SetColLabelRenderer(col, cgr.HeaderBitmapColLabelRenderer(v[1], ""))
			self.questTree.SetColSize(col, v[0])
		self.loadQuestTree()

	def loadQuestTree(self):
		"""Reload the quest grid for the current category, applying the
		active name/monster search filter."""
		self.init = True
		try:
			self.questTree.DeleteRows(0, self.questTree.GetNumberRows())
		except:
			# NOTE(review): bare except is only meant to cover the first call
			# (no rows yet), but it hides any other grid error as well.
			pass
		# single quotes doubled because the text is interpolated into SQL
		searchText = self.currentSearch.GetValue().replace("'", "''")
		if len(searchText) == 0 or searchText == " ":
			sql = """
				SELECT q.id, q.name, q.description, q.objective, q.quest_type, q.category, q.location, q.stars, q.zenny
				FROM quest q
				WHERE q.category = :questCat
				ORDER BY q.stars
				"""
		else:
			# NOTE(review): the LIKE clauses below are built with f-string
			# interpolation; quotes are escaped above, but a bound parameter
			# would be the safer pattern.
			if self.currentSearch.GetName() == "byMonster":
				sql = f"""
					SELECT q.id, q.name, q.description, q.objective, q.quest_type, q.category, q.location, q.stars, q.zenny
					FROM quest q
					JOIN quest_monsters qm
						ON qm.id = q.id
					WHERE q.category = :questCat
					AND qm.monster_name LIKE '%{searchText}%'
					AND qm.is_objective = 1
					ORDER BY q.stars
					"""
			else:
				sql = f"""
					SELECT q.id, q.name, q.description, q.objective, q.quest_type, q.category, q.location, q.stars, q.zenny
					FROM quest q
					WHERE q.category = :questCat
					AND q.name LIKE '%{searchText}%'
					ORDER BY q.stars
					"""
		conn = sqlite3.connect("mhw.db")
		data = conn.execute(sql, (self.currentQuestTree,))
		data = data.fetchall()
		quests = []
		for row in data:
			quests.append(q.Quest(row))
		# star count -> rank key; NOTE(review): duplicated in
		# loadQuestDetail -- keep the two in sync.
		starRanks = {
			1: "lr",
			2: "lr",
			3: "lr",
			4: "lr",
			5: "lr",
			6: "hr",
			7: "hr",
			8: "hr",
			9: "hr"
		}
		lastStar = 0
		for quest in quests:
			# lastStar lets populateQuestTree insert a star-separator row
			# whenever the star level changes
			self.populateQuestTree(quest, lastStar, starRanks[quest.stars])
			lastStar = quest.stars
		self.init = False

	def populateQuestTree(self, quest, lastStar, starIcon):
		"""Append one quest row to the grid, preceded by a coloured star
		header row whenever the star level changes."""
		self.questTree.AppendRows()
		row = self.questTree.GetNumberRows() - 1
		if lastStar != quest.stars:
			img = wx.Bitmap(f"images/rank-stars-24/{starIcon}.png")
			self.questTree.SetCellRenderer(row, 0, cgr.ImageTextCellRenderer(
				img, f"{quest.stars}", imageOffset=20, colour=util.hexToRGB(self.rankColors[starIcon]), font=wx.Font(wx.FontInfo(9).Bold())))
			self.questTree.SetCellBackgroundColour(row, 1, util.hexToRGB(self.rankColors[starIcon]))
			self.questTree.SetCellBackgroundColour(row, 2, util.hexToRGB(self.rankColors[starIcon]))
			self.questTree.AppendRows()
			row = self.questTree.GetNumberRows() - 1
		img = wx.Bitmap(f"images/quests-24/{quest.questType}.png")
		self.questTree.SetCellRenderer(row, 0, cgr.ImageTextCellRenderer(
			img, f"{quest.name}", imageOffset=115))
		if os.path.exists(f"images/locations-24/{quest.location}.png"):
			img = wx.Bitmap(f"images/locations-24/{quest.location}.png")
		else:
			img = wx.Bitmap(f"images/unknown.png")
		self.questTree.SetCellRenderer(row, 1, cgr.ImageTextCellRenderer(
			img, f"{quest.location}", imageOffset=70))
		self.questTree.SetCellValue(row, 2, str(quest.zenny))
		# hidden id column used by onQuestSelection
		self.questTree.SetCellValue(row, 3, str(quest.id))

	def initQuestDetail(self):
		"""Create the detail widgets (labels, stats grid, monster and reward
		lists) and load the currently selected quest into them."""
		self.questObjectiveLabel = wx.StaticText(self.questDetailPanel, label="Objective:\n")
		self.questNameLabel = wx.StaticText(self.questDetailPanel, label="Name:\n")
		self.questDescriptionLabel = wx.StaticText(self.questDetailPanel, label="Description")
		# NOTE(review): sizer proportions are ints; the 0.1 floats below are
		# accepted but truncated -- confirm intended proportions.
		self.questDetailSizer.Add(self.questObjectiveLabel, 0.1, wx.EXPAND)
		self.questDetailSizer.Add(self.questNameLabel, 0.1, wx.EXPAND)
		self.questDetailSizer.Add(self.questDescriptionLabel, 0.1, wx.EXPAND)
		self.questDetailList = cgr.HeaderBitmapGrid(self.questDetailPanel)
		self.questDetailList.Bind(wx.EVT_MOUSEWHEEL, self.onScroll)
		self.questDetailList.EnableEditing(False)
		self.questDetailList.EnableDragRowSize(False)
		self.questDetailSizer.Add(self.questDetailList, 0.1, wx.EXPAND)
		self.questDetailList.CreateGrid(6, 2)
		self.questDetailList.SetDefaultRowSize(24, resizeExistingRows=True)
		self.questDetailList.SetColSize(0, 302)
		self.questDetailList.SetColSize(1, 155 - 20)
		self.questDetailList.SetDefaultCellAlignment(wx.ALIGN_CENTER, wx.ALIGN_CENTER)
		self.questDetailList.SetColLabelSize(2)
		self.questDetailList.SetRowLabelSize(1)
		self.questMonstersList = wx.ListCtrl(self.questDetailPanel, style=wx.LC_REPORT
																	| wx.LC_VRULES
																	| wx.LC_HRULES
																	)
		self.questMonstersList.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.onListDoubleClick)
		# one shared image list provides icons for both list controls
		self.il = wx.ImageList(24, 24)
		self.questMonstersList.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
		self.questDetailSizer.Add(self.questMonstersList, 0.1, wx.EXPAND|wx.TOP, 5)
		self.materialsRequiredList = wx.ListCtrl(self.questDetailPanel, style=wx.LC_REPORT
																	| wx.LC_VRULES
																	| wx.LC_HRULES
																	)
		self.materialsRequiredList.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.onListDoubleClick)
		self.materialsRequiredList.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
		self.questDetailSizer.Add(self.materialsRequiredList, 2, wx.EXPAND|wx.TOP, 5)
		self.loadQuestDetail()

	def loadQuestDetail(self):
		"""Reload the detail view (labels, stats grid, monsters, rewards)
		for self.currentlySelectedQuestID."""
		self.root.Freeze()
		self.questMonstersList.ClearAll()
		self.materialsRequiredList.ClearAll()
		self.il.RemoveAll()
		sql = """
			SELECT q.id, q.name, q.description, q.objective, q.quest_type, q.category, q.location, q.stars, q.zenny
			FROM quest q
			WHERE q.id = :questID
			"""
		conn = sqlite3.connect("mhw.db")
		data = conn.execute(sql, (self.currentlySelectedQuestID,))
		data = data.fetchone()
		quest = q.Quest(data)
		# star count -> rank key; NOTE(review): duplicated in loadQuestTree
		# -- keep the two in sync.
		starRanks = {
			1: "lr",
			2: "lr",
			3: "lr",
			4: "lr",
			5: "lr",
			6: "hr",
			7: "hr",
			8: "hr",
			9: "hr"
		}
		# stats-grid rows: label -> [icon path or None, display value]
		questDetail = {
			"Stars": [f"images/rank-stars-24/{starRanks[quest.stars]}.png", str(quest.stars)],
			"Quest Type": [f"images/quests-24/{quest.questType}.png", str(quest.questType.capitalize())],
			"Category": [None, str(quest.category.capitalize())],
			"Location": [f"images/locations-24/{quest.location}.png", str(quest.location)],
			"Zenny": ["images/zenny.png", str(quest.zenny)],
		}
		self.questObjectiveLabel.SetFont(self.questObjectiveLabel.GetFont().Bold())
		self.questNameLabel.SetFont(self.questNameLabel.GetFont().Bold())
		self.questObjectiveLabel.SetLabel(f"{quest.objective}\n")
		self.questNameLabel.SetLabel(f"{quest.name}:")
		self.questDescriptionLabel.SetLabel(f"{quest.description}\n")
		self.questDescriptionLabel.Wrap(600)
		self.questDetailList.DeleteRows(0, self.questDetailList.GetNumberRows())
		self.questDetailList.AppendRows(len(questDetail))
		for i, (k, v) in enumerate(questDetail.items()):
			self.questDetailList.SetCellValue(i, 0, k)
			if v[0] != None:
				padding = " " * 8
				if os.path.exists(v[0]):
					img = wx.Bitmap(v[0])
				else:
					img = wx.Bitmap("images/unknown.png")
				self.questDetailList.SetCellRenderer(i, 1, cgr.ImageTextCellRenderer(
					img, f"{v[1]}", imageOffset=65))
			else:
				self.questDetailList.SetCellValue(i, 1, v[1])
		self.loadQuestMonsters()
		self.loadQuestMaterials()
		# grow-then-shrink by one pixel to force the panel to re-layout
		width, height = self.questPanel.GetSize()
		self.questPanel.SetSize(width + 1, height + 1)
		self.questPanel.SetSize(width, height)
		self.root.Thaw()

	def loadQuestMonsters(self):
		"""Rebuild the monster list columns and fill them with the selected
		quest's monsters (hidden column 3 holds the link payload)."""
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_LEFT
		info.Text = "Name"
		self.questMonstersList.InsertColumn(0, info)
		self.questMonstersList.SetColumnWidth(0, 380)
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_CENTER
		info.Text = "Quantity"
		self.questMonstersList.InsertColumn(1, info)
		self.questMonstersList.SetColumnWidth(1, 100)
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_CENTER
		info.Text = "Objective"
		self.questMonstersList.InsertColumn(2, info)
		self.questMonstersList.SetColumnWidth(2, 200)
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_LEFT
		info.Text = ""
		self.questMonstersList.InsertColumn(3, info)
		self.questMonstersList.SetColumnWidth(3, 0)
		sql = """
			SELECT qm.monster_id, qm.monster_name, qm.quantity, qm.is_objective
			FROM quest q
			JOIN quest_monsters qm
				ON qm.id = q.id
			WHERE q.id = :questID
			"""
		conn = sqlite3.connect("mhw.db")
		data = conn.execute(sql, (self.currentlySelectedQuestID,))
		data = data.fetchall()
		monsters = []
		for row in data:
			monsters.append(q.QuestMonster(row))
		for mon in monsters:
			# Kestodon has gendered icon files instead of a plain one
			if mon.name == "Kestodon":
				img = self.il.Add(wx.Bitmap(f"images/monsters/24/Kestodon Male.png"))
			else:
				img = self.il.Add(wx.Bitmap(f"images/monsters/24/{mon.name}.png"))
			index = self.questMonstersList.InsertItem(self.questMonstersList.GetItemCount(), mon.name, img)
			self.questMonstersList.SetItem(index, 1, f"{mon.quantity}")
			if bool(mon.isObjective):
				if self.root.pref.unicodeSymbols:
					self.questMonstersList.SetItem(index, 2, "✓")
				else:
					self.questMonstersList.SetItem(index, 2, "Yes")
			# hidden payload consumed by onListDoubleClick
			self.questMonstersList.SetItem(index, 3, f"monster,{mon.id},{mon.name}")

	def loadQuestMaterials(self):
		"""Rebuild the reward-material list columns and fill them with the
		selected quest's rewards (hidden column 3 holds the link payload)."""
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_LEFT
		info.Text = "Req. Materials"
		self.materialsRequiredList.InsertColumn(0, info)
		self.materialsRequiredList.SetColumnWidth(0, 380)
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_CENTER
		info.Text = ""
		self.materialsRequiredList.InsertColumn(1, info)
		self.materialsRequiredList.SetColumnWidth(1, 100)
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_CENTER
		info.Text = "Reward Group"
		self.materialsRequiredList.InsertColumn(2, info)
		self.materialsRequiredList.SetColumnWidth(2, 200)
		info = wx.ListItem()
		info.Mask = wx.LIST_MASK_TEXT | wx.LIST_MASK_IMAGE | wx.LIST_MASK_FORMAT
		info.Image = -1
		info.Align = wx.LIST_FORMAT_CENTER
		info.Text = ""
		self.materialsRequiredList.InsertColumn(3, info)
		self.materialsRequiredList.SetColumnWidth(3, 0)
		sql = """
			SELECT qr.item_id, it.name, i.category, qr.stack, qr.percentage, qr.reward_group, i.icon_name, i.icon_color
			FROM quest q
			JOIN quest_rewards qr
				ON qr.id = q.id
			JOIN item i
				ON qr.item_id = i.id
			JOIN item_text it
				ON it.id = i.id
				AND it.lang_id = 'en'
			WHERE q.id = :questID
			"""
		conn = sqlite3.connect("mhw.db")
		data = conn.execute(sql, (self.currentlySelectedQuestID,))
		data = data.fetchall()
		items = []
		for row in data:
			items.append(q.QuestReward(row))
		for item in items:
			if os.path.exists(f"images/items-24/{item.iconName}{item.iconColor}.png"):
				img = self.il.Add(wx.Bitmap(f"images/items-24/{item.iconName}{item.iconColor}.png"))
			else:
				img = self.il.Add(wx.Bitmap(f"images/unknown.png"))
			index = self.materialsRequiredList.InsertItem(self.materialsRequiredList.GetItemCount(), item.name, img)
			self.materialsRequiredList.SetItem(index, 1, f"{item.stack} x {item.percentage}%")
			self.materialsRequiredList.SetItem(index, 2, f"{item.rewardGroup}")
			# hidden payload consumed by onListDoubleClick
			self.materialsRequiredList.SetItem(index, 3, f"item,{item.id},{item.category}")

	def onQuestSelection(self, event):
		"""Load the detail view for the quest row just selected; separator
		rows have an empty hidden id and are ignored."""
		if not self.init:
			self.currentlySelectedQuestID = self.questTree.GetCellValue(event.GetRow(), 3)
			if self.currentlySelectedQuestID != "":
				self.loadQuestDetail()

	def onQuestTypeSelection(self, event):
		"""Switch the grid to the category named by the clicked button."""
		self.currentQuestTree = event.GetEventObject().GetName()
		self.loadQuestTree()

	def onListDoubleClick(self, event):
		"""Follow the hidden 'kind,id[,extra]' payload of the activated list
		row via the shared link helper."""
		materialInfo = event.GetEventObject().GetItemText(event.GetEventObject().GetFirstSelected(), 3).split(",")
		self.link.event = True
		self.link.eventType = materialInfo[0]
		materialInfo.remove(materialInfo[0])
		if len(materialInfo) == 1:
			self.link.info = link.GenericSingleLink(materialInfo[0])
		elif len(materialInfo) == 2:
			self.link.info = link.GenericDoubleLink(materialInfo)
		else:
			debug(materialInfo, "materialInfo", "materialInfo length is other than accounted for!")
		self.root.followLink()
		self.link.reset()

	def onSize(self, event):
		"""Keep the detail columns proportional to the panel width."""
		try:
			self.questDetailList.SetColSize(0, self.questDetailPanel.GetSize()[0] * 0.66)
			self.questDetailList.SetColSize(1, self.questDetailPanel.GetSize()[0] * 0.34 - 20)
			self.questMonstersList.SetColumnWidth(0, self.questDetailPanel.GetSize()[0] * 0.56)
			self.questMonstersList.SetColumnWidth(1, self.questDetailPanel.GetSize()[0] * 0.17)
			self.questMonstersList.SetColumnWidth(2, self.questDetailPanel.GetSize()[0] * 0.27 - 40)
			self.materialsRequiredList.SetColumnWidth(0, self.questDetailPanel.GetSize()[0] * 0.56)
			self.materialsRequiredList.SetColumnWidth(1, self.questDetailPanel.GetSize()[0] * 0.17)
			self.materialsRequiredList.SetColumnWidth(2, self.questDetailPanel.GetSize()[0] * 0.27 - 40)
		except:
			# NOTE(review): bare except; size events can arrive before the
			# widgets exist, but this also hides genuine errors.
			pass

	def onScroll(self, event):
		"""Scroll the detail panel on mouse wheel: one line near the top,
		three lines otherwise (wheel up scrolls toward the top)."""
		if event.GetWheelRotation() > 0:
			if self.questDetailPanel.GetViewStart()[1] < 3:
				self.questDetailPanel.Scroll(0, self.questDetailPanel.GetViewStart()[1] + 1 * -1)
			else:
				self.questDetailPanel.Scroll(0, self.questDetailPanel.GetViewStart()[1] + 3 * -1)
		else:
			if self.questDetailPanel.GetViewStart()[1] < 3:
				self.questDetailPanel.Scroll(0, self.questDetailPanel.GetViewStart()[1] + 1)
			else:
				self.questDetailPanel.Scroll(0, self.questDetailPanel.GetViewStart()[1] + 3)
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import os
import os.path
import shutil
import collections
import logging
from .base import BaseProvider
from ..util import log
from ..command import run_command
class CompileProvider(BaseProvider):
    '''
    Runs a command, such as compiling *.scss or *.less, when an output file
    timestamp is older than the source file.
    When settings.DEBUG=True, checks for a recompile every request.
    When settings.DEBUG=False, checks for a recompile only once per server run.
    '''
    DEFAULT_OPTIONS = {
        'group': 'styles',
        # source path to search for, relative to the project directory. possible values are:
        #   1. None: a default path is used, such as "{app}/{subdir}/{filename.ext}", prefixed
        #      with the static root at production; see subclasses for their default filenames.
        #   2. function, lambda, or other callable: called as func(provider) and
        #      should return a string
        #   3. str: used directly
        'sourcepath': None,
        # target path the source is compiled into, relative to the project directory.
        # should resolve to one exact file. possible values:
        #   1. None: a default path is used, such as "{app}/{subdir}/{filename.ext}", prefixed
        #      with the static root at production; see subclasses for their default filenames.
        #   2. function, lambda, or other callable: called as func(provider) and
        #      should return a string
        #   3. str: used directly
        'targetpath': None,
        # explicitly sets the command to be run. possible values:
        #   1. None or []: the default command is run
        #   2. function, lambda, or other callable: called as func(provider), expects list as return
        #   3. list: used directly in the call to subprocess module
        'command': [],
    }
    def __init__(self, *args, **kwargs):
        '''Resolve the source/target paths (cached across instances) and
        recompile the target when it is missing or out of date.'''
        super().__init__(*args, **kwargs)
        # create the paths; get_cache_item() raises AttributeError the first
        # time, before any instance has cached the resolved pair
        try:
            self.sourcepath, self.targetpath = self.get_cache_item()
            checked_previously = True
        except AttributeError:
            self.sourcepath = os.path.join(settings.BASE_DIR if settings.DEBUG else settings.STATIC_ROOT, self.build_sourcepath())
            self.targetpath = os.path.join(settings.BASE_DIR if settings.DEBUG else settings.STATIC_ROOT, self.build_targetpath())
            self.set_cache_item((self.sourcepath, self.targetpath))
            checked_previously = False
        # at production, only the first instance per server run hits the disk
        if not settings.DEBUG and checked_previously:
            log.debug('%s created for %s [checked previously]', self, self.sourcepath)
            return
        # do we need to compile?
        if not os.path.exists(self.sourcepath):
            log.debug('%s created for %s [nonexistent file]', self, self.sourcepath)
        elif not self.needs_compile:
            log.debug('%s created for %s [already up-to-date]', self, self.sourcepath)
        else:
            log.debug('%s created for %s [compiling]', self, self.sourcepath)
            if not os.path.exists(os.path.dirname(self.targetpath)):
                os.makedirs(os.path.dirname(self.targetpath))
            run_command(*self.build_command())
    def build_sourcepath(self):
        '''Returns the source path, from options or the subclass default.'''
        # if defined in settings, run the function or return the string
        if self.options['sourcepath'] is not None:
            return self.options['sourcepath'](self) if callable(self.options['sourcepath']) else self.options['sourcepath']
        # build the default
        if self.app_config is None:
            # fixed: was log.warn() with a mixed '{}'/'%s' format string and a
            # single tuple argument (garbled output), and the message named
            # `targetpath` instead of `sourcepath`
            log.warning('%s skipped: template %s not in project subdir and `sourcepath` not in settings', self.__class__.__qualname__, self.template_relpath)
        return self.build_default_sourcepath()
    def build_default_sourcepath(self):
        # this method is overridden in CompileScssProvider and CompileLessProvider lower in this file
        raise ImproperlyConfigured('{} must set `sourcepath` in options (or a subclass can override build_default_sourcepath).'.format(self.__class__.__qualname__))
    def build_targetpath(self):
        '''Returns the target path, from options or the subclass default.'''
        # if defined in settings, run the function or return the string
        if self.options['targetpath'] is not None:
            return self.options['targetpath'](self) if callable(self.options['targetpath']) else self.options['targetpath']
        # build the default
        if self.app_config is None:
            # fixed: was log.warn() with a mixed '{}'/'%s' format string and a
            # single tuple argument, which produced garbled log output
            log.warning('%s skipped: template %s not in project subdir and `targetpath` not in settings', self.__class__.__qualname__, self.template_relpath)
        return self.build_default_targetpath()
    def build_default_targetpath(self):
        # this method is overridden in CompileScssProvider and CompileLessProvider lower in this file
        raise ImproperlyConfigured('{} must set `targetpath` in options (or a subclass can override build_default_targetpath).'.format(self.__class__.__qualname__))
    def build_command(self):
        '''Returns the command to run, as a list (see subprocess module)'''
        # if defined in settings, run the function or return the string
        if self.options['command']:
            return self.options['command'](self) if callable(self.options['command']) else self.options['command']
        # build the default
        return self.build_default_command()
    def build_default_command(self):
        # this method is overridden in CompileScssProvider and CompileLessProvider lower in this file
        raise ImproperlyConfigured('{} must set `command` in options (or a subclass can override build_default_command).'.format(self.__class__.__qualname__))
    @property
    def needs_compile(self):
        '''Returns True if self.sourcepath is newer than self.targetpath'''
        try:
            source_mtime = os.stat(self.sourcepath).st_mtime
        except OSError:  # no source for this template, so just return
            return False
        try:
            target_mtime = os.stat(self.targetpath).st_mtime
        except OSError:  # target doesn't exist, so compile
            return True
        # both source and target exist, so compile if source newer
        return source_mtime > target_mtime
###################
### Sass
class CompileScssProvider(CompileProvider):
    '''Specialized CompileProvider that compiles *.scss sources with `sass`.'''
    def build_default_sourcepath(self):
        '''Default source file: {app}/styles/{template}.scss.'''
        scss_name = self.template_relpath + '.scss'
        return os.path.join(self.app_config.name, 'styles', scss_name)
    def build_default_targetpath(self):
        '''Default compile target: {app}/styles/{template}.css.'''
        # built with os.path.join like the source path; the URL translation
        # (forward slashes) happens elsewhere
        css_name = self.template_relpath + '.css'
        return os.path.join(self.app_config.name, 'styles', css_name)
    def build_default_command(self):
        '''Default command: run `sass` with the project dir on the load path.'''
        sass_binary = shutil.which('sass')
        load_path_arg = '--load-path={}'.format(settings.BASE_DIR)
        return [sass_binary, load_path_arg, self.sourcepath, self.targetpath]
#####################
### Less
class CompileLessProvider(CompileProvider):
    '''Specialized CompileProvider that compiles *.less sources with `lessc`.'''
    def build_default_sourcepath(self):
        '''Default source file: {app}/styles/{template}.less.'''
        less_name = self.template_relpath + '.less'
        return os.path.join(self.app_config.name, 'styles', less_name)
    def build_default_targetpath(self):
        '''Default compile target: {app}/styles/{template}.css.'''
        # built with os.path.join like the source path; the URL translation
        # (forward slashes) happens elsewhere
        css_name = self.template_relpath + '.css'
        return os.path.join(self.app_config.name, 'styles', css_name)
    def build_default_command(self):
        '''Default command: run `lessc` with source maps enabled.'''
        lessc_binary = shutil.which('lessc')
        return [lessc_binary, '--source-map', self.sourcepath, self.targetpath]
|
"""
purplescript.lexer
~~~~~~~~~~~~
:copyright: (c) 2011 by Martin Rusev.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.1'
from ply import lex
from helper import merge_lists
class Lexer(object):
    """PLY-based lexer for PurpleScript source text.

    Token definitions follow PLY conventions: ``t_<NAME>`` string attributes
    are simple regex rules (PLY orders them longest-pattern first), while
    ``t_<NAME>`` methods carry their regex in the docstring and are tried in
    definition order.
    """

    # Language keywords mapped to their token type names.  The function
    # rules below consult this dict so that keywords are not mis-classified
    # as VARIABLE/NUMBER/CONSTANT tokens.
    reserved_words = {
        'if' : 'IF',
        'elseif': 'ELSEIF',
        'else' : 'ELSE',
        'endif': 'ENDIF',
        'for': 'FOR',
        'in': 'IN',
        'endfor': 'ENDFOR',
        'end' : 'END',
        'endclass' : 'ENDCLASS',
        'def' : 'DEF',
        'class' : 'CLASS',
        'extends' : 'EXTENDS',
        'implements' : 'IMPLEMENTS',
    }

    operators = (
        'ASSIGNMENT',
        'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
        'OR', 'AND', 'NOT', 'LT', 'LE', 'GT', 'GE',
        'EQ', 'NE',
    )

    delimiters = (
        'LPAREN', 'RPAREN', 'LBRACKET',
        'RBRACKET', 'LBRACE', 'RBRACE',
        'COMMA', 'SEMI', 'COLON', 'DOT', 'RANGE',
    )

    increment_decrement = (
        'INCREMENT', 'DECREMENT',
    )

    object_oriented = (
        'THIS',
    )

    data_types = (
        'STRING',
    )

    generic_tokens = (
        'COMMENT', 'VARIABLE', 'CONSTANT', 'NUMBER', 'NEW_LINE',
    )

    # Full token list required by PLY (union of all groups plus keywords).
    tokens = merge_lists(
        operators, delimiters, object_oriented,
        increment_decrement, data_types, generic_tokens,
        reserved_words.values(),
    )

    # OPERATORS (string rules: PLY sorts these by decreasing regex length,
    # so e.g. '==' is tried before '=').
    t_ASSIGNMENT = r'='
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_MODULO = r'%'
    t_OR = r'\|\|'
    t_AND = r'&&'
    t_NOT = r'!'
    t_LT = r'<'
    t_GT = r'>'
    t_LE = r'<='
    t_GE = r'>='
    t_EQ = r'=='
    t_NE = r'!='

    # DELIMITERS
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_LBRACE = r'\{'
    t_RBRACE = r'\}'
    t_COMMA = r','
    t_SEMI = r';'
    t_COLON = r':'
    t_DOT = r'\.'
    t_RANGE = r'\.\.'

    # Increment/decrement
    t_INCREMENT = r'\+\+'
    t_DECREMENT = r'--'

    def t_NUMBER(self, t):
        r'[0-9]+'
        # NOTE(review): digit runs can never be reserved words, so this
        # lookup always falls through to 'NUMBER'.
        t.type = self.reserved_words.get(t.value, 'NUMBER')
        return t

    def t_VARIABLE(self, t):
        r'[A-Za-z_0-9]+'
        # Keywords are re-tagged via reserved_words; everything else is a
        # VARIABLE token.
        t.type = self.reserved_words.get(t.value, 'VARIABLE')
        return t

    def t_CONSTANT(self, t):
        r'[A-Z_]+'
        # NOTE(review): PLY tries method rules in definition order, and
        # t_VARIABLE above already matches any [A-Z_]+ run, so this rule
        # appears unreachable -- confirm whether CONSTANT tokens are
        # intended to be produced.
        t.type = self.reserved_words.get(t.value, 'CONSTANT')
        return t

    # OBJECT ORIENTED STUFF
    def t_THIS(self, t):
        r'@'
        return t

    # DATA TYPES
    def t_STRING(self, t):
        r"\'([^\\\n]|(\\.))*?\'"
        return t

    def t_newline(self, t):
        ur'\n+'
        # Track line numbers; no token is returned, so newline runs are
        # discarded from the token stream.
        t.lexer.lineno += t.value.count("\n")

    # Characters skipped without producing tokens (tab only; other
    # unmatched characters fall through to t_error below).
    t_ignore = '\t'

    def t_error(self, t):
        # Silently skip any character no rule matched.
        # print "Illegal character '%s'" % t.value[0]
        t.lexer.skip(1)

    def t_COMMENT(self, t):
        r'//.*'
        # NOTE(review): a // comment cannot contain a newline, so bumping
        # lineno here looks double-counted with t_newline -- verify.
        t.lexer.lineno += 1
        return t

    def build(self, **kwargs):
        """Create the underlying PLY lexer from this class's rules."""
        self.lexer = lex.lex(module=self, **kwargs)

    def input(self, s):
        """(Re)build the lexer and feed it the source string `s`."""
        self.build()
        self.lexer.paren_count = 0
        self.lexer.input(s)

    def token(self):
        """Return the next token, or None at end of input."""
        t = self.lexer.token()
        return t
        # NOTE(review): the lines below are unreachable (dead code after
        # the return above) and reference a `token_stream` attribute that
        # is never created anywhere in this class.
        try:
            return self.token_stream.next()
        except StopIteration:
            return None
if __name__ == '__main__':
    # Ad-hoc driver: lex a sample PurpleScript file and dump the tokens.
    import os.path
    import sys
    lexer = Lexer()
    token_list = []
    file = 'syntax/conditionals.purple'  # NOTE(review): shadows the Python 2 builtin `file`
    try:
        # NOTE(review): the isfile() result is ignored; a missing file is
        # only caught by the open() below.
        os.path.isfile(file)
        with open(file, 'r') as f:
            data = f.read()
            lexer.input(data)
        while True:
            tok = lexer.token()
            if not tok: break
            # NOTE(review): `element` is built but never used; the raw
            # token object is appended instead.
            element = {'type': tok.type, 'value':tok.value, 'line': tok.lineno, 'position':tok.lexpos}
            token_list.append(tok)
        print token_list
    except:
        print "Unexpected error:", sys.exc_info()[0]
        raise
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def hc_choice_california(path):
  """Heating and cooling system choice in newly built California houses.

  Cross-section of 250 household observations.  Columns include the chosen
  system (`depvar`: gcc, ecc, erc, hpc, gc, ec or er), installation costs
  for heating and cooling (`ich.z`, `icca`), operating costs (`och.z`,
  `occa`) and annual household `income`.

  Args:

    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `hc_choice_california.csv`.

  Returns:

    Tuple of np.ndarray `x_train` with 250 rows and 18 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'hc_choice_california.csv'
  csv_file = os.path.join(path, filename)
  if not os.path.exists(csv_file):
    # Fetch the raw CSV on first use.
    url = 'http://dustintran.com/data/r/Ecdat/HC.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='hc_choice_california.csv',
                               resume=False)

  data = pd.read_csv(csv_file, index_col=0, parse_dates=True)
  return data.values, {'columns': data.columns}
|
""" bmap: static beatmap info (thumbnails, previews, etc.) """
from fastapi import APIRouter
from fastapi import status
from fastapi.requests import Request
from fastapi.responses import RedirectResponse
router = APIRouter(prefix="/map", tags=["Beatmaps"])
# for now, just send everything to osu!
# eventually if we do bmap submission, we'll need this.
@router.get("/{file_path:path}")
async def everything(request: Request):
    """Permanently redirect any beatmap asset request to the osu! servers."""
    target = f"https://b.ppy.sh{request['path']}"
    return RedirectResponse(url=target, status_code=status.HTTP_301_MOVED_PERMANENTLY)
|
import rospy
import numpy as np
from tf.transformations import *
from geometry_msgs.msg import Twist
from gym import spaces
class ActionCollector:
    """Maps discrete action ids onto Twist velocity commands for a robot."""

    def __init__(self):
        # Velocity caps -- presumably m/s and rad/s (TODO confirm units).
        self.v_max_ = 0.8
        self.w_max_ = 1.2
        # Discrete action id -> linear/angular velocity pair.
        self.action_library = {
            0: {"linear": 0.0, "angular": -self.w_max_},
            1: {"linear": self.v_max_, "angular": 0.0},
            2: {"linear": 0.0, "angular": self.w_max_},
            3: {"linear": self.v_max_, "angular": self.w_max_ / 2},
            4: {"linear": self.v_max_, "angular": -self.w_max_ / 2},
            5: {"linear": 0.0, "angular": 0.0},
        }
        self.N_DISCRETE_ACTIONS = len(self.action_library)
        self.action_space = spaces.Discrete(self.N_DISCRETE_ACTIONS)

    def get_action_space(self):
        """Return the gym Discrete space covering all action ids."""
        return self.action_space

    def get_cmd_vel(self, action_id):
        """Build the Twist message corresponding to `action_id`."""
        entry = self.action_library[action_id]
        vel_msg = Twist()
        vel_msg.linear.x = entry["linear"]
        vel_msg.angular.z = entry["angular"]
        return vel_msg
if __name__ == "__main__":
action_collector = ActionCollector()
print(action_collector.get_cmd_vel(1))
box = spaces.Box(low=3.0, high=4, shape=(2, 2))
print(box)
box.seed(4)
for _ in range(1):
print(box.sample())
min_position = 0
max_position = 10
max_speed = 2
goal_position = 0.5
low = np.array([min_position, -max_speed])
high = np.array([max_position, max_speed])
action_space = spaces.Discrete(3) # action space
observation_space = spaces.Box(low, high) #
print("*" * 10)
print(observation_space)
for _ in range(2):
print(observation_space.sample())
observation_space = spaces.Tuple(
(
spaces.Box(low=0, high=10, shape=(10,), dtype=np.float32),
spaces.Box(low=-10, high=0, shape=(3 + 2,), dtype=np.float32),
)
)
print("2" * 10)
print(observation_space.sample())
print(type(observation_space.sample()))
reward = spaces.Discrete(4)
print(type(reward.sample()))
|
from pyjamas.ui.Composite import Composite
from pyjamas import DeferredCommand
from SchoolCalendarService import SchoolCalendarService
from DynaTableWidget import DynaTableWidget
from Person import Person
from Student import Student
from Professor import Professor
class CalendarProvider:
    """Feeds schedule rows to the table widget, caching the last fetch."""

    def __init__(self, owner):
        self.owner = owner  # SchoolCalendarWidget that owns this provider
        self.calService = SchoolCalendarService()
        # Cache of the most recent query so an identical repeat request
        # skips the remote call.
        self.lastStartRow = -1
        self.lastMaxRows = -1
        self.lastPeople = []

    def updateRowData(self, startRow, maxRows, acceptor):
        """Deliver rows to `acceptor`, serving from cache when possible."""
        if startRow == self.lastStartRow and maxRows == self.lastMaxRows:
            self.pushResults(acceptor, startRow, self.lastPeople)
            return
        # Cache miss: fetch asynchronously; the handler updates the cache
        # and pushes the rows on completion.
        handler = CalendarProviderHandler(self, acceptor, startRow, maxRows)
        self.calService.getPeople(startRow, maxRows, handler)

    def pushResults(self, acceptor, startRow, people):
        """Convert Person objects into display rows and hand them over."""
        days = self.owner.daysFilter
        rows = [[p.getName(), p.getDescription(), p.getSchedule(days)]
                for p in people]
        acceptor.accept(startRow, rows)
class CalendarProviderHandler:
    """Async callback that records results in the provider's cache."""

    def __init__(self, owner, acceptor, startRow, maxRows):
        self.owner = owner        # the CalendarProvider that issued the query
        self.acceptor = acceptor  # destination for the resulting rows
        self.startRow = startRow
        self.maxRows = maxRows

    def onRemoteResponse(self, response, requestInfo):
        """Cache the query parameters and results, then push the rows."""
        provider = self.owner
        provider.lastStartRow = self.startRow
        provider.lastMaxRows = self.maxRows
        provider.lastPeople = response
        provider.pushResults(self.acceptor, self.startRow, response)

    def onRemoteError(self, code, message, request):
        """Forward the failure message to the acceptor."""
        self.acceptor.failed(message)
class SchoolCalendarWidget(Composite):
    """Composite widget wrapping a DynaTableWidget of the school calendar."""

    def __init__(self, visibleRows):
        Composite.__init__(self)
        self.calProvider = CalendarProvider(self)
        # One include-flag per weekday; Person.getSchedule filters on it.
        self.daysFilter = [True] * 7
        # Collapses bursts of filter changes into a single refresh.
        self.pendingRefresh = False
        self.dynaTable = DynaTableWidget(
            self.calProvider,
            ["Name", "Description", "Schedule"],
            ["name", "desc", "sched"],
            visibleRows)
        self.initWidget(self.dynaTable)

    def getDayIncluded(self, day):
        """Return whether the given weekday index is currently shown."""
        return self.daysFilter[day]

    def onLoad(self):
        # Populate the table once the widget is attached to the page.
        self.dynaTable.refresh()

    def setDayIncluded(self, day, included):
        """Toggle a weekday filter, scheduling one deferred refresh."""
        if self.daysFilter[day] == included:
            return  # no change
        self.daysFilter[day] = included
        if not self.pendingRefresh:
            self.pendingRefresh = True
            DeferredCommand.add(self)  # execute() will run later

    def execute(self):
        """DeferredCommand callback: perform the batched refresh."""
        self.pendingRefresh = False
        self.dynaTable.refresh()
|
def solution(nums):
    '''
    type nums == list()

    NOTE(review): unimplemented stub -- the body contains only this
    docstring, so calling it returns None.
    '''
|
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for json and yaml manipulations of oscal files."""
import datetime
import pathlib
import pytest
from ruamel.yaml import YAML
from ruamel.yaml.parser import ParserError
import trestle.common.const as const
import trestle.oscal.component as component
yaml_path = pathlib.Path('tests/data/yaml/')
json_path = pathlib.Path('tests/data/json/')
encoding = const.FILE_ENCODING
def test_yaml_load() -> None:
    """Test yaml load for both well-formed and malformed files."""
    yaml = YAML(typ='safe')
    # happy path: a valid yaml file loads to a non-None object.
    # Context managers close the handles the original version leaked.
    with (yaml_path / 'good_simple.yaml').open('r', encoding=encoding) as read_file:
        obj = yaml.load(read_file)
    assert obj is not None
    # unhappy path: malformed yaml raises ParserError
    with pytest.raises(ParserError):
        with (yaml_path / 'bad_simple.yaml').open('r', encoding=encoding) as read_file:
            obj = yaml.load(read_file)
def test_yaml_dump(tmp_path: pathlib.Path) -> None:
    """Test that a yaml document survives a load/dump/load round trip."""
    component_name = 'good_component.yaml'
    tmp_path = pathlib.Path(tmp_path)
    yaml = YAML(typ='safe')
    # happy path: load the reference component.  Context managers close
    # all handles (the original leaked the final read handle).
    with (yaml_path / component_name).open('r', encoding=encoding) as read_file:
        component_obj = yaml.load(read_file)
    assert component_obj is not None
    # dump it to a temp file ...
    dump_name = tmp_path / component_name
    with dump_name.open('w', encoding=encoding) as write_file:
        yaml.dump(component_obj, write_file)
    # ... and reload; the contents must be unchanged.
    with dump_name.open('r', encoding=encoding) as read_file:
        saved_component = yaml.load(read_file)
    assert saved_component is not None
    assert saved_component == component_obj
def test_oscal_model(tmp_path: pathlib.Path) -> None:
    """Test pydantic oscal model."""
    good_component_name = 'good_component.yaml'
    tmp_path = pathlib.Path(tmp_path)
    # load good component
    read_file = yaml_path / good_component_name
    assert read_file.exists()
    component_obj = component.ComponentDefinition.oscal_read(read_file)
    assert component_obj is not None
    # write the oscal component def out as yaml
    dump_name = tmp_path / good_component_name
    component_obj.oscal_write(dump_name)
    # read it back in
    component_reload = component.ComponentDefinition.oscal_read(dump_name)
    assert component_reload is not None
    # confirm same
    assert component_obj == component_reload
    # confirm it really is checking the time
    component_reload.metadata.last_modified = datetime.datetime.now()
    assert component_obj != component_reload
    # load good component with different timezone
    read_file = yaml_path / 'good_component_diff_tz.yaml'
    component_diff_tz = component.ComponentDefinition.oscal_read(read_file)
    assert component_diff_tz is not None
    # confirm same since different timezones but same utc time
    assert component_obj == component_diff_tz
    # try to load file with no timezone specified; the load must fail.
    # (The original ended with `assert AssertionError()`, which is always
    # truthy and could never fail; pytest.raises actually enforces it.)
    read_file = yaml_path / 'bad_component_no_tz.yaml'
    with pytest.raises(Exception):
        _ = component.ComponentDefinition.oscal_read(read_file)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad with accumulator decay for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import kv_variable_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import slot_creator
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.AdagradDecayOptimizer")
class AdagradDecayOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm with accumulator decay.
Different from the original Adagrad algorithm, AdagradDecay performs decay
at given step with given rate. So that the accumulator will not be infinity.
"""
def __init__(self, learning_rate, global_step,
initial_accumulator_value=0.1,
accumulator_decay_step=100000,
accumulator_decay_rate=0.9,
use_locking=False, name="AdagradDecay"):
"""Construct a new AdagradDecay optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
global_step: global step variable.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
accumulator_decay_step: When global_step reaches times of accumulator_decay_step,
accumulator will be decayed with accumulator_decay_rate.
accumulator *= accumulator_decay_rate
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "AdagradDecay".
Raises:
ValueError: If the `initial_accumulator_value`, `accumulator_decay_step`
or `accumulator_decay_rate` is invalid.
"""
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value must be positive: %s" %
initial_accumulator_value)
if accumulator_decay_step <= 0:
raise ValueError("accumulator_decay_step must be positive: %s" %
accumulator_decay_step)
if accumulator_decay_rate <= 0.0 or accumulator_decay_rate >= 1.0:
raise ValueError("accumulator_decay_rate must be in (0.0, 1.0): %s" %
accumulator_decay_rate)
super(AdagradDecayOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._global_step = global_step
self._initial_accumulator_value = initial_accumulator_value
self._accumulator_decay_step = accumulator_decay_step
self._accumulator_decay_rate = accumulator_decay_rate
# Created in Initialize.
self._learning_rate_tensor = None
self._accumulator_decay_step_tensor = None
self._accumulator_decay_rate_tensor = None
self._accumulator_baseline_tensor = None
self._global_step_on_worker = None
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
dtype = v.dtype.base_dtype
v_shape = v.get_shape()
if v_shape.is_fully_defined():
init = init_ops.constant_initializer(self._initial_accumulator_value,
dtype=dtype)
else:
# Use a Tensor instead of initializer if variable does not have static
# shape.
init_constant = gen_array_ops.fill(array_ops.shape(v),
self._initial_accumulator_value)
init = math_ops.cast(init_constant, dtype)
self._get_or_make_slot_with_initializer(v, init, v_shape, dtype,
"accumulator", self._name,
slot_config=slot_creator.SlotConfig(slot_index=1, slot_num=2))
self._get_or_make_slot_with_initializer(
v, init_ops.zeros_initializer(self._global_step.dtype),
v_shape, self._global_step.dtype, "accumulator_decay_power", self._name,
slot_config=slot_creator.SlotConfig(slot_index=2, slot_num=2))
# A slot to record how many times of decay has been operated on this index.
# For a variable whose gradients are dense, only a scalar is needed.
# But we have not known that whose gradients are sparse until ApplyGradients.
# So we create a slot with shape of the var's first dimension.
# For case of sparse gradients, such as variable used for sparse embedding,
# the slot will have the shape of the var's first dimension, which will be
# updated based on the gradient indices.
# decay_powers_shape = []
# if v_shape.ndims is not None and v_shape.ndims > 0:
# decay_powers_shape.append(v_shape.dims[0])
# self._get_or_make_slot_with_initializer(
# v, init_ops.zeros_initializer(self._global_step.dtype),
# tensor_shape.TensorShape(decay_powers_shape),
# self._global_step.dtype, "accumulator_decay_power", self._name)
# The above code works correctly in UT, but makes some SHAPE error when a Saver used.
# It occurs since TF1.4 and has not figured out the reason.
# So we use a slot with the shape as v, which may introduce waste of memory.
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
self._accumulator_decay_step_tensor = math_ops.cast(
ops.convert_to_tensor(self._accumulator_decay_step,
name="accumulator_decay_step"),
self._global_step.dtype.base_dtype)
self._accumulator_decay_rate_tensor = ops.convert_to_tensor(
self._accumulator_decay_rate, name="accumulator_decay_rate")
self._accumulator_baseline_tensor = ops.convert_to_tensor(
self._initial_accumulator_value, name="accumulator_baseline")
# Performance optimization so that worker creates a copy of the global step
# to avoid overloading the parameter server holding the global step.
with ops.colocate_with(self._learning_rate_tensor):
self._global_step_on_worker = array_ops.identity(self._global_step) + 1
def _apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
acc_decay_power = self.get_slot(var, "accumulator_decay_power")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.apply_adagrad_decay(
var,
acc,
acc_decay_power,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
self._accumulator_decay_step_tensor,
math_ops.cast(self._accumulator_decay_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._accumulator_baseline_tensor, var.dtype.base_dtype),
global_step,
grad,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
acc_decay_power = self.get_slot(var, "accumulator_decay_power")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.resource_apply_adagrad_decay(
var.handle,
acc.handle,
acc_decay_power.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
self._accumulator_decay_step_tensor,
math_ops.cast(self._accumulator_decay_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._accumulator_baseline_tensor, grad.dtype.base_dtype),
global_step,
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
acc = self.get_slot(var, "accumulator")
acc_decay_power = self.get_slot(var, "accumulator_decay_power")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.sparse_apply_adagrad_decay(
var,
acc,
acc_decay_power,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
self._accumulator_decay_step_tensor,
math_ops.cast(self._accumulator_decay_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._accumulator_baseline_tensor, var.dtype.base_dtype),
global_step,
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
acc = self.get_slot(var, "accumulator")
acc_decay_power = self.get_slot(var, "accumulator_decay_power")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
if isinstance(var, kv_variable_ops.EmbeddingVariable):
return training_ops.kv_resource_sparse_apply_adagrad_decay(
var.handle,
acc.handle,
acc_decay_power.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
self._accumulator_decay_step_tensor,
math_ops.cast(self._accumulator_decay_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._accumulator_baseline_tensor, grad.dtype.base_dtype),
global_step,
grad,
indices,
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_adagrad_decay(
var.handle,
acc.handle,
acc_decay_power.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
self._accumulator_decay_step_tensor,
math_ops.cast(self._accumulator_decay_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._accumulator_baseline_tensor, grad.dtype.base_dtype),
global_step,
grad,
indices,
use_locking=self._use_locking)
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
import pytest
from case import patch
from kombu.utils.json import dumps
from celery.bin.call import call
from celery.five import WhateverIO
class test_call:
    """Tests for the `celery call` command-line program."""

    def setup(self):
        # Register a trivial task on the test app fixture.
        @self.app.task(shared=False)
        def add(x, y):
            return x + y
        self.add = add

    @patch('celery.app.base.Celery.send_task')
    def test_run(self, send_task):
        a = call(app=self.app, stderr=WhateverIO(), stdout=WhateverIO())
        a.run(self.add.name)
        send_task.assert_called()
        a.run(self.add.name,
              args=dumps([4, 4]),
              kwargs=dumps({'x': 2, 'y': 2}))
        # The original read `assert ..., [4 == 4]` -- an assert with a
        # bogus message that could never fail; compare the args properly.
        assert send_task.call_args[1]['args'] == [4, 4]
        assert send_task.call_args[1]['kwargs'] == {'x': 2, 'y': 2}
        a.run(self.add.name, expires=10, countdown=10)
        assert send_task.call_args[1]['expires'] == 10
        assert send_task.call_args[1]['countdown'] == 10
        # ISO-formatted expiry strings are parsed back to a datetime.
        now = datetime.now()
        iso = now.isoformat()
        a.run(self.add.name, expires=iso)
        assert send_task.call_args[1]['expires'] == now
        # Unparseable expiry strings raise.
        with pytest.raises(ValueError):
            a.run(self.add.name, expires='foobaribazibar')
|
import os
import random
import time
from copy import deepcopy
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from src.data.data_iterator import DataIterator
from src.data.dataset import TextLineDataset, ZipDataset
from src.data.vocabulary import Vocabulary
from src.decoding import beam_search, ensemble_beam_search
from src.metric.bleu_scorer import SacreBLEUScorer
from src.models import build_model
from src.modules.criterions import NMTCriterion
from src.optim import Optimizer
from src.optim.lr_scheduler import ReduceOnPlateauScheduler, NoamScheduler, RsqrtScheduler
from src.utils.common_utils import *
from src.utils.configs import default_configs, pretty_configs
from src.utils.logging import *
from src.utils.moving_average import MovingAverage
BOS = Vocabulary.BOS
EOS = Vocabulary.EOS
PAD = Vocabulary.PAD
def set_seed(seed):
    """Seed every RNG the training run relies on.

    Covers Python's `random`, numpy, and torch (CPU and all GPUs), and
    forces deterministic cuDNN kernels so runs with the same seed
    reproduce identical results.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
def load_model_parameters(path, map_location="cpu"):
state_dict = torch.load(path, map_location=map_location)
if "model" in state_dict:
return state_dict["model"]
return state_dict
def split_shard(*inputs, split_size=1):
    """Split parallel sequences into shards, sorted by target length.

    Args:
        *inputs: parallel lists; the LAST one drives the sort (its
            elements' lengths are sorted ascending).
        split_size: number of shards to emit; <= 1 disables splitting.

    Yields:
        For split_size <= 1 (or more shards than items) the inputs
        unchanged; otherwise one tuple per shard, holding a slice of each
        input covering that shard of the sorted data.
    """
    if split_size <= 1:
        yield inputs
        return
    lengths = [len(s) for s in inputs[-1]]
    sorted_indices = np.argsort(lengths)
    # Reorder every input by ascending length of the last input's items.
    inputs = [
        [inp[ii] for ii in sorted_indices]
        for inp in inputs
    ]
    total_batch = sorted_indices.shape[0]  # total number of items
    if split_size >= total_batch:
        yield inputs
        return
    shard_size = total_batch // split_size
    _indices = list(range(total_batch))[::shard_size] + [total_batch]
    for beg, end in zip(_indices[:-1], _indices[1:]):
        # Materialize each shard eagerly.  The original yielded a lazy
        # generator that closed over the loop variables `beg`/`end`, so a
        # caller that collected the shards before consuming them got the
        # final slice repeated (classic late-binding bug).
        yield tuple(inp[beg:end] for inp in inputs)
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
    """Wrap token-id sequences with BOS/EOS and pad them into LongTensors.

    Args:
        seqs_x: list of source token-id lists.
        seqs_y: optional list of target token-id lists.
        cuda: move the resulting tensors to GPU when True.
        batch_first: when False, transpose to (time, batch) layout.

    Returns:
        Padded tensor `x`, or the pair `(x, y)` when `seqs_y` is given.
    """

    def _pad_to_tensor(samples, pad, batch_first=True, cuda=True):
        # Right-pad every sample to the longest one with `pad`.
        lengths = [len(s) for s in samples]
        x_np = np.full((len(samples), max(lengths)), fill_value=pad, dtype='int64')
        for row, sample in enumerate(samples):
            x_np[row, :lengths[row]] = sample
        if batch_first is False:
            x_np = np.transpose(x_np, [1, 0])
        x = torch.tensor(x_np)
        return x.cuda() if cuda is True else x

    seqs_x = [[BOS] + s + [EOS] for s in seqs_x]
    x = _pad_to_tensor(samples=seqs_x, pad=PAD,
                       cuda=cuda, batch_first=batch_first)
    if seqs_y is None:
        return x

    seqs_y = [[BOS] + s + [EOS] for s in seqs_y]
    y = _pad_to_tensor(seqs_y, pad=PAD,
                       cuda=cuda, batch_first=batch_first)
    return x, y
def compute_forward(model,
                    critic,
                    seqs_x,
                    seqs_y,
                    eval=False,
                    normalization=1.0,
                    norm_by_words=False
                    ):
    """Run one forward pass and, in training mode, the backward pass too.

    :type model: nn.Module
    :type critic: NMTCriterion

    The target batch is split into decoder input (all but the last token)
    and labels (all but the first).  Returns the scalar loss value.
    """
    y_inp = seqs_y[:, :-1].contiguous()
    y_label = seqs_y[:, 1:].contiguous()
    # Per-sentence non-PAD token counts, used for word-level normalization.
    words_norm = y_label.ne(PAD).float().sum(1)

    if eval:
        model.eval()
        critic.eval()
        # Evaluation: loss only, no gradients.
        with torch.no_grad():
            log_probs = model(seqs_x, y_inp)
            loss = critic(inputs=log_probs, labels=y_label, normalization=normalization, reduce=True)
        return loss.item()

    model.train()
    critic.train()
    # Training: accumulate gradients via backward().
    with torch.enable_grad():
        log_probs = model(seqs_x, y_inp)
        loss = critic(inputs=log_probs, labels=y_label, reduce=False, normalization=normalization)
        loss = loss.div(words_norm).sum() if norm_by_words else loss.sum()
        torch.autograd.backward(loss)
    return loss.item()
def loss_validation(model, critic, valid_iterator):
    """Compute the average per-sentence loss over the validation set.

    :type model: Transformer
    :type critic: NMTCriterion
    :type valid_iterator: DataIterator
    """
    n_sents = 0
    n_tokens = 0.0  # tallied for parity with the original; not used below
    sum_loss = 0.0

    for batch in valid_iterator.build_generator():
        _, seqs_x, seqs_y = batch
        n_sents += len(seqs_x)
        n_tokens += sum(len(s) for s in seqs_y)

        x, y = prepare_data(seqs_x, seqs_y, cuda=GlobalNames.USE_GPU)
        loss = compute_forward(model=model,
                               critic=critic,
                               seqs_x=x,
                               seqs_y=y,
                               eval=True)
        if np.isnan(loss):
            # Warn but keep accumulating, matching the original behaviour.
            WARN("NaN detected!")
        sum_loss += float(loss)

    return float(sum_loss / n_sents)
def bleu_validation(uidx,
                    valid_iterator,
                    model,
                    bleu_scorer,
                    vocab_tgt,
                    batch_size,
                    valid_dir="./valid",
                    max_steps=10,
                    beam_size=5,
                    alpha=-1.0
                    ):
    """Decode the validation set with beam search and score it with BLEU.

    Args:
        uidx: current update step; used to name the hypothesis file.
        valid_iterator: DataIterator over the validation data.
        model: the NMT model to evaluate (switched to eval mode here).
        bleu_scorer: scorer computing corpus BLEU from a hypothesis file.
        vocab_tgt: target-side vocabulary for id-to-token conversion.
        batch_size: decoding batch size.
        valid_dir: directory where hypothesis files are written.
        max_steps: maximum decoding steps for beam search.
        beam_size: beam width.
        alpha: length-penalty alpha (negative presumably disables it --
            confirm against beam_search).

    Returns:
        The corpus-level BLEU score of the decoded hypotheses.
    """
    model.eval()

    numbers = []  # original sequence numbers, used to restore corpus order
    trans = []    # detokenized translations, in decoding order

    infer_progress_bar = tqdm(total=len(valid_iterator),
                              desc=' - (Infer)  ',
                              unit="sents")

    valid_iter = valid_iterator.build_generator(batch_size=batch_size)

    for batch in valid_iter:
        seq_nums = batch[0]
        numbers += seq_nums

        seqs_x = batch[1]
        infer_progress_bar.update(len(seqs_x))

        x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)

        with torch.no_grad():
            word_ids = beam_search(nmt_model=model, beam_size=beam_size, max_steps=max_steps, src_seqs=x, alpha=alpha)

        word_ids = word_ids.cpu().numpy().tolist()

        # Append result: keep only the best hypothesis (beam index 0),
        # strip PAD ids and stop at the first EOS.
        for sent_t in word_ids:
            sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
            x_tokens = []

            for wid in sent_t[0]:
                if wid == EOS:
                    break
                x_tokens.append(vocab_tgt.id2token(wid))

            if len(x_tokens) > 0:
                trans.append(vocab_tgt.tokenizer.detokenize(x_tokens))
            else:
                # Empty hypothesis: emit the EOS token so line counts
                # still match the reference file.
                trans.append('%s' % vocab_tgt.id2token(EOS))

    # Restore the original corpus order before scoring.
    origin_order = np.argsort(numbers).tolist()
    trans = [trans[ii] for ii in origin_order]

    infer_progress_bar.close()

    if not os.path.exists(valid_dir):
        os.mkdir(valid_dir)

    hyp_path = os.path.join(valid_dir, 'trans.iter{0}.txt'.format(uidx))

    with open(hyp_path, 'w') as f:
        for line in trans:
            f.write('%s\n' % line)

    with open(hyp_path) as f:
        bleu_v = bleu_scorer.corpus_bleu(f)

    return bleu_v
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
    """Partially load pretrained weights into `nmt_model`, name by name.

    Args:
        nmt_model: target model whose parameters are overwritten.
        pretrain_path ('str'): path to the pretrained checkpoint; an empty
            string disables loading entirely.
        device: map_location passed to torch.load.
        exclude_prefix ('list'): parameter-name prefixes to skip when
            loading.  (The original docstring called this a dict and also
            documented a nonexistent `map_dict` argument.)
    """
    if exclude_prefix is None:
        exclude_prefix = []
    if pretrain_path != "":
        INFO("Loading pretrained model from {}".format(pretrain_path))
        pretrain_params = torch.load(pretrain_path, map_location=device)
        for name, params in pretrain_params.items():
            # Skip any parameter whose name matches an excluded prefix.
            if any(name.startswith(pp) for pp in exclude_prefix):
                continue
            INFO("Loading param: {}...".format(name))
            try:
                # strict=False: load this single tensor, ignore the rest.
                nmt_model.load_state_dict({name: params}, strict=False)
            except Exception as e:
                # The original logged str(Exception) -- the class object,
                # not the actual error type; report the real exception name.
                WARN("{}: {}".format(type(e).__name__, e))
        INFO("Pretrained model loaded.")
def train(FLAGS):
    """
    Full training driver: builds data iterators, model, optimizer and
    scheduler, then loops over epochs with periodic speed reporting,
    checkpointing, loss validation (with LR annealing) and BLEU validation
    (with best-model saving and early stopping).

    FLAGS:
        saveto: str
        reload: store_true
        config_path: str
        pretrain_path: str, default=""
        model_name: str
        log_path: str
    """
    # write log of training to file.
    write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S")))
    GlobalNames.USE_GPU = FLAGS.use_gpu
    # BUGFIX: the device strings were inverted (GPU runs selected "cpu" and
    # CPU runs selected "cuda:0"). CURRENT_DEVICE is used as map_location
    # when loading the pretrained checkpoint, so it must match the device
    # the model actually runs on.
    if GlobalNames.USE_GPU:
        CURRENT_DEVICE = "cuda:0"
    else:
        CURRENT_DEVICE = "cpu"
    config_path = os.path.abspath(FLAGS.config_path)
    with open(config_path.strip()) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input; configs are assumed locally authored.
        configs = yaml.load(f)
    INFO(pretty_configs(configs))
    # Add default configs
    configs = default_configs(configs)
    data_configs = configs['data_configs']
    model_configs = configs['model_configs']
    optimizer_configs = configs['optimizer_configs']
    training_configs = configs['training_configs']
    GlobalNames.SEED = training_configs['seed']
    set_seed(GlobalNames.SEED)
    best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX)
    timer = Timer()
    # ================================================================================== #
    # Load Data
    INFO('Loading data...')
    timer.tic()
    # Generate target dictionary
    vocab_src = Vocabulary(**data_configs["vocabularies"][0])
    vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
    # Scale batch/buffer sizes by update_cycle so each optimizer step sees
    # the configured effective batch size (gradient accumulation).
    train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"])
    train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"])
    train_bitext_dataset = ZipDataset(
        TextLineDataset(data_path=data_configs['train_data'][0],
                        vocabulary=vocab_src,
                        max_len=data_configs['max_len'][0],
                        ),
        TextLineDataset(data_path=data_configs['train_data'][1],
                        vocabulary=vocab_tgt,
                        max_len=data_configs['max_len'][1],
                        ),
        shuffle=training_configs['shuffle']
    )
    valid_bitext_dataset = ZipDataset(
        TextLineDataset(data_path=data_configs['valid_data'][0],
                        vocabulary=vocab_src,
                        ),
        TextLineDataset(data_path=data_configs['valid_data'][1],
                        vocabulary=vocab_tgt,
                        )
    )
    training_iterator = DataIterator(dataset=train_bitext_dataset,
                                     batch_size=train_batch_size,
                                     use_bucket=training_configs['use_bucket'],
                                     buffer_size=train_buffer_size,
                                     batching_func=training_configs['batching_key'])
    # numbering=True: batches carry original sentence positions so BLEU
    # validation can restore input order after bucketing.
    valid_iterator = DataIterator(dataset=valid_bitext_dataset,
                                  batch_size=training_configs['valid_batch_size'],
                                  use_bucket=True, buffer_size=100000, numbering=True)
    bleu_scorer = SacreBLEUScorer(reference_path=data_configs["bleu_valid_reference"],
                                  num_refs=data_configs["num_refs"],
                                  lang_pair=data_configs["lang_pair"],
                                  sacrebleu_args=training_configs["bleu_valid_configs"]['sacrebleu_args'],
                                  postprocess=training_configs["bleu_valid_configs"]['postprocess']
                                  )
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    lrate = optimizer_configs['learning_rate']
    is_early_stop = False
    # ================================ Begin ======================================== #
    # Build Model & Optimizer
    # We would do steps below on after another
    # 1. build models & criterion
    # 2. move models & criterion to gpu if needed
    # 3. load pre-trained model if needed
    # 4. build optimizer
    # 5. build learning rate scheduler if needed
    # 6. load checkpoints if needed
    # 0. Initial
    model_collections = Collections()
    checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
                             num_max_keeping=training_configs['num_kept_checkpoints']
                             )
    best_model_saver = Saver(save_prefix=best_model_prefix, num_max_keeping=training_configs['num_kept_best_model'])
    # 1. Build Model & Criterion
    INFO('Building model...')
    timer.tic()
    nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
                            n_tgt_vocab=vocab_tgt.max_n_words, **model_configs)
    INFO(nmt_model)
    critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
    INFO(critic)
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    # 2. Move to GPU
    if GlobalNames.USE_GPU:
        nmt_model = nmt_model.cuda()
        critic = critic.cuda()
    # 3. Load pretrained model if needed
    load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
    # 4. Build optimizer
    INFO('Building Optimizer...')
    optim = Optimizer(name=optimizer_configs['optimizer'],
                      model=nmt_model,
                      lr=lrate,
                      grad_clip=optimizer_configs['grad_clip'],
                      optim_args=optimizer_configs['optimizer_params']
                      )
    # 5. Build scheduler for optimizer if needed
    if optimizer_configs['schedule_method'] is not None:
        if optimizer_configs['schedule_method'] == "loss":
            scheduler = ReduceOnPlateauScheduler(optimizer=optim,
                                                 **optimizer_configs["scheduler_configs"]
                                                 )
        elif optimizer_configs['schedule_method'] == "noam":
            scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
        elif optimizer_configs["schedule_method"] == "rsqrt":
            scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs["scheduler_configs"])
        else:
            WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method']))
            scheduler = None
    else:
        scheduler = None
    # 6. build moving average
    if training_configs['moving_average_method'] is not None:
        ma = MovingAverage(moving_average_method=training_configs['moving_average_method'],
                           named_params=nmt_model.named_parameters(),
                           alpha=training_configs['moving_average_alpha'])
    else:
        ma = None
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    # Reload from latest checkpoint
    if FLAGS.reload:
        checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler,
                                     collections=model_collections, ma=ma)
    # ================================================================================== #
    # Prepare training
    # Counters are restored from the checkpoint collections when reloading.
    eidx = model_collections.get_collection("eidx", [0])[-1]
    uidx = model_collections.get_collection("uidx", [0])[-1]
    bad_count = model_collections.get_collection("bad_count", [0])[-1]
    oom_count = model_collections.get_collection("oom_count", [0])[-1]
    summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
    cum_samples = 0
    cum_words = 0
    best_valid_loss = 1.0 * 1e10  # Max Float
    INFO('Begin training...')
    while True:
        summary_writer.add_scalar("Epoch", (eidx + 1), uidx)
        # Build iterator and progress bar
        training_iter = training_iterator.build_generator()
        training_progress_bar = tqdm(desc=' - (Epoch %d) ' % eidx,
                                     total=len(training_iterator),
                                     unit="sents"
                                     )
        for batch in training_iter:
            uidx += 1
            # Step-based schedulers advance every update; the "loss"
            # scheduler is stepped after loss validation instead.
            if optimizer_configs["schedule_method"] is not None and optimizer_configs["schedule_method"] != "loss":
                scheduler.step(global_step=uidx)
            seqs_x, seqs_y = batch
            n_samples_t = len(seqs_x)
            n_words_t = sum(len(s) for s in seqs_y)
            cum_samples += n_samples_t
            cum_words += n_words_t
            training_progress_bar.update(n_samples_t)
            optim.zero_grad()
            try:
                # Prepare data shard-by-shard (gradient accumulation), then
                # apply a single optimizer step for the whole batch.
                for seqs_x_t, seqs_y_t in split_shard(seqs_x, seqs_y, split_size=training_configs['update_cycle']):
                    x, y = prepare_data(seqs_x_t, seqs_y_t, cuda=GlobalNames.USE_GPU)
                    loss = compute_forward(model=nmt_model,
                                           critic=critic,
                                           seqs_x=x,
                                           seqs_y=y,
                                           eval=False,
                                           normalization=n_samples_t,
                                           norm_by_words=training_configs["norm_by_words"])
                optim.step()
            except RuntimeError as e:
                if 'out of memory' in str(e):
                    # Best-effort recovery: drop this batch and its gradients.
                    print('| WARNING: ran out of memory, skipping batch')
                    oom_count += 1
                    optim.zero_grad()
                else:
                    raise e
            if ma is not None and eidx >= training_configs['moving_average_start_epoch']:
                ma.step()
            # ================================================================================== #
            # Display some information
            if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
                # words per second and sents per second
                words_per_sec = cum_words / (timer.toc(return_seconds=True))
                sents_per_sec = cum_samples / (timer.toc(return_seconds=True))
                lrate = list(optim.get_lrate())[0]
                summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx)
                summary_writer.add_scalar("Speed(sents/sec)", scalar_value=sents_per_sec, global_step=uidx)
                summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx)
                summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx)
                # Reset timer
                timer.tic()
                cum_words = 0
                cum_samples = 0
            # ================================================================================== #
            # Saving checkpoints
            if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
                model_collections.add_to_collection("uidx", uidx)
                model_collections.add_to_collection("eidx", eidx)
                model_collections.add_to_collection("bad_count", bad_count)
                if not is_early_stop:
                    checkpoint_saver.save(global_step=uidx, model=nmt_model, optim=optim, lr_scheduler=scheduler,
                                          collections=model_collections, ma=ma)
            # ================================================================================== #
            # Loss Validation & Learning rate annealing
            if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'],
                                       debug=FLAGS.debug):
                # Validate with moving-average weights, restore raw weights after.
                if ma is not None:
                    origin_state_dict = deepcopy(nmt_model.state_dict())
                    nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
                valid_loss = loss_validation(model=nmt_model,
                                             critic=critic,
                                             valid_iterator=valid_iterator,
                                             )
                model_collections.add_to_collection("history_losses", valid_loss)
                min_history_loss = np.array(model_collections.get_collection("history_losses")).min()
                summary_writer.add_scalar("loss", valid_loss, global_step=uidx)
                summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx)
                best_valid_loss = min_history_loss
                if ma is not None:
                    nmt_model.load_state_dict(origin_state_dict)
                    del origin_state_dict
                if optimizer_configs["schedule_method"] == "loss":
                    scheduler.step(metric=best_valid_loss)
            # ================================================================================== #
            # BLEU Validation & Early Stop
            if should_trigger_by_steps(global_step=uidx, n_epoch=eidx,
                                       every_n_step=training_configs['bleu_valid_freq'],
                                       min_step=training_configs['bleu_valid_warmup'],
                                       debug=FLAGS.debug):
                if ma is not None:
                    origin_state_dict = deepcopy(nmt_model.state_dict())
                    nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
                valid_bleu = bleu_validation(uidx=uidx,
                                             valid_iterator=valid_iterator,
                                             batch_size=training_configs["bleu_valid_batch_size"],
                                             model=nmt_model,
                                             bleu_scorer=bleu_scorer,
                                             vocab_tgt=vocab_tgt,
                                             valid_dir=FLAGS.valid_path,
                                             max_steps=training_configs["bleu_valid_configs"]["max_steps"],
                                             beam_size=training_configs["bleu_valid_configs"]["beam_size"],
                                             alpha=training_configs["bleu_valid_configs"]["alpha"]
                                             )
                model_collections.add_to_collection(key="history_bleus", value=valid_bleu)
                best_valid_bleu = float(np.array(model_collections.get_collection("history_bleus")).max())
                summary_writer.add_scalar("bleu", valid_bleu, uidx)
                summary_writer.add_scalar("best_bleu", best_valid_bleu, uidx)
                # If model get new best valid bleu score
                if valid_bleu >= best_valid_bleu:
                    bad_count = 0
                    if is_early_stop is False:
                        # 1. save the best model
                        torch.save(nmt_model.state_dict(), best_model_prefix + ".final")
                        # 2. record all several best models
                        best_model_saver.save(global_step=uidx, model=nmt_model)
                else:
                    bad_count += 1
                    # At least one epoch should be traversed
                    if bad_count >= training_configs['early_stop_patience'] and eidx > 0:
                        is_early_stop = True
                        WARN("Early Stop!")
                summary_writer.add_scalar("bad_count", bad_count, uidx)
                if ma is not None:
                    nmt_model.load_state_dict(origin_state_dict)
                    del origin_state_dict
                # NOTE(review): valid_loss is only defined once loss
                # validation has run; configure loss_valid_freq <=
                # bleu_valid_freq or this line can raise NameError.
                INFO("{0} Loss: {1:.2f} BLEU: {2:.2f} lrate: {3:6f} patience: {4}".format(
                    uidx, valid_loss, valid_bleu, lrate, bad_count
                ))
        training_progress_bar.close()
        eidx += 1
        if eidx > training_configs["max_epochs"]:
            break
def translate(FLAGS):
    """Decode FLAGS.source_path with a single NMT model via beam search and
    write the top ``keep_n`` hypotheses to ``FLAGS.saveto.<rank>`` files,
    one file per beam rank."""
    GlobalNames.USE_GPU = FLAGS.use_gpu
    config_path = os.path.abspath(FLAGS.config_path)
    with open(config_path.strip()) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input — confirm configs are always trusted.
        configs = yaml.load(f)
    data_configs = configs['data_configs']
    model_configs = configs['model_configs']
    timer = Timer()
    # ================================================================================== #
    # Load Data
    INFO('Loading data...')
    timer.tic()
    # Generate target dictionary
    vocab_src = Vocabulary(**data_configs["vocabularies"][0])
    vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
    valid_dataset = TextLineDataset(data_path=FLAGS.source_path,
                                    vocabulary=vocab_src)
    # numbering=True: each batch carries the original sentence positions so
    # bucketed output can be restored to input order at the end.
    valid_iterator = DataIterator(dataset=valid_dataset,
                                  batch_size=FLAGS.batch_size,
                                  use_bucket=True, buffer_size=100000, numbering=True)
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    # ================================================================================== #
    # Build Model & Sampler & Validation
    INFO('Building model...')
    timer.tic()
    nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
                            n_tgt_vocab=vocab_tgt.max_n_words, **model_configs)
    nmt_model.eval()
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    INFO('Reloading model parameters...')
    timer.tic()
    params = load_model_parameters(FLAGS.model_path, map_location="cpu")
    nmt_model.load_state_dict(params)
    if GlobalNames.USE_GPU:
        nmt_model.cuda()
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    INFO('Begin...')
    result_numbers = []
    result = []
    n_words = 0
    timer.tic()
    infer_progress_bar = tqdm(total=len(valid_iterator),
                              desc=' - (Infer) ',
                              unit="sents")
    valid_iter = valid_iterator.build_generator()
    for batch in valid_iter:
        numbers, seqs_x = batch
        batch_size_t = len(seqs_x)
        x = prepare_data(seqs_x=seqs_x, cuda=GlobalNames.USE_GPU)
        with torch.no_grad():
            word_ids = beam_search(nmt_model=nmt_model, beam_size=FLAGS.beam_size, max_steps=FLAGS.max_steps,
                                   src_seqs=x, alpha=FLAGS.alpha)
        word_ids = word_ids.cpu().numpy().tolist()
        # Append result
        for sent_t in word_ids:
            # Strip padding from every beam hypothesis of this sentence.
            sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
            result.append(sent_t)
            # Word count of the best (first) hypothesis, for the speed report.
            n_words += len(sent_t[0])
        result_numbers += numbers
        infer_progress_bar.update(batch_size_t)
    infer_progress_bar.close()
    INFO('Done. Speed: {0:.2f} words/sec'.format(n_words / (timer.toc(return_seconds=True))))
    # Convert token ids back to detokenized strings, cutting at EOS.
    translation = []
    for sent in result:
        samples = []
        for trans in sent:
            sample = []
            for w in trans:
                if w == vocab_tgt.EOS:
                    break
                sample.append(vocab_tgt.id2token(w))
            samples.append(vocab_tgt.tokenizer.detokenize(sample))
        translation.append(samples)
    # resume the ordering
    origin_order = np.argsort(result_numbers).tolist()
    translation = [translation[ii] for ii in origin_order]
    # keep_n <= 0 means keep the whole beam.
    keep_n = FLAGS.beam_size if FLAGS.keep_n <= 0 else min(FLAGS.beam_size, FLAGS.keep_n)
    outputs = ['%s.%d' % (FLAGS.saveto, i) for i in range(keep_n)]
    with batch_open(outputs, 'w') as handles:
        for trans in translation:
            for i in range(keep_n):
                if i < len(trans):
                    handles[i].write('%s\n' % trans[i])
                else:
                    # Pad missing hypotheses so all output files stay aligned.
                    handles[i].write('%s\n' % 'eos')
def ensemble_translate(FLAGS):
    """Decode FLAGS.source_path with an ensemble of models via beam search
    and write the top ``keep_n`` hypotheses to ``FLAGS.saveto.<rank>``
    files, one file per beam rank."""
    GlobalNames.USE_GPU = FLAGS.use_gpu
    config_path = os.path.abspath(FLAGS.config_path)
    with open(config_path.strip()) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input — confirm configs are always trusted.
        configs = yaml.load(f)
    data_configs = configs['data_configs']
    model_configs = configs['model_configs']
    timer = Timer()
    # ================================================================================== #
    # Load Data
    INFO('Loading data...')
    timer.tic()
    # Generate target dictionary
    vocab_src = Vocabulary(**data_configs["vocabularies"][0])
    vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
    valid_dataset = TextLineDataset(data_path=FLAGS.source_path,
                                    vocabulary=vocab_src)
    # numbering=True: batches carry original sentence positions so bucketed
    # output can be restored to input order below.
    valid_iterator = DataIterator(dataset=valid_dataset,
                                  batch_size=FLAGS.batch_size,
                                  use_bucket=True, buffer_size=100000, numbering=True)
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    # ================================================================================== #
    # Build Model & Sampler & Validation
    INFO('Building model...')
    timer.tic()
    # Build and load one model per checkpoint path in FLAGS.model_path.
    nmt_models = []
    model_path = FLAGS.model_path
    for ii in range(len(model_path)):
        nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
                                n_tgt_vocab=vocab_tgt.max_n_words, **model_configs)
        nmt_model.eval()
        INFO('Done. Elapsed time {0}'.format(timer.toc()))
        INFO('Reloading model parameters...')
        timer.tic()
        params = load_model_parameters(model_path[ii], map_location="cpu")
        nmt_model.load_state_dict(params)
        if GlobalNames.USE_GPU:
            nmt_model.cuda()
        nmt_models.append(nmt_model)
    INFO('Done. Elapsed time {0}'.format(timer.toc()))
    INFO('Begin...')
    result_numbers = []
    result = []
    n_words = 0
    timer.tic()
    infer_progress_bar = tqdm(total=len(valid_iterator),
                              desc=' - (Infer) ',
                              unit="sents")
    valid_iter = valid_iterator.build_generator()
    for batch in valid_iter:
        numbers, seqs_x = batch
        batch_size_t = len(seqs_x)
        x = prepare_data(seqs_x=seqs_x, cuda=GlobalNames.USE_GPU)
        with torch.no_grad():
            word_ids = ensemble_beam_search(nmt_models=nmt_models, beam_size=FLAGS.beam_size,
                                            max_steps=FLAGS.max_steps,
                                            src_seqs=x, alpha=FLAGS.alpha)
        word_ids = word_ids.cpu().numpy().tolist()
        # Append result
        for sent_t in word_ids:
            # Strip padding from every beam hypothesis of this sentence.
            sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
            result.append(sent_t)
            # Word count of the best (first) hypothesis, for the speed report.
            n_words += len(sent_t[0])
        # BUGFIX: the batch's original sentence numbers were never recorded
        # here (unlike translate()), so np.argsort below sorted an empty
        # list and the reordered translation list came out empty — every
        # output file was written empty.
        result_numbers += numbers
        infer_progress_bar.update(batch_size_t)
    infer_progress_bar.close()
    INFO('Done. Speed: {0:.2f} words/sec'.format(n_words / (timer.toc(return_seconds=True))))
    # Convert token ids back to detokenized strings, cutting at EOS.
    translation = []
    for sent in result:
        samples = []
        for trans in sent:
            sample = []
            for w in trans:
                if w == vocab_tgt.EOS:
                    break
                sample.append(vocab_tgt.id2token(w))
            samples.append(vocab_tgt.tokenizer.detokenize(sample))
        translation.append(samples)
    # resume the ordering
    origin_order = np.argsort(result_numbers).tolist()
    translation = [translation[ii] for ii in origin_order]
    # keep_n <= 0 means keep the whole beam.
    keep_n = FLAGS.beam_size if FLAGS.keep_n <= 0 else min(FLAGS.beam_size, FLAGS.keep_n)
    outputs = ['%s.%d' % (FLAGS.saveto, i) for i in range(keep_n)]
    with batch_open(outputs, 'w') as handles:
        for trans in translation:
            for i in range(keep_n):
                if i < len(trans):
                    handles[i].write('%s\n' % trans[i])
                else:
                    # Pad missing hypotheses so all output files stay aligned.
                    handles[i].write('%s\n' % 'eos')
|
# Day 08 - Part 2
# Advent of Code, day 8 part 2 (Python 2): find the single jmp/nop flip
# that lets the handheld-console program terminate.
import sys

# The solver recurses once per executed instruction, so the default
# recursion limit is raised to cover long puzzle inputs.
sys.setrecursionlimit(10000)
print "Day 08 - Part 2"
with open("./day08-input.txt") as f:
    instructions = f.read().splitlines()
def parse_instruction(instruction):
    """Split an instruction line like "jmp -4" into ("jmp", -4)."""
    command, raw_value = instruction.split()
    return command, int(raw_value)
class ProgramLoop(Exception):
    # Raised when the program revisits an instruction while no speculative
    # jmp/nop flip is active — unexpected for valid puzzle input.
    pass
class ProgramRunner(object):
    # Runs the handheld-console program while repairing it: the first jmp/nop
    # reached is speculatively flipped ("debug mode"); if a loop is then
    # detected, the flip is rolled back and execution resumes from that
    # instruction with the original command, trying the next candidate.
    def __init__(self, instructions):
        # Indices executed so far, in order (loop detection and rollback).
        self.executed_instructions = []
        # Accumulator, updated by "acc" instructions.
        self.acc = 0
        # Snapshot of acc taken on entering debug mode, restored on rollback.
        self.temp_acc = 0
        self.instructions = instructions
        # Index of the currently-flipped instruction, or None.
        self.debug_node_index = None

    def run(self):
        # Entry point: executes from index 0 and reports the final state.
        print "Running Handheld console program"
        if not self.instructions:
            print "You must feed me with instructions."
            return
        self._execute_instruction_by_index(0)
        print "Success!"
        print "Acc value: {}".format(self.acc)
        print "Node debugged: {}".format(self.debug_node_index)

    def _execute_instruction_by_index(self, index, force_disable_debug=False):
        # Checks end — stepping past the last instruction means termination.
        if index >= len(self.instructions):
            return
        # Checks loop — revisiting an index means the active flip (if any)
        # was wrong: roll it back. A loop with no flip active is unexpected.
        if index in self.executed_instructions:
            if self.debug_node_index is not None:
                self._exit_debug_mode()
                return
            else:
                raise ProgramLoop("Something really strange happened")
        self.executed_instructions.append(index)
        instruction = self.instructions[index]
        command, value = parse_instruction(instruction)
        if command == "acc":
            self.acc += value
            self._execute_instruction_by_index(index + 1)
            return
        # First jmp/nop reached with no active flip: speculatively flip it
        # (unless we are re-running this index after a rollback).
        if not force_disable_debug and self.debug_node_index is None:
            self._enter_debug_mode(index)
            if command == "jmp":
                command = "nop"
            else:
                command = "jmp"
        if command == "jmp":
            self._execute_instruction_by_index(index + value)
        else:
            self._execute_instruction_by_index(index + 1)

    def _check_if_nop_change_would_instantly_lead_to_end(self, value):
        # NOTE(review): appears to be dead code — self.current_index is never
        # assigned anywhere in this class, so calling this would raise
        # AttributeError; the "- 5" slack also looks arbitrary. Confirm intent.
        return value + self.current_index >= len(self.instructions) - 5

    def _enter_debug_mode(self, index):
        # Record the flipped index and snapshot the accumulator for rollback.
        print "Entering debug mode for index {}".format(index)
        self.debug_node_index = index
        self.temp_acc = self.acc

    def _exit_debug_mode(self):
        # Rewind history and the accumulator to the state just before the
        # flip, then re-run that instruction with the flip disabled.
        print "Exiting debug mode for node {}".format(self.debug_node_index)
        debug_node_index = self.debug_node_index
        self.debug_node_index = None
        self.executed_instructions = self.executed_instructions[
            : self.executed_instructions.index(debug_node_index)
        ]
        self.acc = self.temp_acc
        self._execute_instruction_by_index(debug_node_index, force_disable_debug=True)
# Run the self-repairing interpreter over the puzzle input; prints the
# accumulator value reached when the repaired program terminates.
program_runner = ProgramRunner(instructions)
program_runner.run()
|
import os
import tempfile
from contextlib import contextmanager, nullcontext
from functools import partial, update_wrapper
import cyvcf2
import hypothesis as ht
import pytest
from api.tests.util import FlaskClientProxy
from vardb.datamodel import allele, annotation
from vardb.deposit.annotationconverters import AnnotationConverters
from vardb.deposit.importers import build_allele_from_record
from vardb.util import DB
from vardb.util.testdatabase import TestDatabase
from vardb.util.vcfiterator import RESERVED_GT_HEADERS, VcfIterator
# Hypothesis test profiles; one is selected at import time via the
# HYPOTHESIS_PROFILE environment variable (default: "default").
ht.settings.register_profile("default", deadline=600)
# "small" caps the example count for quick local runs.
ht.settings.register_profile("small", max_examples=20)
ht.settings.register_profile(
    "extensive",
    max_examples=3000,
    timeout=900,
    suppress_health_check=[ht.HealthCheck.hung_test],
    deadline=2000,
)
# NOTE(review): `timeout=` and `ht.unlimited` were removed in newer
# Hypothesis releases — confirm the pinned version still supports them.
ht.settings.register_profile(
    "soak",
    max_examples=1_000_000,
    timeout=ht.unlimited,
    suppress_health_check=[ht.HealthCheck.hung_test],
    deadline=2000,
)
hypothesis_profile = os.environ.get("HYPOTHESIS_PROFILE", "default").lower()
ht.settings.load_profile(hypothesis_profile)
@pytest.fixture
def session(request):
    """Yield a database session backed by a fresh connection.

    The session is closed and the connection dropped on teardown.

    Note: switched from the deprecated ``pytest.yield_fixture`` (removed in
    pytest 6.2) to plain ``pytest.fixture``, which supports yield fixtures
    and is what the rest of this file already uses.
    """
    db = DB()
    db.connect()
    session = db.session()
    yield session
    # Close session on teardown
    session.close()
    db.disconnect()
# Will be shared among all tests (session-scoped, autouse)
@pytest.fixture(scope="session", autouse=True)
def test_database(request):
    """
    The TestDatabase object is yielded so callers can invoke refresh()
    whenever they want a fresh database.

    Note: switched from the deprecated ``pytest.yield_fixture`` (removed in
    pytest 6.2) to plain ``pytest.fixture``.
    """
    test_db = TestDatabase()
    test_db.refresh()
    yield test_db
    # Cleanup database on teardown
    test_db.cleanup()
@pytest.fixture
def client():
    """Fixture providing a Flask client proxy (get, post, etc.)."""
    return FlaskClientProxy()
# Global position counter: MockVcfWriter.add_variant increments this so
# auto-generated variants get unique, increasing POS values.
allele_start = 0
def _create_annotation(annotations, allele=None, allele_id=None):
    """Fill in the required annotation sections/transcript fields with
    defaults and build an annotation.Annotation linked either to an allele
    object or to an allele id (object takes precedence)."""
    section_defaults = (
        ("external", {}),
        ("frequencies", {}),
        ("prediction", {}),
        ("references", []),
        ("transcripts", []),
    )
    for section, default in section_defaults:
        annotations.setdefault(section, default)
    transcript_defaults = (
        ("consequences", []),
        ("transcript", "NONE_DEFINED"),
        ("strand", 1),
        ("is_canonical", True),
        ("in_last_exon", "no"),
    )
    for transcript in annotations["transcripts"]:
        for field, default in transcript_defaults:
            transcript.setdefault(field, default)
    kwargs = {"annotations": annotations, "annotation_config_id": 1}
    if allele:
        kwargs["allele"] = allele
    elif allele_id:
        kwargs["allele_id"] = allele_id
    return annotation.Annotation(**kwargs)
# Minimal VCF 4.1 header/line templates used by MockVcfWriter; {SAMPLES} is
# either empty or a tab-prefixed, tab-joined list of sample names.
VCF_HEADER_TEMPLATE = """##fileformat=VCFv4.1
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT{SAMPLES}"""
VCF_LINE_TEMPLATE = "{CHROM}\t{POS}\t{ID}\t{REF}\t{ALT}\t{QUAL}\t{FILTER}\t{INFO}"
class MockVcfWriter:
    """Incrementally builds a small VCF file for tests.

    INFO/FILTER/contig header entries are registered lazily as variants are
    added. Use as a context manager so the temp file is removed afterwards.
    """

    def __init__(self):
        # mkstemp instead of the deprecated, race-prone tempfile.mktemp: the
        # file is created atomically; cyvcf2 overwrites it when writing.
        fd, self.filename = tempfile.mkstemp()
        os.close(fd)
        self.writer = None
        self.samples = None

    @staticmethod
    def _get_header_for_info(k, v):
        """Build a cyvcf2 INFO header dict for key ``k`` from example value ``v``."""

        def get_type(x):
            # Integer, Float, Flag, Character, and String.
            if isinstance(x, bool):
                return "Flag"
            elif isinstance(x, int):
                return "Integer"
            elif isinstance(x, float):
                return "Float"
            else:
                return "String"

        if isinstance(v, (list, tuple)):
            N = len(v)
            value_type = get_type(v[0])
            # Mixed-type INFO lists are not representable in one header line.
            assert all(get_type(x) == value_type for x in v)
        else:
            N = 1
            value_type = get_type(v)
        return {"ID": k, "Type": value_type, "Description": "Added in test", "Number": N}

    def set_samples(self, samples):
        # Must be called before the first add_variant (header is lazy).
        assert self.writer is None
        self.samples = samples

    def _init(self):
        """Lazily create the cyvcf2 writer with the sample-aware header."""
        if self.writer is not None:
            return
        vcf_header = VCF_HEADER_TEMPLATE.format(
            SAMPLES="" if not self.samples else "\t" + "\t".join(self.samples)
        )
        self.writer = cyvcf2.Writer.from_string(self.filename, vcf_header)
        for key, fmt in RESERVED_GT_HEADERS.items():
            self.writer.add_format_to_header({**fmt, **{"ID": key}})

    def _get_format_str(self, fmt):
        """Render the FORMAT + per-sample columns (``fmt``: one dict per sample)."""
        if not fmt:
            return ""
        else:
            assert self.samples is not None and len(self.samples) == len(fmt)
            format_keys = set(sum((list(x.keys()) for x in fmt), []))
            format_str = "\t" + ":".join(format_keys)
            for sample_format in fmt:
                format_str += "\t" + ":".join([str(sample_format.get(k, ".")) for k in format_keys])
            return format_str

    def _get_annotation_string(self, info):
        """Render the INFO column, registering each key in the header."""
        if not info:
            return "."
        else:
            annotation = []
            for k, v in info.items():
                self.writer.add_info_to_header(self._get_header_for_info(k, v))
                if isinstance(v, bool):
                    # Flags appear bare when True and are omitted when False.
                    if v:
                        annotation.append(k)
                    else:
                        continue
                elif isinstance(v, (list, tuple)):
                    annotation.append(f"{k}={','.join(str(x) for x in v)}")
                else:
                    annotation.append(f"{k}={v}")
            return ";".join(annotation)

    def add_variant(self, variant_kwargs, fmt):
        """Append one variant line; missing columns fall back to defaults."""
        self._init()
        if variant_kwargs is None:
            variant_kwargs = {}
        global allele_start
        allele_start += 1
        # Fallback to default values
        variant_kwargs.setdefault("CHROM", "1")
        variant_kwargs.setdefault("POS", allele_start)
        variant_kwargs.setdefault("ID", ".")
        variant_kwargs.setdefault("REF", "A")
        variant_kwargs.setdefault("ALT", "C")
        variant_kwargs.setdefault("QUAL", ".")
        variant_kwargs.setdefault("FILTER", ".")
        variant_kwargs["INFO"] = self._get_annotation_string(variant_kwargs.get("INFO"))
        # BUGFIX: the original tested membership in [("PASS", ".")] — a
        # one-element list containing a tuple — so EVERY filter value
        # (including "PASS" and ".") was registered as a custom FILTER
        # header. Only genuinely custom filters need a header line.
        if variant_kwargs["FILTER"] not in ("PASS", "."):
            self.writer.add_filter_to_header(
                {"ID": variant_kwargs["FILTER"], "Description": "Added in test"}
            )
        variant_line = VCF_LINE_TEMPLATE.format(**variant_kwargs) + self._get_format_str(fmt)
        self.writer.add_to_header(f"##contig=<ID={variant_kwargs['CHROM']}>")
        v = self.writer.variant_from_string(variant_line)
        self.writer.write_record(v)

    def close(self):
        if self.writer:
            self.writer.close()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        # Flush and remove the temp file even if no variant was ever added.
        if self.writer:
            self.writer.close()
        if os.path.isfile(self.filename):
            os.unlink(self.filename)
def mock_record(variant=None, fmt=None, samples=None):
    """Write one (possibly default) variant to a temporary VCF and return
    the parsed record for it."""
    with MockVcfWriter() as writer:
        writer.set_samples(samples)
        writer.add_variant(variant, fmt)
        writer.close()
        # Parse while still inside the context manager — __exit__ deletes
        # the temporary file.
        return next(iter(VcfIterator(writer.filename)))
def mock_allele_with_annotation(session, allele_data=None, vcf_data=None, annotations=None):
    """Create a flushed allele plus an Annotation row attached to it.

    Returns the (allele, annotation) pair.
    """
    assert annotations is not None, "Create allele using mock_allele if no annotation is required"
    created_allele = mock_allele(session, allele_data=allele_data, vcf_data=vcf_data)
    created_annotation = _create_annotation(annotations, allele_id=created_allele.id)
    session.add(created_annotation)
    session.flush()
    return created_allele, created_annotation
def mock_allele(session, allele_data=None, vcf_data=None):
    """Create and flush an Allele built from a mocked VCF record, with
    ``allele_data`` entries overriding the derived fields."""
    overrides = allele_data if allele_data is not None else {}
    record = mock_record(vcf_data)
    complete_allele_data = dict(build_allele_from_record(record, ref_genome="GRCh37"))
    complete_allele_data.update(overrides)
    new_allele = allele.Allele(**complete_allele_data)
    session.add(new_allele)
    session.flush()
    return new_allele
@contextmanager
def tempinput(data: str):
    """Yield the path of a temporary file pre-loaded with ``data``;
    the file is deleted when the context exits."""
    handle = tempfile.NamedTemporaryFile(delete=False)
    handle.write(data.encode())
    handle.close()
    try:
        yield handle.name
    finally:
        os.unlink(handle.name)
def ped_info_file(ped_info):
    """Return a context manager yielding a PED file path for ``ped_info``.

    For None or single-sample inputs no PED file is needed, so a
    nullcontext is returned instead.
    """
    if ped_info is None or len(ped_info) == 1:
        return nullcontext()
    line_template = "{fam}\t{sample}\t{father}\t{mother}\t{sex}\t{affected}\t{proband}"
    rendered = "".join(line_template.format(**entry) + "\n" for entry in ped_info.values())
    return tempinput(rendered)
class ConverterConfig:
    """
    Shortcut class to create Config objects for all members of AnnotationConverters, which should
    be all of them.
    e.g., cc.vep(), cc.keyvalue(), ...
    """

    # Arguments applied to every converter Config unless overridden below.
    defaults = {
        "source": "test source",
        "target": "test target",
    }
    # Per-converter overrides, keyed by AnnotationConverters member name.
    custom = {
        "clinvarjson": {"source": "CLINVARJSON"},
        "hgmdprimaryreport": {"source": "HGMD__pmid"},
        "vep": {"source": "CSQ"},
    }

    def __init__(self) -> None:
        # dynamically sets attributes based on member name, see class def for full list
        for ac in AnnotationConverters:
            default_args = {**self.defaults, **self.custom.get(ac.name, {})}
            # partial pre-binds the default config args; update_wrapper keeps
            # the wrapped Config's metadata (name/doc) for introspection.
            setattr(
                self,
                ac.name,
                update_wrapper(partial(ac.value.Config, **default_args), ac.value.Config),
            )
# Module-level singleton; tests build converter configs via e.g. cc.vep().
cc = ConverterConfig()
|
'''
File: bluestone_config.py
Project: bluestone
Author: daniel dong
Email: dongzhenguo@lantsang.cn
Copyright 2021 - 2021 bluestone tech
'''
import uos
import log
import ujson
import _thread
from usr import bluestone_common
log.basicConfig(level = log.INFO)
_config_log = log.getLogger("CONFIG")
class BluestoneConfig(object):
    """JSON configuration stored on the device's 'usr:' filesystem.

    File reads/writes are serialized with a lock. Some config sections
    require a device restart to take effect (see ``restart_key_list``).
    """

    # Most recently constructed instance (simple singleton-style access).
    inst = None

    def __init__(self, file_name):
        self.lock = _thread.allocate_lock()
        self.config_file_name = file_name
        self.config_path = 'usr:/{}'.format(self.config_file_name)
        # Sections whose change requires a device restart to take effect.
        self.restart_key_list = ['mqtt_tencent', 'socket', 'timer0', 'timer1', 'timer2', 'timer3']
        # All recognized config sections.
        self.key_list = ['uart0', 'uart1', 'uart2', 'mqtt_tencent', 'socket', 'timer0', 'timer1', 'timer2', 'timer3', 'gpio']
        BluestoneConfig.inst = self

    def check_key_restart(self, key):
        """Return True if changing ``key`` requires a device restart."""
        if key is None:
            return False
        return key in self.restart_key_list

    def check_key_exist(self, key):
        """Return True if ``key`` is a recognized config section."""
        if key is None:
            return False
        return key in self.key_list

    def get_value(self, config, key):
        """Return config[key], or None when config/key is missing."""
        if (config is None) or (key is None):
            return None
        if key in config.keys():
            return config[key]
        return None

    def get_int_value(self, config, key):
        """Return config[key] coerced to int, or 0 when missing."""
        value = self.get_value(config, key)
        if value is not None:
            return int(value)
        return 0

    def get_float_value(self, config, key):
        """Return config[key] coerced to float, or 0.0 when missing."""
        value = self.get_value(config, key)
        if value is not None:
            return float(value)
        return 0.0

    def init_config(self):
        """Read the config file if it exists, otherwise create an empty one.

        Returns the raw JSON text, or None when a new file was created.
        """
        config = None
        exist = bluestone_common.BluestoneCommon.check_file_exist(self.config_file_name)
        if exist:
            config = self.read_config()
            _config_log.info("Read config from {}, the content is {}".format(self.config_path, config))
        else:
            self.create_config()
            _config_log.info("Config {} does not exist, creating a new one".format(self.config_path))
        return config

    def create_config(self):
        """Create an empty JSON config file containing '{}'."""
        path = self.config_path.replace(':', '')
        self.lock.acquire()
        # try/finally so the lock is never leaked if the write fails.
        try:
            with open(path, 'w') as f:
                f.write("{}")
        finally:
            self.lock.release()

    def read_config(self):
        """Return the raw JSON text of the config file."""
        path = self.config_path.replace(':', '')
        content = None
        self.lock.acquire()
        try:
            with open(path, 'r') as f:
                content = f.read()
        finally:
            self.lock.release()
        return content

    def read_config_by_name(self, config, name):
        """Parse JSON text ``config`` and return its ``name`` section (or None)."""
        if config is None:
            config = '{}'
        current_config = None
        system_config = ujson.loads(config)
        if name in system_config.keys():
            current_config = system_config[name]
        return current_config

    def update_config(self, name, params):
        """Load the whole file, set section ``name`` to ``params``, write it back."""
        # (Removed an unused local ``path`` computed here in the original.)
        content = self.read_config()
        config = ujson.loads(content)
        self.write_config(config, name, params)

    def write_config(self, config, name, params):
        """Set config[name] = params and persist the whole config as JSON."""
        if config is None:  # was '== None'; identity check is the correct idiom
            config = {}
        config[name] = params
        path = self.config_path.replace(':', '')
        new_config = ujson.dumps(config)
        self.lock.acquire()
        try:
            with open(path, 'w') as f:
                _config_log.info("New config is {}".format(new_config))
                f.write(new_config)
        finally:
            self.lock.release()
|
# python2, 3 compatibility
from __future__ import absolute_import, division, print_function
import six
import os
import sys
import inspect
from builtins import str, open, range, dict
import pickle
import numpy as np
import pandas as pd
import pybedtools
from pybedtools import BedTool
from sklearn.preprocessing import FunctionTransformer
from genomelake.extractors import BaseExtractor, FastaExtractor, one_hot_encode_sequence, NUM_SEQ_CHARS
from pysam import FastaFile
from concise.preprocessing.splines import encodeSplines
from concise.utils.position import extract_landmarks, ALL_LANDMARKS
from gtfparse import read_gtf
from kipoi.metadata import GenomicRanges
import linecache
from kipoi.data import Dataset
import warnings
# Directory containing this dataloader file, resolved from the current frame
# so it is correct regardless of the working directory.
filename = inspect.getframeinfo(inspect.currentframe()).filename
DATALOADER_DIR = os.path.dirname(os.path.abspath(filename))
def sign_log_func(x):
    """Signed log transform: sign(x) * log10(|x| + 1). Zero maps to zero."""
    return np.sign(x) * np.log10(1 + np.abs(x))
def sign_log_func_inverse(x):
return np.sign(x) * (np.power(10, np.abs(x)) - 1)
class BedToolLinecache(BedTool):
    """Fast BedTool accessor by Ziga Avsec

    A plain BedTool scans the whole file for every access (O(n));
    linecache caches file lines, making random access effectively O(1).
    """

    def __getitem__(self, idx):
        # linecache is 1-based, BedTool indices are 0-based.
        raw_line = linecache.getline(self.fn, idx + 1)
        fields = raw_line.strip().split("\t")
        return pybedtools.create_interval_from_list(fields)
class DistanceTransformer:
    """Transforms the raw distances to the appropriate modeling form
    """

    def __init__(self, pos_features, pipeline_obj_path):
        """
        Args:
            pos_features: list of positional features to use
            pipeline_obj_path: path to the serialized pipeline obj_path
        """
        self.pos_features = pos_features
        self.pipeline_obj_path = pipeline_obj_path
        # deserialize the pickle file
        # NOTE(review): pickle.load executes arbitrary code from the file —
        # only load pipeline objects from trusted sources.
        with open(self.pipeline_obj_path, "rb") as f:
            pipeline_obj = pickle.load(f)
        # Pipeline tuple layout: (feature names, fitted scaler, imputer).
        self.POS_FEATURES = pipeline_obj[0]
        self.minmax_scaler = pipeline_obj[1]
        self.imp = pipeline_obj[2]
        # Signed-log transform compresses the raw distance range before scaling.
        self.funct_transform = FunctionTransformer(func=sign_log_func,
                                                   inverse_func=sign_log_func_inverse)
        # for simplicity, assume all current pos_features are the
        # same as from before
        assert self.POS_FEATURES == self.pos_features

    def transform(self, x):
        # impute missing values and rescale the distances
        xnew = self.minmax_scaler.transform(self.funct_transform.transform(self.imp.transform(x)))
        # convert distances to spline bases, one entry per positional feature
        dist = {"dist_" + k: encodeSplines(xnew[:, i, np.newaxis], start=0, end=1, warn=False)
                for i, k in enumerate(self.POS_FEATURES)}
        return dist
class DistToClosestLandmarkExtractor(BaseExtractor):
    """Extract, for each interval, the signed distance to the closest
    genomic landmark of each type.

    # Arguments
        gtf_file: Genomic annotation file path (say gencode gtf)
        landmarks: List of landmarks to extract. See `concise.utils.position.extract_landmarks`
        use_strand: Take into account the strand of the intervals
    """
    multiprocessing_safe = True
    def __init__(self, gtf_file, landmarks=ALL_LANDMARKS, use_strand=True, **kwargs):
        super(DistToClosestLandmarkExtractor, self).__init__(gtf_file, **kwargs)
        self._gtf_file = gtf_file
        # dict: landmark name -> DataFrame of landmark positions
        self.landmarks = extract_landmarks(gtf_file, landmarks=landmarks)
        self.columns = landmarks # column names. Required for concatenating distances into an array
        self.use_strand = use_strand
        # set index to chromosome and strand - faster access
        self.landmarks = {k: v.set_index(["seqname", "strand"])
                          for k, v in six.iteritems(self.landmarks)}
    def _extract(self, intervals, out, **kwargs):
        def find_closest(ldm, interval, use_strand=True):
            """Return the signed distance (bp) from the interval midpoint to
            the nearest landmark position in `ldm`, strand-aware if requested.
            """
            # subset the positions to the appropriate strand
            # and extract the positions
            ldm_positions = ldm.loc[interval.chrom]
            if use_strand and interval.strand != ".":
                ldm_positions = ldm_positions.loc[interval.strand]
            ldm_positions = ldm_positions.position.values
            int_midpoint = (interval.end + interval.start) // 2
            dist = (ldm_positions - 1) - int_midpoint # -1 for 0, 1 indexed positions
            if use_strand and interval.strand == "-":
                # negate on the minus strand so the sign is strand-relative
                # (presumably upstream vs downstream — verify against model)
                dist = - dist
            # pick the smallest-magnitude distance, keeping its sign
            return dist[np.argmin(np.abs(dist))]
        # result matrix: one row per interval, one column per landmark type
        out[:] = np.array([[find_closest(self.landmarks[ldm_name], interval, self.use_strand)
                            for ldm_name in self.columns]
                           for interval in intervals], dtype=float)
        return out
    def _get_output_shape(self, num_intervals, width):
        # one row per interval, one column per landmark type (width unused)
        return (num_intervals, len(self.columns))
class TxtDataset(Dataset):
    """Dataset over a plain-text file with one integer value per line."""

    def __init__(self, path):
        # load the entire file once; parsing happens lazily in __getitem__
        with open(path, "r") as handle:
            self.lines = handle.readlines()

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, idx):
        raw = self.lines[idx]
        return int(raw.strip())
# --------------------------------------------
class SeqDistDataset(Dataset):
    """Dataset yielding one-hot DNA sequence plus spline-encoded distances
    to the closest genomic landmarks, with optional scalar targets.

    Args:
        intervals_file: file path; tsv file
            Assumes bed-like `chrom start end id score strand` format.
        fasta_file: file path; Genome sequence
        gtf_file: file path; Genome annotation GTF file.
        filter_protein_coding: Considering genomic landmarks only for protein coding genes
        target_file: file path; path to the targets
        use_linecache: if True, use the linecache-backed BedTool for fast
            random access into the intervals file
    """
    # every interval must be exactly this many bp wide
    SEQ_WIDTH = 101
    def __init__(self, intervals_file, fasta_file, gtf_file,
                 filter_protein_coding=True,
                 target_file=None, use_linecache=False):
        if sys.version_info[0] != 3:
            warnings.warn("Only Python 3 is supported. You are using Python {0}".format(sys.version_info[0]))
        # parse the GTF annotation into a DataFrame
        self.gtf = read_gtf(gtf_file)
        self.filter_protein_coding = filter_protein_coding
        if self.filter_protein_coding:
            # GENCODE GTFs use 'gene_type'; Ensembl GTFs use 'gene_biotype'
            if "gene_type" in self.gtf:
                self.gtf = self.gtf[self.gtf["gene_type"] == "protein_coding"]
            elif "gene_biotype" in self.gtf:
                self.gtf = self.gtf[self.gtf["gene_biotype"] == "protein_coding"]
            else:
                warnings.warn("Gtf doesn't have the field 'gene_type' or 'gene_biotype'. Considering genomic landmarks" +
                              "of all genes not just protein_coding.")
        # normalize chromosome names to UCSC style ('chr...') if needed
        if not np.any(self.gtf.seqname.str.contains("chr")):
            self.gtf["seqname"] = "chr" + self.gtf["seqname"]
        # intervals
        if use_linecache:
            self.bt = BedToolLinecache(intervals_file)
        else:
            self.bt = BedTool(intervals_file)
        # extractors
        self.seq_extractor = FastaExtractor(fasta_file)
        # NOTE(review): the parsed DataFrame (self.gtf) is passed where the
        # extractor parameter is named `gtf_file` — presumably
        # extract_landmarks accepts a DataFrame as well; verify.
        self.dist_extractor = DistToClosestLandmarkExtractor(gtf_file=self.gtf,
                                                             landmarks=ALL_LANDMARKS)
        # here the DATALOADER_DIR contains the path to the current directory
        self.dist_transformer = DistanceTransformer(ALL_LANDMARKS,
                                                    DATALOADER_DIR + "/dataloader_files/position_transformer.pkl")
        # target
        if target_file:
            self.target_dataset = TxtDataset(target_file)
            # one target value per interval
            assert len(self.target_dataset) == len(self.bt)
        else:
            self.target_dataset = None
    def __len__(self):
        return len(self.bt)
    def __getitem__(self, idx):
        interval = self.bt[idx]
        # the model expects fixed-width input sequences
        if interval.stop - interval.start != self.SEQ_WIDTH:
            raise ValueError("Expected the interval to be {0} wide. Recieved stop - start = {1}".
                             format(self.SEQ_WIDTH, interval.stop - interval.start))
        out = {}
        out['inputs'] = {}
        # input - sequence (drop the singleton batch axis)
        out['inputs']['seq'] = np.squeeze(self.seq_extractor([interval]), axis=0)
        # input - distance features, spline-encoded
        dist_dict = self.dist_transformer.transform(self.dist_extractor([interval]))
        dist_dict = {k: np.squeeze(v, axis=0) for k, v in dist_dict.items()}  # squeeze the batch axis
        out['inputs'] = {**out['inputs'], **dist_dict}
        # targets
        if self.target_dataset is not None:
            out["targets"] = np.array([self.target_dataset[idx]])
        # metadata
        out['metadata'] = {}
        out['metadata']['ranges'] = GenomicRanges.from_interval(interval)
        return out
def test_dataset():
    """Smoke-test SeqDistDataset on the bundled example files.

    Instantiates the dataset, fetches two items and one batch.
    """
    # File paths
    intervals_file = "example_files/intervals.bed"
    target_file = "example_files/targets.tsv"
    gtf_file = "example_files/gencode.v24.annotation_chr22.gtf"
    fasta_file = "example_files/hg38_chr22.fa"
    # BUG FIX: target_file was previously passed positionally, which bound it
    # to the `filter_protein_coding` parameter instead of `target_file`, so
    # targets were never loaded. Pass it by keyword.
    ds = SeqDistDataset(intervals_file, fasta_file, gtf_file,
                        target_file=target_file)
    ds[0]
    ds[10]
    it = ds.batch_iter(32)
    next(it)
|
from flask import Flask, render_template

app = Flask(__name__)


@app.route("/")
def Output():
    """Render the landing page with an explicit 200 status."""
    return render_template('index.html'), 200


if __name__ == "__main__":
    # Guard the server start so importing this module does not launch it.
    # NOTE(security): debug=True enables the interactive Werkzeug debugger;
    # combined with host="0.0.0.0" it is reachable from any network
    # interface. Never run this configuration in production.
    app.run(debug=True, host="0.0.0.0", port=9000)
import importlib
import inspect
import os
def create_event_list(chat_id):
    """Discover event classes in the sibling ``events`` package and build a
    dependency map.

    Each module ``events/<Name>.py`` is expected to define a class ``<Name>``;
    it is instantiated with ``chat_id`` to read its ``event_id`` and
    ``prev_event_ids``.

    Args:
        chat_id: Identifier forwarded to each event class constructor.

    Returns:
        dict mapping each event's ``event_id`` to
        ``{'prev': [prerequisite event ids], 'class': <event class>}``.
    """
    events_dir = os.path.join(os.getcwd(), '..', 'events')
    # BUG FIX: the old listdir scan used x.split('.')[0] (wrong for dotted
    # filenames) and also picked up __pycache__, __init__.py and non-.py
    # entries, whose import then failed. Consider only real Python modules.
    event_classes = [
        os.path.splitext(fname)[0]
        for fname in os.listdir(events_dir)
        if fname.endswith('.py') and not fname.startswith('__')
    ]
    events = {}
    for name in event_classes:
        module = importlib.import_module('events.{}'.format(name))
        cls = getattr(module, name, None)
        if cls is None or not inspect.isclass(cls):
            continue
        event = cls(chat_id)
        # copy prev_event_ids so later mutation of the event can't leak in
        events[event.event_id] = {'prev': list(event.prev_event_ids),
                                  'class': cls}
    return events
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.