content stringlengths 5 1.05M |
|---|
import matplotlib.pyplot as plt
import numpy as np
###Problem 1
# Simulate N independent SAT-style totals: three sections, each drawn
# from Normal(mean=500, sd=100); the total score is their sum.
N=10000
mathematics = np.random.normal(500,100,N)
reading = np.random.normal(500,100,N)
writing = np.random.normal(500,100,N)
SATscore = mathematics + reading + writing
plt.hist(SATscore)
plt.show()
# Summary statistics of the simulated totals.  For independent sections
# the theoretical sd is sqrt(3*100^2) = np.sqrt(3*10000) ~ 173.
np.std(SATscore)
np.sqrt(3*10000)
np.var(SATscore)
np.mean(SATscore)
#problem 2
# Correlated sections: draw (math, reading, writing) from a trivariate
# normal.  Each section has variance 10000; covariances of 5000
# (7000 between sections 1 and 2) make the sections correlated.
mu = np.array([500.0, 500.0, 500.0])  # fix: `mu` was never defined (NameError at the draw below)
sig = np.ones((3,3)) *5000
sig[0,0]=10000
sig[1,1]=10000
sig[2,2]=10000
sig[1,2]=7000
sig[2,1]=7000
mvnscores = np.random.multivariate_normal(mu,sig,10000)
SATscores = np.sum(mvnscores,axis=1)
plt.hist(SATscores,bins=100)
plt.show()
np.mean(SATscores)
np.var(SATscores)
np.std(SATscores)
# Theoretical variance of the total = sum of all covariance entries = 64000.
np.sqrt(64000)
#Figures: Central Limit Theorem demos.
# The original repeated the same 3-panel histogram code four times
# (and mislabelled the binomial section as "#beta"); factored here
# into one helper that is called once per parent distribution.
def _plot_sample_means(sampler, xlim, ylim):
    """Plot histograms of 10000 sample means for n = 10, 20, 30.

    sampler: callable taking a shape tuple and returning random draws.
    xlim/ylim: (lo, hi) axis limits shared by the three panels.
    """
    for i, n in enumerate((10, 20, 30)):
        plt.subplot(3, 1, i + 1)
        dat = sampler([10000, n])
        plt.hist(np.mean(dat, 1), bins=100)
        plt.xlim(*xlim)
        plt.ylim(*ylim)
        plt.title('n=%d' % n)
    plt.show()

# exponential(1)
_plot_sample_means(lambda s: np.random.exponential(1, s), (0, 2.5), (0, 400))
#poisson
_plot_sample_means(lambda s: np.random.poisson(3, s), (0, 6), (0, 800))
#beta
_plot_sample_means(lambda s: np.random.beta(.1, .1, s), (0, 1), (0, 400))
#binomial (original comment said "beta" but the code samples binomial(20, .8))
_plot_sample_means(lambda s: np.random.binomial(20, .8, s), (12, 20), (0, 700))
|
#------------------------------------------------------------------------------
#
# PyGUI - OpenGL - Win32
#
#------------------------------------------------------------------------------
import win32con as wc, win32ui as ui, win32gui as gui
import GDIPlus as gdi
import WGL
from GUI.Components import win_none
from GUI.OpenGL import WGL as wgl
from GUI.GGLViews import GLView as GGLView
from GUI.GGLPixmaps import GLPixmap as GGLPixmap
from GUI.GGLConfig import GLConfig as GGLConfig, GLConfigError
from GUI.GLContexts import GLContext
from GUI.GLTextures import Texture
# Window style and default geometry shared by the view classes below.
win_style = wc.WS_VISIBLE | wc.WS_CLIPCHILDREN | wc.WS_CLIPSIBLINGS
win_default_size = GGLView._default_size
win_default_rect = (0, 0, win_default_size[0], win_default_size[1])
#------------------------------------------------------------------------------
class GLConfig(GGLConfig):
def _as_win_pixelattrs(self, mode):
print "GLConfig._as_arb_pixelattrs: mode =", mode ###
attrs = {}
attrs[wgl.WGL_SUPPORT_OPENGL_ARB] = True
if mode == 'screen' or mode == 'both':
print "GLConfig: requesting screen drawing" ###
attrs[wgl.WGL_DRAW_TO_WINDOW_ARB] = True
if self._double_buffer:
attrs[wgl.WGL_DOUBLE_BUFFER_ARB] = True
if mode == 'pixmap' or mode == 'both':
print "GLConfig: requesting pixmap drawing" ###
attrs[wgl.WGL_DRAW_TO_PBUFFER_ARB] = True
if self._stereo:
attrs[wgl.WGL_STEREO_ARB] = True
attrs[wgl.WGL_PIXEL_TYPE_ARB] = wgl.WGL_TYPE_RGBA_ARB
bits = self._color_size
attrs[wgl.WGL_RED_BITS_ARB] = bits
attrs[wgl.WGL_GREEN_BITS_ARB] = bits
attrs[wgl.WGL_BLUE_BITS_ARB] = bits
if self._alpha:
attrs[wgl.WGL_ALPHA_BITS_ARB] = self._alpha_size
attrs[wgl.WGL_AUX_BUFFERS_ARB] = self._aux_buffers
if self._depth_buffer:
attrs[wgl.WGL_DEPTH_BITS_ARB] = self._depth_size
if self._stencil_buffer:
attrs[wgl.WGL_STENCIL_BITS_ARB] = self._stencil_size
if self._accum_buffer:
bits = self._accum_size
attrs[wgl.WGL_ACCUM_RED_BITS_ARB] = bits
attrs[wgl.WGL_ACCUM_GREEN_BITS_ARB] = bits
attrs[wgl.WGL_ACCUM_BLUE_BITS_ARB] = bits
return attrs
def _from_win_pixelattrs(cls, attrs):
self = cls.__new__(cls)
self._double_buffer = attrs[wgl.WGL_DOUBLE_BUFFER_ARB]
self._color_size = attrs[wgl.WGL_COLOR_BITS_ARB] // 3
self._alpha_size = attrs[wgl.WGL_ALPHA_BITS_ARB]
self._alpha = self._alpha_size > 0
self._stereo = attrs[wgl.WGL_STEREO_ARB] #flags & wgl.PFD_STEREO != 0
self._aux_buffers = attrs[wgl.WGL_AUX_BUFFERS_ARB] > 0
self._depth_size = attrs[wgl.WGL_DEPTH_BITS_ARB]
self._depth_buffer = self._depth_size > 0
self._stencil_size = attrs[wgl.WGL_STENCIL_BITS_ARB]
self._stencil_buffer = self._stencil_bits > 0
self._accum_size = attrs[wgl.WGL_ACCUM_BITS_ARB] // 3
self._accum_buffer = self._accum_size > 0
self._multisample = False
self._samples_per_pixel = 1
return self
# def _check_win_pixelattrs(self, attrs, mode):
# if mode == 'screen' or mode == 'both':
# if not attrs[wgl.WGL_DRAW_TO_WINDOW_ARB]:
# raise GLConfigError("Rendering to screen not supported")
# if mode == 'pixmap' or mode == 'both':
# if not attrs[wgl.WGL_DRAW_TO_PBUFFER_ARB]:
# raise GLConfigError("Rendering to pixmap not supported")
# if self._alpha and attrs[wgl.WGL_ALPHA_BITS_ARB] == 0:
# raise GLConfigError("Alpha channel not available")
# if self._stereo and not attrs[wgl.WGL_STEREO_ARB]:
# raise GLConfigError("Stereo buffer not available")
# if self._aux_buffers and attrs]wgl.WGL_AUX_BUFFERS_ARB] == 0:
# raise GLConfigError("Auxiliary buffers not available")
# if self._depth_buffer and attrs[wgl.WGL_DEPTH_BITS_ARB] == 0:
# raise GLConfigError("Depth buffer not available")
# if self._stencil_buffer and attrs[wgl.WGL_STENCIL_BITS] == 0:
# raise GLConfigError("Stencil buffer not available")
# if self.accum_buffer and attrs[wgl.WGL_ACCUM_BITS] == 0:
# raise GLConfigError("Accumulation buffer not available")
_win_query_pixelattr_keys = [
wgl.WGL_SUPPORT_OPENGL_ARB,
wgl.WGL_DRAW_TO_WINDOW_ARB,
wgl.WGL_DOUBLE_BUFFER_ARB,
wgl.WGL_DRAW_TO_PBUFFER_ARB,
wgl.WGL_STEREO_ARB,
wgl.WGL_PIXEL_TYPE_ARB,
wgl.WGL_COLOR_BITS_ARB,
wgl.WGL_ALPHA_BITS_ARB,
wgl.WGL_AUX_BUFFERS_ARB,
wgl.WGL_DEPTH_BITS_ARB,
wgl.WGL_STENCIL_BITS_ARB,
wgl.WGL_ACCUM_BITS_ARB,
]
def _win_supported_pixelformat(self, hdc, mode):
req_attrs = self._as_win_pixelattrs(mode)
print "GLConfig: Choosing pixel format for hdc", hdc ###
print "Requested attributes:", req_attrs ###
req_array = WGL.attr_array(req_attrs)
print "Requested array:", req_array ###
ipfs, nf = wgl.wglChoosePixelFormatEXT(hdc, req_array, None, 1)
print "Pixel formats:", ipfs ###
print "No. of formats:", nf ###
if not ipfs:
req_attrs[wgl.WGL_DOUBLE_BUFFER_ARB] = not self._double_buffer
req_array = WGL.attr_array(req_attrs)
ipfs, nf = wglChoosePixelFormatARB(hdc, req_array, None, 1)
if not ipfs:
return None, None
print "GLConfig: Describing pixel format", ipf, "for hdc", hdc ###
keys = _win_query_pixelattr_keys
values = wglGetPixelFormatAttribivARB(hdc, ipf, 0, keys)
print "Actual values:", values ###
act_attrs = WGL.attr_dict(keys, values)
print "Actual attrs:", act_attrs ###
return ipfs[0], act_attrs
def supported(self, mode = 'both'):
dc = win_none.GetDC()
hdc = dc.GetSafeHdc()
ipf, act_attrs = self._win_supported_pixelformat(hdc, mode)
win_none.ReleaseDC(dc)
if ipf is None:
return None
return GLConfig._from_win_pixelattrs(act_attrs)
#------------------------------------------------------------------------------
class GLView(GGLView):
    """Win32 OpenGL view: a child window with an attached GL context."""

    def __init__(self, config = None, share_group = None, **kwds):
        # Resolve the GLConfig, create the underlying win32 window,
        # then initialise the GL context against the window's DC.
        config = GLConfig._from_args(config, kwds)
        win = ui.CreateWnd()
        win.CreateWindow(None, None, win_style, win_default_rect,
            win_none, 0)
        dc = win.GetDC()
        hdc = dc.GetSafeHdc()
        GLContext.__init__(self, share_group, config, hdc, 'screen')
        GGLView.__init__(self, _win = win, **kwds)
        self._with_context(hdc, self._init_context)
        win.ReleaseDC(dc)

    def destroy(self):
        # Tear down the GL context before the generic view teardown.
        GLContext.destroy(self)
        GGLView.destroy(self)

    def with_context(self, proc, flush = False):
        # Run proc with this view's GL context current; the DC is
        # released even if proc raises.
        win = self._win
        dc = win.GetDC()
        hdc = dc.GetSafeHdc()
        try:
            self._with_context(hdc, proc, flush)
        finally:
            win.ReleaseDC(dc)

    def OnPaint(self):
        #print "GLView.OnPaint" ###
        # WM_PAINT handler: render with the context current, flushing
        # (flush=True) so the frame becomes visible.
        win = self._win
        dc, ps = win.BeginPaint()
        try:
            hdc = dc.GetSafeHdc()
            self._with_context(hdc, self.render, True)
        finally:
            win.EndPaint(ps)

    def _resized(self, delta):
        # Keep the GL viewport in sync with the window size.
        self.with_context(self._update_viewport)
#------------------------------------------------------------------------------
#class GLPixmap(GGLPixmap):
#
# def __init__(self, width, height, config = None, share_group = None, **kwds):
# print "GLPixmap:", width, height, kwds ###
# config = GLConfig._from_args(config, kwds)
# image = gdi.Bitmap(width, height)
# self._win_image = image
# graphics = gdi.Graphics.from_image(image)
# self._win_graphics = graphics
# hdc = graphics.GetHDC()
# self._win_hdc = hdc
# GLContext.__init__(self, share_group, config, hdc, 'pixmap')
# self._with_context(hdc, self._init_context)
# print "GLPixmap: done" ###
#
# def __del__(self):
# graphics = self._win_graphics
# graphics.ReleaseHDC(self._win_hdc)
#
# def with_context(self, proc, flush = False):
# try:
# self._with_context(self._hdc, proc, flush)
# finally:
# graphics.ReleaseHDC(hdc)
#------------------------------------------------------------------------------
class GLPixmap(GGLPixmap):
    """Offscreen OpenGL pixmap backed by a win32 memory DC + bitmap."""

    def __init__(self, width, height, config = None, share_group = None, **kwds):
        print "GLPixmap:", width, height, kwds ###
        config = GLConfig._from_args(config, kwds)
        # Create a screen-compatible memory DC and select a bitmap of
        # the requested size into it for GL to render into.
        pyhdc = gui.CreateCompatibleDC(0)
        dc = ui.CreateDCFromHandle(pyhdc)
        hdc = dc.GetSafeHdc()
        hbm = gui.CreateCompatibleBitmap(hdc, width, height)
        bm = ui.CreateBitmapFromHandle(hbm)
        dc.SelectObject(bm)
        # Keep references so the GDI objects outlive this pixmap.
        self._win_dc = dc
        self._win_hbm = hbm
        self._win_bm = bm
        GLContext.__init__(self, share_group, config, hdc, 'pixmap')
        self._with_context(hdc, self._init_context)
        print "GLPixmap: done" ###

    def with_context(self, proc, flush = False):
        # Run proc with this pixmap's GL context current.
        hdc = self._win_dc.GetSafeHdc()
        self._with_context(hdc, proc, flush)
#------------------------------------------------------------------------------
def win_dump_pixelformat(pf):
print "nSize =", pf.nSize
print "nVersion =", pf.nVersion
print "dwFlags = 0x%08x" % pf.dwFlags
print "iPixelType =", pf.iPixelType
print "cColorBits =", pf.cColorBits
print "cRedBits =", pf.cRedBits
print "cRedShift =", pf.cRedShift
print "cGreenBits =", pf.cGreenBits
print "cGreenShift =", pf.cGreenShift
print "cBlueBits =", pf.cBlueBits
print "cBlueShift =", pf.cBlueShift
print "cAlphaBits =", pf.cAlphaBits
print "cAlphaShift =", pf.cAlphaShift
print "cAccumBits =", pf.cAccumBits
print "cAccumRedBits =", pf.cAccumRedBits
print "cAccumGreenBits =", pf.cAccumGreenBits
print "cAccumBlueBits =", pf.cAccumBlueBits
print "cDepthBits =", pf.cDepthBits
print "cStencilBits =", pf.cStencilBits
print "cAuxBuffers =", pf.cAuxBuffers
print "iLayerType =", pf.iLayerType
print "bReserved =", pf.bReserved
print "dwLayerMask =", pf.dwLayerMask
print "dwVisibleMask =", pf.dwVisibleMask
print "dwDamageMask =", pf.dwDamageMask
|
from Repositorio.Entidades.Nave import Nave
from Repositorio.Conexao.conexao import ConexaoPostgre
class NavesRepositorio:
    """CRUD repository for the tb_nave table.

    All statements now use parameterized queries: the original built SQL
    with f-strings, which allowed SQL injection through any string field
    and inserted the literal text 'NULL' into varchar columns instead of
    SQL NULL.  The driver maps Python None to SQL NULL automatically.
    """

    # Column order shared by the SELECT statements and the converter.
    _COLUNAS = ("id_nave, id_fabricante, nome, modelo, tripulacao, "
                "passageiros, capacidade_carga, preco")

    def __init__(self):
        self.conexao = ConexaoPostgre()

    def createNave(self, nave):
        """Insert a new row and return `nave` with its generated id set."""
        con = self.conexao.conectar()
        cur = con.cursor()
        sql = """INSERT INTO tb_nave (
                     id_fabricante,
                     nome,
                     modelo,
                     tripulacao,
                     passageiros,
                     capacidade_carga,
                     preco
                 ) VALUES (%s, %s, %s, %s, %s, %s, %s)
                 RETURNING id_nave;"""
        cur.execute(sql, (nave.getIdFabricante(),
                          nave.getNome(),
                          nave.getModelo(),
                          nave.getTripulacao(),
                          nave.getPassageiros(),
                          nave.getCapacidadeCarga(),
                          nave.getPreco()))
        id_nave = cur.fetchone()[0]
        nave.setIdNave(id_nave)
        con.commit()
        con.close()
        return nave

    def readNaves(self):
        """Return every row of tb_nave as a list of Nave entities."""
        con = self.conexao.conectar()
        cur = con.cursor()
        cur.execute("SELECT " + self._COLUNAS + " FROM tb_nave;")
        listaNaveBanco = cur.fetchall()
        listaNaveEntidade = self.converterListaBancoParaListaEntidade(listaNaveBanco)
        con.close()
        return listaNaveEntidade

    def readNave(self, id_nave):
        """Return the Nave with the given id, or None when absent."""
        con = self.conexao.conectar()
        cur = con.cursor()
        cur.execute("SELECT " + self._COLUNAS +
                    " FROM tb_nave WHERE tb_nave.id_nave = %s;",
                    (id_nave,))
        naveBanco = cur.fetchall()
        con.close()
        if len(naveBanco) > 0:
            return self.converterBancoParaEntidade(naveBanco[0])
        return None

    def updateNave(self, nave):
        """Update the row identified by nave.getIdNave(); return `nave`."""
        con = self.conexao.conectar()
        cur = con.cursor()
        sql = """UPDATE tb_nave
                 SET id_fabricante=%s,
                     nome=%s,
                     modelo=%s,
                     tripulacao=%s,
                     passageiros=%s,
                     capacidade_carga=%s,
                     preco=%s
                 WHERE tb_nave.id_nave = %s;"""
        cur.execute(sql, (nave.getIdFabricante(),
                          nave.getNome(),
                          nave.getModelo(),
                          nave.getTripulacao(),
                          nave.getPassageiros(),
                          nave.getCapacidadeCarga(),
                          nave.getPreco(),
                          nave.getIdNave()))
        con.commit()
        con.close()
        return nave

    def deleteNave(self, id_nave):
        """Delete the row with the given id."""
        con = self.conexao.conectar()
        cur = con.cursor()
        cur.execute("DELETE FROM tb_nave WHERE tb_nave.id_nave = %s;",
                    (id_nave,))
        con.commit()
        con.close()

    def converterBancoParaEntidade(self, naveBanco):
        """Map one DB row (tuple in _COLUNAS order) to a Nave entity."""
        naveEntidade = Nave()
        naveEntidade.setIdNave(naveBanco[0])
        naveEntidade.setIdFabricante(naveBanco[1])
        naveEntidade.setNome(naveBanco[2])
        naveEntidade.setModelo(naveBanco[3])
        naveEntidade.setTripulacao(naveBanco[4])
        naveEntidade.setPassageiros(naveBanco[5])
        naveEntidade.setCapacidadeCarga(naveBanco[6])
        naveEntidade.setPreco(naveBanco[7])
        return naveEntidade

    def converterListaBancoParaListaEntidade(self, listaNaveBanco):
        """Map a list of DB rows to a list of Nave entities."""
        return [self.converterBancoParaEntidade(naveBanco)
                for naveBanco in listaNaveBanco]

    def nomeJaExiste(self, nave):
        """True when another row already uses this nave's name."""
        con = self.conexao.conectar()
        cur = con.cursor()
        sql = "SELECT count(1) FROM tb_nave WHERE tb_nave.nome = %s"
        params = [nave.getNome()]
        if nave.getIdNave() is not None:
            # When updating, the row being updated does not count.
            sql += " AND tb_nave.id_nave != %s"
            params.append(nave.getIdNave())
        cur.execute(sql, tuple(params))
        qtdNaves = cur.fetchall()
        con.close()
        return qtdNaves[0][0] > 0
import numpy as np
from scipy import spatial
class Camera( object ):
    """
    Holds the viewing properties of a camera: position, orientation
    (Euler angles), projection type, field of view and frame size.
    """

    def __init__(self, pos, ori, proj, fov, dims, step = None):
        """
        Build a camera.

        *Arguments*:
         - pos = camera position (numpy array of length 3).
         - ori = camera orientation Euler angles (numpy array of length 3).
         - proj = projection type: 'persp' or 'pano'.
         - fov = field of view in degrees.
         - dims = (width, height) pixel dimensions of the frame.
         - step = angular x-step of pixels for panoramic projections
                  (default None).
        """
        # Validate all arguments up front, then store copies so later
        # mutation of the caller's arrays cannot affect this camera.
        assert isinstance(pos, np.ndarray) and len(pos)==3, \
            "Error - camera_position must be a numpy array with length 3. "
        assert isinstance(ori, np.ndarray) and len(ori) == 3, \
            "Error - camera_ori must be a numpy array with length 3. "
        assert 'persp' in proj.lower() or 'pano' in proj.lower(), \
            "Error - camera type should be pano or persp."
        assert len(dims) >= 2, "Error, dims must be a tuple or list of length 2."
        assert np.isfinite(fov), "Error, fov must be numeric."
        self.pos = pos.copy()
        self.ori = ori.copy()
        self.proj = proj
        self.dims = dims
        self.fov = fov
        self.step = step

    def clone(self):
        """
        Return an independent deep copy of this camera.
        """
        return Camera( self.pos.copy(), self.ori.copy(), self.proj,
                       self.fov, self.dims, self.step )

    def set_transform(self, camera_pos, camera_ori):
        """
        Replace this camera's position and orientation.

        *Arguments*:
         - camera_pos = the new position.
         - camera_ori = the new orientation.
        """
        assert isinstance(camera_pos, np.ndarray) and len(camera_pos) == 3, \
            "Error - camera_position must be a numpy array with length 3. "
        assert isinstance(camera_ori, np.ndarray) and len(camera_ori) == 3, \
            "Error - camera_ori must be a numpy array with length 3. "
        self.pos = camera_pos.copy()
        self.ori = camera_ori.copy()

    def xdim(self):
        """
        Width of the frame in pixels.
        """
        return self.dims[0]

    def ydim(self):
        """
        Height of the frame in pixels.
        """
        return self.dims[1]

    def is_perspective(self):
        """
        True when this camera uses a perspective projection.
        """
        return 'persp' in self.proj.lower()

    def is_panoramic(self):
        """
        True when this camera uses a panoramic projection.
        """
        return 'pano' in self.proj.lower()

    def get_rotation_matrix(self):
        """
        Rotation matrix derived from this camera's Euler angles.
        """
        return spatial.transform.Rotation.from_euler('XYZ', -self.ori, degrees=True).as_matrix()
|
import datajoint as dj
from element_lab import lab
from element_animal import subject
from element_session import session_with_datetime as session
from element_event import trial, event
from element_miniscope import miniscope
from element_lab.lab import Source, Lab, Protocol, User, Location, Project
from element_animal.subject import Subject
from element_session.session_with_datetime import Session
from .paths import get_miniscope_root_data_dir, get_session_directory
# Ensure a 'custom' section exists in the DataJoint config so the
# prefix lookup below cannot fail.
if 'custom' not in dj.config:
    dj.config['custom'] = {}
# Optional prefix applied to every schema name on this database.
db_prefix = dj.config['custom'].get('database.prefix', '')
# Activate `lab`, `subject`, `session` schema ------------------------------------------
lab.activate(db_prefix + 'lab')
subject.activate(db_prefix + 'subject', linking_module=__name__)
# Alias required by downstream elements that expect an Experimenter table.
Experimenter = lab.User
session.activate(db_prefix + 'session', linking_module=__name__)
# Activate "event" and "trial" schema ---------------------------------
trial.activate(db_prefix + 'trial', db_prefix + 'event', linking_module=__name__)
# Declare table `Equipment` and `AnatomicalLocation` for use in element_miniscope ------
@lab.schema
class Equipment(dj.Manual):
    definition = """
    equipment : varchar(32)
    ---
    modality : varchar(64)
    description=null : varchar(256)
    """
@lab.schema
class AnatomicalLocation(dj.Manual):
    definition = """
    recording_location_id : varchar(16)
    ---
    anatomical_description: varchar(256)
    """
|
"""
"""
from typing import List, Set, Dict
from collections import Counter
import datetime as dttm
from app.utility.decorators import type_check
from app.engines.reddit import connect_to_reddit
from app.constants import SUB_REDDIT, BLACK_LISTED_STOCK_NAMES
from app.utility.nltk import (
get_stock_symbols,
toke_it,
remove_stop_words,
)
# Known stock ticker symbols, loaded once at import time.
STOCK_SYMBOLS: Set = get_stock_symbols()
@type_check(float)
def get_date(created_at: float) -> dttm.datetime:
    # Convert a reddit UNIX timestamp (seconds) to a local datetime.
    return dttm.datetime.fromtimestamp(created_at)
@type_check(list)
def only_keep_stocks(tokenized: List[str]) -> List[str]:
    """Filter a tokenized comment down to known stock symbols.

    Always returns a list (fix: the original returned only inside the
    non-empty branch, yielding None when nothing matched, which broke
    callers that append the result to a corpus).
    """
    adjusted_corpus: List[str] = [word for word in tokenized if word in STOCK_SYMBOLS]
    if adjusted_corpus:
        # Debug output only when at least one symbol matched.
        print(f"Before {tokenized}")
        print(adjusted_corpus)
    return adjusted_corpus
def get_submission_data(reddit_submission) -> Dict:
    """Collect the interesting fields of a reddit submission into a dict."""
    created = get_date(float(reddit_submission.created))
    return dict(
        title=reddit_submission.title,
        score=reddit_submission.score,
        id=reddit_submission.id,
        url=reddit_submission.url,
        comms_num=reddit_submission.num_comments,
        created=created,
        body=reddit_submission.selftext,
    )
def main() -> None:
    """Scrape hot threads of SUB_REDDIT and accumulate, per comment,
    the list of stock symbols it mentions (collected in `corpus`)."""
    corpus = []
    reddit = connect_to_reddit()
    subreddit = reddit.subreddit(SUB_REDDIT)
    top_subreddit = subreddit.hot(limit=50_000)
    for submission in top_subreddit:
        submission_data: Dict = get_submission_data(submission)
        sub_submission = reddit.submission(submission.id)
        print(submission.selftext)
        child_comments = [comment for comment in submission.comments]
        for com in child_comments:
            try:
                reddit_comment = reddit.comment(com)
                if reddit_comment.body == "":
                    continue
                # Upper-case tokens, drop stop words, then keep only
                # known stock symbols.
                tokenized_body: List[str] = toke_it(reddit_comment.body.upper())
                tokenized_body: List[str] = remove_stop_words(tokenized_body)
                tokenized_body: List[str] = only_keep_stocks(tokenized_body)
                corpus.append(tokenized_body)
            except Exception as e:
                # Best effort: one bad comment must not stop the scrape.
                print(e)
        print("onto new thread")
    # topics_data = pd.DataFrame(topics_dict)
    # topics_data["title"] = topics_data["title"].str.lower()
    # df = topics_data.query('title.str.contains("what are your moves")')
    # print(f"We have {df.shape} threads that need to be processed")
    # for id in df["id"].to_list():
    # submission = reddit.submission(df["id"].iloc[0])
if __name__ == "__main__":
    main()
|
"""
"""
import pytest
import numpy as np
from ..v4_sdss_assign_gri import assign_restframe_sdss_gri
@pytest.mark.xfail
def test1():
    """
    Smoke-test assign_restframe_sdss_gri on a random mock catalog.
    """
    n_gals = int(1e4)
    # ~30% of galaxies are satellites; satellites get a random host id.
    sat_mask = np.random.rand(n_gals) < 0.3
    n_sats = np.count_nonzero(sat_mask)
    upid = np.zeros(n_gals, dtype=int) - 1
    upid[sat_mask] = np.random.randint(100, 500, n_sats)
    mstar = 10 ** np.random.uniform(8, 12, n_gals)
    sfr_percentile = np.random.rand(n_gals)
    mhalo = 10 ** np.random.uniform(10, 15, n_gals)
    z = np.random.uniform(0, 3, n_gals)
    result = assign_restframe_sdss_gri(upid, mstar, sfr_percentile, mhalo, z)
    (magr, gr_mock, ri_mock,
     is_red_gr_mock, is_red_ri_mock) = result
|
# CMSSW configuration: re-run MET corrections on MiniAOD ("RERUN").
import FWCore.ParameterSet.Config as cms
# Define the CMSSW process
process = cms.Process("RERUN")
import PhysicsTools.PatAlgos.tools.helpers as configtools
patAlgosToolsTask = configtools.getPatAlgosToolsTask(process)
# Load the standard set of configuration modules
process.load('Configuration.StandardSequences.Services_cff')
patAlgosToolsTask.add(process.randomEngineStateProducer)
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Message Logger settings
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.destinations = ['cout', 'cerr']
process.MessageLogger.cerr.FwkReport.reportEvery = 1
# Set the process options -- Display summary at the end, enable unscheduled execution
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(False)
)
# How many events to process
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)
#configurable options =======================================================================
runOnData=False #data/MC switch
usePrivateSQlite=False #use external JECs (sqlite file)
useHFCandidates=True #create an additionnal NoHF slimmed MET collection if the option is set to false
redoPuppi=True # rebuild puppiMET
#===================================================================
### External JECs =====================================================================================================
from Configuration.AlCa.autoCond import autoCond
# Pick the global tag automatically for data vs MC.
if runOnData:
    process.GlobalTag.globaltag = autoCond['run2_data']
else:
    process.GlobalTag.globaltag = autoCond['run2_mc']
#Summer16_25nsV1_MC.db
if usePrivateSQlite:
    from CondCore.DBCommon.CondDBSetup_cfi import *
    import os
    if runOnData:
        era="Summer15_25nsV6_DATA"
    else:
        era="Summer15_25nsV6_MC"
    # Override the JEC payloads from an external conditions source.
    process.jec = cms.ESSource("PoolDBESSource",CondDBSetup,
        connect = cms.string( "frontier://FrontierPrep/CMS_COND_PHYSICSTOOLS"),
        #connect = cms.string('sqlite:'+era+'.db'),
        toGet = cms.VPSet(
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+era+"_AK4PF"),
                label= cms.untracked.string("AK4PF")
                ),
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+era+"_AK4PFchs"),
                label= cms.untracked.string("AK4PFchs")
                ),
            )
        )
    # Prefer these JECs over whatever the global tag provides.
    process.es_prefer_jec = cms.ESPrefer("PoolDBESSource",'jec')
### =====================================================================================================
# Define the input source
if runOnData:
    fname = '/store/relval/CMSSW_8_0_20/MET/MINIAOD/80X_dataRun2_relval_Candidate_2016_09_02_10_27_40_RelVal_met2016B-v1/00000/2E6B9138-1C7A-E611-AE72-0025905A60DE.root'
else:
    fname = '/store/relval/CMSSW_8_0_20/RelValZMM_13/MINIAODSIM/80X_mcRun2_asymptotic_2016_TrancheIV_v4_Tr4GT_v4-v1/00000/64F9C946-C57A-E611-AA05-0CC47A74527A.root'
# Define the input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring([ fname ])
)
### ---------------------------------------------------------------------------
### Removing the HF from the MET computation
### ---------------------------------------------------------------------------
if not useHFCandidates:
    # Keep only PF candidates outside the HF acceptance (|eta| < 3).
    process.noHFCands = cms.EDFilter("CandPtrSelector",
                                     src=cms.InputTag("packedPFCandidates"),
                                     cut=cms.string("abs(pdgId)!=1 && abs(pdgId)!=2 && abs(eta)<3.0")
                                     )
    patAlgosToolsTask.add(process.noHFCands)
#jets are rebuilt from those candidates by the tools, no need to do anything else
### =================================================================================
from PhysicsTools.PatUtils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD
#default configuration for miniAOD reprocessing, change the isData flag to run on data
#for a full met computation, remove the pfCandColl input
runMetCorAndUncFromMiniAOD(process,
                           isData=runOnData,
                           )
if not useHFCandidates:
    # Second MET sequence without HF candidates, stored with postfix NoHF.
    runMetCorAndUncFromMiniAOD(process,
                               isData=runOnData,
                               pfCandColl=cms.InputTag("noHFCands"),
                               reclusterJets=True, #needed for NoHF
                               recoMetFromPFCs=True, #needed for NoHF
                               postfix="NoHF"
                               )
if redoPuppi:
    from PhysicsTools.PatAlgos.slimming.puppiForMET_cff import makePuppiesFromMiniAOD
    makePuppiesFromMiniAOD( process );
    # Rebuild PUPPI MET from PF candidates, stored with postfix Puppi.
    runMetCorAndUncFromMiniAOD(process,
                               isData=runOnData,
                               metType="Puppi",
                               recoMetFromPFCs=True,
                               reclusterJets=True,
                               jetFlavor="AK4PFPuppi",
                               postfix="Puppi"
                               )
# Output module: keep only the (re-)corrected MET collections.
process.MINIAODSIMoutput = cms.OutputModule("PoolOutputModule",
    compressionLevel = cms.untracked.int32(4),
    compressionAlgorithm = cms.untracked.string('LZMA'),
    eventAutoFlushCompressedSize = cms.untracked.int32(15728640),
    outputCommands = cms.untracked.vstring( "keep *_slimmedMETs_*_*",
                                            "keep *_slimmedMETsNoHF_*_*",
                                            "keep *_patPFMet_*_*",
                                            "keep *_patPFMetT1_*_*",
                                            "keep *_patPFMetT1JetResDown_*_*",
                                            "keep *_patPFMetT1JetResUp_*_*",
                                            "keep *_patPFMetT1Smear_*_*",
                                            "keep *_patPFMetT1SmearJetResDown_*_*",
                                            "keep *_patPFMetT1SmearJetResUp_*_*",
                                            "keep *_patPFMetT1Puppi_*_*",
                                            "keep *_slimmedMETsPuppi_*_*",
                                            ),
    fileName = cms.untracked.string('corMETMiniAOD.root'),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string(''),
        dataTier = cms.untracked.string('')
        ),
    dropMetaData = cms.untracked.string('ALL'),
    fastCloning = cms.untracked.bool(False),
    overrideInputFileSplitLevels = cms.untracked.bool(True)
)
process.MINIAODSIMoutput_step = cms.EndPath(process.MINIAODSIMoutput, patAlgosToolsTask)
|
from hubcheck.pageobjects.basepagewidget import BasePageWidget
from hubcheck.pageobjects.basepageelement import Text
from hubcheck.exceptions import NoSuchFileAttachmentError
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
class UploadList2(BasePageWidget):
    """Page widget for the file-uploader list: query uploaded file
    names and delete uploaded files by name."""

    def __init__(self, owner, locatordict={}):
        super(UploadList2,self).__init__(owner,locatordict)
        # load hub's classes
        UploadList2_Locators = self.load_class('UploadList2_Locators')
        # update this object's locator
        self.locators.update(UploadList2_Locators.locators)
        # update the locators with those from the owner
        self.update_locators_from_owner()
        # setup page object's components
        self._updateLocators()

    def get_uploaded_files(self):
        """Return the list of uploaded file names (empty when none).

        Bug fix: the original collected names into `fnames` but returned
        the never-populated `fname`, so it always returned [].
        """
        elist = []
        try:
            elist = self.find_elements_in_owner(self.locators['filename'])
        except NoSuchElementException:
            pass
        # store filenames if there are any
        return [e.text for e in elist]

    def delete_file(self,filename):
        """Delete the uploaded file named `filename`.

        Waits for the row to disappear; raises NoSuchFileAttachmentError
        when no row matches the name.
        """
        if not filename:
            return
        elist = self.find_elements_in_owner(self.locators['row'])
        for e in elist:
            fnameEle = self.find_element(self.locators['filename'],e)
            if fnameEle.text == filename:
                self.logger.debug('deleting row with filename: %s' % (filename))
                deleteEle = self.find_element(self.locators['delete'],e)
                deleteEle.click()
                loc = self.locators['deletecheck'] % (filename)
                self.wait_until_not_present(locator=loc)
                break
        else:
            raise NoSuchFileAttachmentError(
                "file named \"%s\" not uploaded" % (filename))
class UploadList2_Locators_Base(object):
    """locators for UploadList2 object as seen on hubzero.org"""
    # 'deletecheck' carries a %s placeholder for the file name.
    locators = {
        'base' : "css=#file-uploader-list",
        'row' : "css=#file-uploader-list tr",
        'filetype' : "css=#file-uploader-list td:nth-of-type(1)",
        'filename' : "css=#file-uploader-list td:nth-of-type(2)",
        'delete' : "css=#file-uploader-list .delete",
        'deletecheck' : "xpath=//td//*[text()='%s']/../..",
    }
class UploadList2_Locators_Base_2(object):
    """locators for UploadList2 object as seen on dev.hubzero.org"""
    # NOTE(review): 'filetype' is intentionally empty here — the dev
    # layout has no file-type column, so the first cell is the name.
    locators = {
        'base' : "css=#file-uploader-list",
        'row' : "css=#file-uploader-list tr",
        'filetype' : "css=",
        'filename' : "css=#file-uploader-list td:nth-of-type(1)",
        'delete' : "css=#file-uploader-list .delete",
        'deletecheck' : "xpath=//td//*[text()='%s']/../..",
    }
|
import scadnano as sc
def create_design():
    """Build a four-helix scadnano design, each helix carrying one
    forward strand running from its min_offset to `length`."""
    length = 16
    #  (x, y, z) position and min_offset for each helix, in helix order.
    helix_specs = [
        ((0, 0, 0), 0),
        ((0, 3, 3), 8),
        ((2.5, -3, 8), 0),
        ((2.5, 1, 11), 8),
    ]
    helices = [
        sc.Helix(major_tick_distance=4, max_offset=length,
                 position=sc.Position3D(x=x, y=y, z=z), min_offset=start)
        for (x, y, z), start in helix_specs
    ]
    strands = [
        sc.Strand([sc.Domain(idx, True, start, length)])
        for idx, (_, start) in enumerate(helix_specs)
    ]
    return sc.Design(helices=helices, strands=strands, grid=sc.Grid.none)

if __name__ == '__main__':
    design = create_design()
    design.write_scadnano_file(directory='output_designs')
|
''' Database Settings '''
from masonite import env
from dotenv import find_dotenv, load_dotenv
from orator import DatabaseManager, Model
'''
|--------------------------------------------------------------------------
| Load Environment Variables
|--------------------------------------------------------------------------
|
| Loads in the environment variables when this page is imported.
|
'''
load_dotenv(find_dotenv())
'''
|--------------------------------------------------------------------------
| Database Settings
|--------------------------------------------------------------------------
|
| Set connection database settings here as a dictionary. Follow the
| format below to create additional connection settings.
|
| @see Orator migrations documentation for more info
|
'''
# All values come from the environment (.env); no credentials in code.
DATABASES = {
    'default': {
        'driver': env('DB_DRIVER'),
        'host': env('DB_HOST'),
        'database': env('DB_DATABASE'),
        'user': env('DB_USERNAME'),
        'password': env('DB_PASSWORD'),
        'prefix': ''
    }
}
# Shared manager/resolver used by every Orator model in the app.
DB = DatabaseManager(DATABASES)
Model.set_connection_resolver(DB)
|
import io
import os
import json
import zipfile
import subprocess
import logging
import shlex
import pathlib
from io import BufferedReader
from typing import Union
from hashlib import md5
from django.utils import timezone
from datetime import datetime
from core.enums.log_type_enum import LogType
from core.exceptions import HashJSONFailedException
from core.enums.plugin_status import PluginStatus
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-9s) %(message)s',)
def get_MD5(file: Union[BufferedReader, str]) -> str:
    """
    Create an MD5 hex digest of a file.

    Accepts either an open binary file-like object or a filesystem path.
    A path is opened and closed here (the original leaked the handle);
    an already-open reader is left open for the caller to manage.
    """
    chunk_size = 8192
    h = md5()
    if isinstance(file, str):
        with open(file, 'rb') as buffered_file:
            while chunk := buffered_file.read(chunk_size):
                h.update(chunk)
    else:
        while chunk := file.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()
def store_zip_file(file: BufferedReader, directory: str) -> None:
    """Create *directory* and write the uploaded file into it chunk by chunk."""
    os.mkdir(directory)
    target = directory + os.sep + file.name
    with open(target, 'wb+') as destination:
        destination.writelines(file.chunks())
def extract_zip(file: BufferedReader, directory: str):
    """
    Unpack every member of the given zip archive into *directory*.
    """
    with zipfile.ZipFile(file, 'r') as archive:
        archive.extractall(directory)
def build_zip_json(zip_bytes: io.BytesIO, plugin_source) -> None:
    """
    Builds a JSON file of the zip contents hashing each file and storing the hash.
    {
        filename: hash,
        directory/filename: hash
    }
    The map is stored on plugin_source.source_file_hash and a HASH_LIST
    log entry is written next to the plugin source.
    """
    data = {}
    # Context manager closes the archive (the original leaked the handle);
    # 'archive' also avoids shadowing the builtin zip().
    with zipfile.ZipFile(zip_bytes) as archive:
        for name in archive.namelist():
            if not name.endswith('/'):  # skip directory entries
                with archive.open(name) as member_file:
                    data[name] = get_MD5(member_file)
    plugin_source.source_file_hash = json.dumps(data)
    plugin_source.save()
    log_json: dict = {
        'log_datetime': datetime.timestamp(timezone.now()),
        'source_dest': plugin_source.source_dest,
        'source_hash': plugin_source.source_hash,
        'source_file_hash': json.loads(plugin_source.source_file_hash),
    }
    write_log(LogType.HASH_LIST, plugin_source, log_json)
def datetime_to_string(timezone: datetime) -> str:
    """Format a datetime as 'MM/DD/YYYY, HH:MM:SS'.

    NOTE(review): the parameter is (confusingly) named after the imported
    django timezone module and was annotated with it; the value passed must
    be a datetime instance. The name is kept for keyword-call compatibility.
    """
    return datetime.strftime(timezone, "%m/%d/%Y, %H:%M:%S")
def write_log(log_type: LogType, plugin_source, log: dict) -> None:
    """Serialize *log* to a new '<type>_<upload-timestamp>.json' file in the
    plugin source directory; mode 'x' refuses to overwrite an existing log."""
    path = (f"{plugin_source.source_dest}{os.sep}{log_type.value}"
            f"_{datetime.timestamp(plugin_source.upload_time)}.json")
    with open(path, 'x') as file:
        json.dump(log, file)
def run_subprocess(command: 'list[str]', cwd=None, timeout=None, shell=False) -> subprocess.CompletedProcess:
    """Run *command*, merging stderr into the captured stdout.

    Because stderr is redirected to stdout, CompletedProcess.stderr is None.
    Raises RuntimeError on a non-zero exit status, chained to the original
    CalledProcessError so the traceback keeps the real cause.
    """
    try:
        return subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, timeout=timeout, shell=shell, cwd=cwd)
    except subprocess.CalledProcessError as e:
        raise RuntimeError("command '{}' return with error (code {}): {}".format(command, e.returncode, e.output)) from e
def create_venv(plugin):
    """
    Create a virtual env for the specified plugin and pip-install its
    requirements.txt into it.

    Progress is recorded on the plugin model: VIRTUALENV while the env is
    created, DEPENDECIES while pip runs, then SUCCESS; stdout/stderr of each
    step are stored as they complete.
    """
    try:
        plugin.status = PluginStatus.VIRTUALENV
        plugin.save()
        venv_dest = plugin.plugin_dest + os.sep + '.venv'
        venv_command = [
            'python',
            '-m',
            'virtualenv',
            venv_dest,
            '-p',
            plugin.python_version
        ]
        venv_process: subprocess.CompletedProcess = run_subprocess(
            venv_command)
        plugin.status = PluginStatus.DEPENDECIES
        plugin.stdout = venv_process.stdout.decode('utf-8')
        # run_subprocess merges stderr into stdout, so .stderr is None here;
        # guard the decode (the original raised AttributeError on None).
        plugin.stderr = (venv_process.stderr or b'').decode('utf-8')
        plugin.save()
        python = venv_dest + os.sep + 'bin' + os.sep + 'python'
        requirements = plugin.plugin_dest + os.sep + 'requirements.txt'
        deps_command = [
            python,
            '-m',
            'pip',
            'install',
            '-r',
            requirements
        ]
        deps_process: subprocess.CompletedProcess = run_subprocess(
            deps_command)
        plugin.status = PluginStatus.SUCCESS
        plugin.stdout = deps_process.stdout.decode('utf-8')
        plugin.stderr = (deps_process.stderr or b'').decode('utf-8')
        plugin.save()
    except subprocess.CalledProcessError as cpe:
        # NOTE(review): run_subprocess converts CalledProcessError into
        # RuntimeError, so this handler looks unreachable — confirm intent.
        # NOTE(review): other PluginStatus members used here are upper-case;
        # verify 'Failed' is the correct attribute name.
        plugin.status = PluginStatus.Failed
        plugin.stdout = (cpe.stdout or b'').decode('utf-8')
        plugin.stderr = (cpe.stderr or b'').decode('utf-8')
        plugin.save()
# TODO: https://github.com/ICFL-UP/Yrden/issues/29
def validate_dir(directory: str, relative_path: str, hash_dict: dict):
    """
    Recursively verify every file under *directory* against the MD5 recorded
    in *hash_dict* (keyed by path relative to the plugin root). Raises
    HashJSONFailedException on the first mismatch; '.venv' trees are skipped.
    """
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        key = relative_path + entry
        if os.path.isfile(full_path):
            expected = hash_dict[key]
            actual = get_MD5(full_path)
            if expected != actual:
                raise HashJSONFailedException(
                    f'Hash for {key} [{expected}] does not match computed hash [{actual}]')
        elif os.path.isdir(full_path) and entry != '.venv':
            validate_dir(directory + os.sep + entry, key + '/', hash_dict)
def validate_plugin_hash(plugin) -> bool:
    """
    Validate the plugin source file hashes.

    Returns True when every file matches (the original was annotated -> bool
    but implicitly returned None); validate_dir raises
    HashJSONFailedException on the first mismatch.
    """
    validate_dir(plugin.plugin_dest, '', json.loads(
        plugin.plugin_source.source_file_hash))
    return True
def get_python_choices() -> 'list[(int, str)]':
    """
    Discover python interpreters on the host and return them as
    (index, path) tuples suitable for a choices field.
    """
    cwd = None
    cmd = 'find /bin/ -type f -executable -print -exec file {} \\; | grep python | grep -wE "ELF" | grep -o "\\/bin\\/.*:"'
    if os.name == 'nt':
        cmd = 'dir "AppData\\Local\\Microsoft\\WindowsApps\\python*.exe" /b'
        cwd = str(pathlib.Path.home())
    # Bug fix: with shell=True the command must be a single string. The
    # original shlex.split() the command into a list, which on POSIX hands
    # only the first word to the shell and silently drops the grep pipeline.
    python_versions_cp = run_subprocess(command=cmd, cwd=cwd, shell=True)
    versions = python_versions_cp.stdout.decode('utf-8').split('\n')
    python_versions: list[str] = []
    for version in versions:
        if os.name == 'nt':
            python_versions.append(version[:-1])  # strip trailing '\r'
        elif version.startswith('/bin/'):
            python_versions.append(version[:-1])  # strip the grep-added ':'
    return list(enumerate(python_versions))
|
from dagster import composite_solid, pipeline, solid
from food_ke.scripts.modes import dev, prod
from food_ke.scripts.ner import (
chunk_articles,
get_articles,
get_spans_from_chunks,
)
@composite_solid(required_resource_defs={"ner_training_io_manager"})
def get_ner_training_data_composite_solid():
    """Fetch articles, chunk them, then extract NER spans from the chunks."""
    articles = get_articles()
    get_spans_from_chunks(chunk_articles(articles))
@pipeline(mode_defs=[dev, prod])
def get_ner_training_data_pipeline():
    """Dagster pipeline (dev/prod modes) wrapping the NER composite solid."""
    get_ner_training_data_composite_solid()
|
import win32api
import win32con
class Controller:
    """Drives a game by synthesizing keyboard events via win32api.

    Windows virtual-key codes used: 0x25/0x27 left/right arrow, 0x26/0x28
    up/down arrow, 0x5A/0x58 Z/X (accelerate/brake), 0x31/0x35 '1'/'5'
    (start / insert coin), 0x70/0x71 F1/F2 (pause / frame step), 0x10 Shift.
    """
    @staticmethod
    def left():
        # Release right, then hold left down.
        win32api.keybd_event(0x27, 0, win32con.KEYEVENTF_KEYUP, 0)
        win32api.keybd_event(0x25, 0, 0, 0)
    @staticmethod
    def right():
        # Release left, then hold right down.
        win32api.keybd_event(0x25, 0, win32con.KEYEVENTF_KEYUP, 0)
        win32api.keybd_event(0x27, 0, 0, 0)
    @staticmethod
    def up():
        # Tap up (press then release).
        win32api.keybd_event(0x26, 0, 0, 0)
        win32api.keybd_event(0x26, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def down():
        # Tap down (press then release).
        win32api.keybd_event(0x28, 0, 0, 0)
        win32api.keybd_event(0x28, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def straight():
        # Release both steering keys.
        win32api.keybd_event(0x25, 0, win32con.KEYEVENTF_KEYUP, 0)
        win32api.keybd_event(0x27, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def accelerate():
        # Release brake, hold accelerator.
        win32api.keybd_event(0x58, 0, win32con.KEYEVENTF_KEYUP, 0)
        win32api.keybd_event(0x5A, 0, 0, 0)
    @staticmethod
    def brake():
        # Release accelerator, hold brake.
        win32api.keybd_event(0x5A, 0, win32con.KEYEVENTF_KEYUP, 0)
        win32api.keybd_event(0x58, 0, 0, 0)
    @staticmethod
    def coast():
        # Release both pedal keys.
        win32api.keybd_event(0x58, 0, win32con.KEYEVENTF_KEYUP, 0)
        win32api.keybd_event(0x5A, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def start():
        win32api.keybd_event(0x31, 0, 0, 0)
        win32api.keybd_event(0x31, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def insertCoin():
        win32api.keybd_event(0x35, 0, 0, 0)
        win32api.keybd_event(0x35, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def pause():
        win32api.keybd_event(0x70, 0, 0, 0)
        win32api.keybd_event(0x70, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def nextFrame():
        win32api.keybd_event(0x71, 0, 0, 0)
        win32api.keybd_event(0x71, 0, win32con.KEYEVENTF_KEYUP, 0)
    @staticmethod
    def startGame():
        # Bug fix: the original called insertCoin()/start() unqualified,
        # which raises NameError inside a staticmethod; the calls must be
        # qualified with the class.
        Controller.insertCoin()
        Controller.start()
    @staticmethod
    def changeView():
        win32api.keybd_event(0x10, 0, 0, 0)
        win32api.keybd_event(0x10, 0, win32con.KEYEVENTF_KEYUP, 0)
import Stats
import dbutils as db
from Stats import DM
# shorthand aliases for heavily used Stats helpers
split = DM.split_on_attributes  # split rows on a tuple of attribute names
array = Stats.array
def rows_to_data(rows):
    """Convert DB rows into an array of (lateness, trip_stop_weight) pairs."""
    pairs = [(row['lateness'], row['trip_stop_weight']) for row in rows]
    return array(pairs)
## Initial retrieval and sorting of data
# Grab data needed
cur = db.get_cursor()
db.SQLExec(cur,Stats.comparison_sql);
rows = cur.fetchall();
cur.close()
# Service ID split: '1' weekday, '2' Saturday, '3' Sunday (per the
# variable names below — confirm against the GTFS calendar table).
sids = split( ('service_id',), rows);
weekday_rows = sids[('1',)]
saturday_rows = sids[('2',)]
sunday_rows = sids[('3',)]
weekend_rows = saturday_rows + sunday_rows
# DoW split
dows = split( ('wday',), rows);
# Hour of day splits
weekday_hoas = split( ('hoa',), weekday_rows );
hoas = split( ('hoa',), rows );
# Route progress splits
stop_numbers = split( ('stop_number',), rows);
end_numbers = split( ('stops_before_end',), rows);
portions = split( ('route_portion',), rows);
data = rows_to_data(rows)
# Free the raw row lists as soon as their array form exists.
del rows
weekend_data = rows_to_data(weekend_rows)
del weekend_rows
saturday_data = rows_to_data(saturday_rows)
sunday_data = rows_to_data(sunday_rows)
weekday_data = rows_to_data(weekday_rows)
del saturday_rows,sunday_rows,weekday_rows
# One data array per hour of day, overall and weekday-only.
hoa_data = {}
for i in range(24):
    hoa_data[i] = rows_to_data(hoas[(i,)])
weekday_hoa_data = {}
for i in range(24):
    weekday_hoa_data[i] = rows_to_data(weekday_hoas[(i,)])
hoa_8 = weekday_hoa_data[8]
hoa_17 = weekday_hoa_data[17]
hoa_20 = weekday_hoa_data[20]
hoa_1 = weekday_hoa_data[1]
# Bucket rows by route progress percentage: <25 start, <75 middle, else end.
begin_route_rows = []
mid_route_rows = []
end_route_rows = []
for k in portions.keys():
    if k[0] < 25: begin_route_rows += portions[k]
    elif k[0] < 75: mid_route_rows += portions[k]
    else: end_route_rows += portions[k]
portion_data = {}
portion_data['Start of Route'] = rows_to_data(begin_route_rows)
portion_data['Middle of Route'] = rows_to_data(mid_route_rows)
portion_data['End of Route'] = rows_to_data(end_route_rows)
begin_data = portion_data['Start of Route']
mid_data = portion_data['Middle of Route']
end_data = portion_data['End of Route']
# Transfer-probability-vs-window curves for each data slice (no plotting yet).
pot_overall = Stats.p_make_transfer_vs_window(data,doplot=False)
pot_8 = Stats.p_make_transfer_vs_window(hoa_8,doplot=False)
#pot_12 = Stats.p_make_transfer_vs_window(hoa_12,doplot=False)
pot_17 = Stats.p_make_transfer_vs_window(hoa_17,doplot=False)
pot_20 = Stats.p_make_transfer_vs_window(hoa_20,doplot=False)
pot_1 = Stats.p_make_transfer_vs_window(hoa_1,doplot=False)
pot_weekend = Stats.p_make_transfer_vs_window(weekend_data,doplot=False)
pot_weekday = Stats.p_make_transfer_vs_window(weekday_data,doplot=False)
pot_saturday = Stats.p_make_transfer_vs_window(saturday_data,doplot=False)
pot_sunday = Stats.p_make_transfer_vs_window(sunday_data,doplot=False)
# Cross-portion transfers (arriving segment, departing segment).
pot_end_to_begin = Stats.p_make_transfer_vs_window(end_data,begin_data,
                                                   doplot=False)
pot_end_to_mid = Stats.p_make_transfer_vs_window(end_data,mid_data,
                                                 doplot=False)
pot_mid_to_mid = Stats.p_make_transfer_vs_window(mid_data,
                                                 doplot=False)
pot_mid_to_begin = Stats.p_make_transfer_vs_window(mid_data,begin_data,
                                                   doplot=False)
# Label -> curve mapping, handy for iterating/reporting.
potlabs = {}
potlabs['Overall'] = pot_overall;
potlabs['8am Weekday'] = pot_8;
potlabs['5pm Weekday'] = pot_17;
potlabs['8pm Weekday'] = pot_20;
potlabs['1am Weekday'] = pot_1;
potlabs['Weekday'] = pot_weekday
potlabs['Saturday'] = pot_saturday
potlabs['Sunday'] = pot_sunday
potlabs['End to Start'] = pot_end_to_begin;
potlabs['End to Middle'] = pot_end_to_mid;
potlabs['Middle to Middle'] = pot_mid_to_mid;
potlabs['Middle to Start'] = pot_mid_to_begin;
# NOTE(review): figure/plot/xlabel/ylabel/title/legend are not imported in
# this file — it appears to expect `from pylab import *` (or an interactive
# pylab session); confirm before running standalone.
figure()
plot(pot_overall[:,0],pot_overall[:,1],'k',label="Overall")
plot(pot_8[:,0],pot_8[:,1],'--',label="8 am Weekday")
plot(pot_17[:,0],pot_17[:,1],'--',label="5 pm Weekday")
plot(pot_1[:,0],pot_1[:,1],'--',label="1 am Weekday")
plot(pot_end_to_begin[:,0],pot_end_to_begin[:,1],
     ':',label="End to Start",linewidth=2)
plot(pot_end_to_mid[:,0],pot_end_to_mid[:,1],
     ':',label="End to Middle",linewidth=2)
plot(pot_mid_to_mid[:,0],pot_mid_to_mid[:,1],
     ':',label="Middle to Middle",linewidth=2)
plot(pot_mid_to_begin[:,0],pot_mid_to_begin[:,1],
     ':',label="Middle to Start",linewidth=2)
xlabel("Transfer window (s)")
ylabel("Probability of making transfer")
title("Probability of making transfer vs transfer windows")
legend(loc=4)  # loc=4: lower-right corner
#########################################################
#########################################################
#########################################################
|
#!/usr/bin/env python
from __future__ import print_function
import os
from glob import glob
def getsections():
    """Yield (is_file_marker, filename, section) tuples for every non-index
    .md file: a (True, filename, "") marker first, then one
    (False, filename, title) per '## ' heading in that file."""
    markdown_files = (name for name in glob("*.md") if name != "index.md")
    for filename in markdown_files:
        with open(filename, 'r') as handle:
            yield (True, filename, "")
            for line in handle.readlines():
                if line.startswith("## "):
                    yield (False, filename, line.replace("## ", "").strip())
def generateindex(sections):
    """Yield the lines of the functions index page: YAML front matter, then a
    '## <file>' heading per file marker and a markdown link per section."""
    yield from ("---", "layout: docsplus", "title: Functions",
                "section: language", "---")
    for (is_marker, filename, section) in sections:
        if is_marker:
            yield "## {}".format(filename.replace(".md", ""))
        else:
            anchor = section.lower().replace(" ", "-").replace(",", "")
            target = "/functions/{}#{}".format(filename.replace(".md", ".html"), anchor)
            yield "[{}]({})".format(section, target)
if __name__=="__main__":
    # join() accepts the generator directly; materializing a list first
    # was redundant.
    print('\n\n'.join(generateindex(getsections())))
|
import os
import uuid
import xml
import xml.etree.ElementTree
from xml.etree import ElementTree
import pytest
from pyPreservica import *
# Identifiers of fixture entities expected to exist on the test server.
FOLDER_ID = "ebd977f6-bebd-4ecf-99be-e054989f9af4"
ASSET_ID = "683f9db7-ff81-4859-9c03-f68cfa5d9c3d"
CO_ID = "0f2997f7-728c-4e55-9f92-381ed1260d70"
# Sample third-party metadata fragment used by the add/delete tests below.
XML_DOCUMENT = "<person:Person xmlns:person='https://www.person.com/person'>" \
               "<person:Name>Name</person:Name>" \
               "<person:Phone>01234 100 100</person:Phone>" \
               "<person:Email>test@test.com</person:Email>" \
               "<person:Address>Abingdon, UK</person:Address>" \
               "</person:Person>"
def test_get_folder_metadata():
    """The fixture folder's Dublin Core metadata carries the known identifier."""
    api = EntityAPI()
    folder = api.entity(EntityType.FOLDER, FOLDER_ID)
    dc_xml = api.metadata_for_entity(folder, "http://purl.org/dc/elements/1.1/")
    assert dc_xml is not None
    root = xml.etree.ElementTree.fromstring(dc_xml)
    identifier = root.find(".//{http://purl.org/dc/elements/1.1/}identifier")
    assert identifier.text == "LC-USZ62-43601"
def test_update_folder_metadata():
    """Round-trip: edit the DC description, verify it, then restore the
    original value so the fixture stays stable for the other tests."""
    client = EntityAPI()
    entity = client.entity(EntityType.FOLDER, FOLDER_ID)
    xml_string = client.metadata_for_entity(entity, "http://purl.org/dc/elements/1.1/")
    assert xml_string is not None
    document = xml.etree.ElementTree.fromstring(xml_string)
    identifier = document.find(".//{http://purl.org/dc/elements/1.1/}identifier")
    assert identifier.text == "LC-USZ62-43601"
    description = document.find(".//{http://purl.org/dc/elements/1.1/}description")
    # Fixture precondition: the stored description is the placeholder "a".
    assert description.text == "a"
    description.text = "description"
    xml_string = ElementTree.tostring(document, encoding='utf-8').decode("utf-8")
    folder = client.update_metadata(entity, "http://purl.org/dc/elements/1.1/", xml_string)
    # Re-fetch to confirm the server persisted the change.
    document = xml.etree.ElementTree.fromstring(client.metadata_for_entity(folder, "http://purl.org/dc/elements/1.1/"))
    description = document.find(".//{http://purl.org/dc/elements/1.1/}description")
    assert description.text == "description"
    # Restore the placeholder for subsequent runs.
    description.text = "a"
    xml_string = ElementTree.tostring(document, encoding='utf-8').decode("utf-8")
    folder = client.update_metadata(entity, "http://purl.org/dc/elements/1.1/", xml_string)
def test_add_folder_metadata_string():
    """Third-party metadata can be added from a string and removed again."""
    api = EntityAPI()
    folder = api.entity(EntityType.FOLDER, FOLDER_ID)
    assert len(folder.metadata) == 3
    updated = api.add_metadata(folder, "https://www.person.com/person", XML_DOCUMENT)
    assert len(updated.metadata) == 4
    person_xml = api.metadata_for_entity(updated, "https://www.person.com/person")
    root = xml.etree.ElementTree.fromstring(person_xml)
    assert root.find(".//{https://www.person.com/person}Name").text == "Name"
    updated = api.delete_metadata(updated, "https://www.person.com/person")
    assert len(updated.metadata) == 3
def test_get_asset_metadata():
    """The fixture asset's Dublin Core metadata records its source filename."""
    api = EntityAPI()
    asset = api.entity(EntityType.ASSET, ASSET_ID)
    dc_xml = api.metadata_for_entity(asset, "http://purl.org/dc/elements/1.1/")
    assert dc_xml is not None
    root = xml.etree.ElementTree.fromstring(dc_xml)
    source_name = root.find(".//{http://purl.org/dc/elements/1.1/}filename")
    assert source_name.text == "LC-USZ62-20901.tiff"
def test_get_all_asset_metadata():
    """Every metadata entry on the asset has a schema and parseable XML."""
    api = EntityAPI()
    asset = api.entity(EntityType.ASSET, ASSET_ID)
    for entry in api.all_metadata(asset):
        assert entry[0] is not None
        parsed = xml.etree.ElementTree.fromstring(entry[1])
        assert parsed is not None
def test_get_co_metadata():
    """Add and delete third-party metadata on a content object; the fragment
    must be absent before, retrievable after add, and gone after delete."""
    client = EntityAPI()
    entity = client.entity(EntityType.CONTENT_OBJECT, CO_ID)
    # Ensure a clean starting state in case a previous run left the fragment.
    entity = client.delete_metadata(entity, "https://www.person.com/person")
    xml_string = client.metadata_for_entity(entity, "https://www.person.com/person")
    assert xml_string is None
    co = client.add_metadata(entity, "https://www.person.com/person", XML_DOCUMENT)
    xml_string = client.metadata_for_entity(co, "https://www.person.com/person")
    document = xml.etree.ElementTree.fromstring(xml_string)
    name = document.find(".//{https://www.person.com/person}Name")
    assert name.text == "Name"
    e = client.delete_metadata(co, "https://www.person.com/person")
    xml_string = client.metadata_for_entity(e, "https://www.person.com/person")
    assert xml_string is None
def test_get_folder_metadata_file():
    """Metadata can be added to a folder from an open file handle."""
    client = EntityAPI()
    entity = client.entity(EntityType.FOLDER, FOLDER_ID)
    assert len(entity.metadata) == 3
    filename = str(uuid.uuid4()) + ".xml"
    try:
        # Context manager replaces the open/flush/close sequence and
        # guarantees the handle is closed even if a write fails.
        with open(filename, "wt", encoding="utf-8") as fd:
            fd.write(XML_DOCUMENT)
        with open(filename, "rt", encoding="utf-8") as file:
            folder = client.add_metadata(entity, "https://www.person.com/person", file)
        assert len(folder.metadata) == 4
        xml_string = client.metadata_for_entity(folder, "https://www.person.com/person")
        document = xml.etree.ElementTree.fromstring(xml_string)
        name = document.find(".//{https://www.person.com/person}Name")
        assert name.text == "Name"
        folder = client.delete_metadata(folder, "https://www.person.com/person")
        assert len(folder.metadata) == 3
    finally:
        # The original skipped cleanup whenever an assertion failed.
        os.remove(filename)
def test_get_asset_metadata_file():
    """Metadata can be added to an asset from an open file handle."""
    client = EntityAPI()
    entity = client.entity(EntityType.ASSET, ASSET_ID)
    assert len(entity.metadata) == 2
    filename = str(uuid.uuid4()) + ".xml"
    try:
        # Context manager replaces the open/flush/close sequence and
        # guarantees the handle is closed even if a write fails.
        with open(filename, "wt", encoding="utf-8") as fd:
            fd.write(XML_DOCUMENT)
        with open(filename, "rt", encoding="utf-8") as file:
            asset = client.add_metadata(entity, "https://www.person.com/person", file)
        assert len(asset.metadata) == 3
        xml_string = client.metadata_for_entity(asset, "https://www.person.com/person")
        document = xml.etree.ElementTree.fromstring(xml_string)
        name = document.find(".//{https://www.person.com/person}Name")
        assert name.text == "Name"
        asset = client.delete_metadata(asset, "https://www.person.com/person")
        assert len(asset.metadata) == 2
    finally:
        # The original skipped cleanup whenever an assertion failed.
        os.remove(filename)
|
from unittest import TestCase
from taskobra.orm import get_engine, get_session, ORMBase
class ORMTestCase(TestCase):
    """Base TestCase that empties every ORM table after each test."""
    def tearDown(self):
        # NOTE(review): get_engine("sqlite:///:memory:") looks like it could
        # bind a fresh, already-empty database on each call — confirm the
        # engine is cached/shared so these DELETEs hit the test database.
        with get_session(bind=get_engine("sqlite:///:memory:")) as session:
            for table in session.execute("SELECT * FROM sqlite_master WHERE type='table'"):
                # sqlite_master column 1 is the table name.
                session.execute(f"DELETE FROM '{table[1]}'")
|
""" A CatController Module """
from masonite.controllers import Controller
from masonite.request import Request
from app.Cat import Cat
class CatController(Controller):
    """CRUD controller for Cat resources.

    Each handler reads parameters from the injected Request and returns
    Orator query results for Masonite to serialize.
    """
    # Mass-assignable fields accepted from the request payload; previously
    # this list was duplicated in create() and update().
    _FIELDS = ("name", "color", "breed", "image", "description")

    def __init__(self, request: Request):
        self.request = request

    def _cat_input(self):
        """Collect the cat attributes from the request body."""
        return self.request.only(*self._FIELDS)

    def show(self):
        """Return the cat matching the 'id' route parameter."""
        cat_id = self.request.param("id")
        return Cat.where("id", cat_id).get()

    def index(self):
        """Return every cat."""
        return Cat.all()

    def create(self):
        """Create a cat from the request payload and return it."""
        return Cat.create(self._cat_input())

    def update(self):
        """Update the cat identified by 'id' and return the fresh record."""
        cat_id = self.request.param("id")
        Cat.where("id", cat_id).update(self._cat_input())
        return Cat.where("id", cat_id).get()

    def destroy(self):
        """Delete the cat identified by 'id' and return the removed record."""
        cat_id = self.request.param("id")
        cat = Cat.where("id", cat_id).get()
        Cat.where("id", cat_id).delete()
        return cat
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <mcculler@mit.edu>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
These are chebychev polynomials on a rotated domain. While they should be the chebychev polynomials form
F(ix) = Sum_i c_i * C_i(x / i)
so that evaluated along the imaginary line they are standard chebychev's, these are not those. Instead they are the chebychevs of
F(ix) = Sum_i c_i * (C_ei(x) + iC_oi(x))
such that all of the c_i coefficients are pure real, but the even polynomials provide the real part and the odd ones the imaginary part. This is easy to create a vandermonde matrix from and easy to solve. The roots of this form of polynomial will be paired left/right over the imaginary line rather than paired with conjugates as for pure real coefficients.
To relate the coefficients to the original requires them to be rotated 90 degrees
"""
import numpy as np
from . import chebychev
# from . import standard
valfromroots_lnG = chebychev.valfromroots_lnG
coeff_canonicalization_gain = chebychev.coeff_canonicalization_gain
def roots(c, X_scale=1):
    """Roots of the rotated-domain chebychev series *c*.

    Maps the coefficients onto a standard chebychev series (odd terms made
    imaginary for odd length, even terms for even length), solves there,
    then rotates the roots back by 90 degrees (see module docstring).
    """
    c = np.asarray(c, complex)
    c2 = np.copy(c)
    if len(c) % 2 == 1:
        c2[1::2] *= 1j
    else:
        c2[0::2] *= -1j
    # The leftover "CHECK COUNT!" debug prints and the unused SGN/count
    # diagnostics were removed.
    return 1j * chebychev.roots(c2, X_scale=X_scale)
def roots_lnG(c, X_scale=1):
    """Return (roots, lnG) of the rotated-domain series *c* by solving the
    equivalent standard chebychev series and rotating the roots back."""
    coeffs = np.copy(np.asarray(c, complex))
    if len(coeffs) % 2 == 1:
        coeffs[1::2] *= 1j
    else:
        coeffs[0::2] *= -1j
    cheb_roots, lnG = chebychev.roots_lnG(coeffs, X_scale=X_scale)
    # TODO, repair the real poly conjugate matching
    return 1j * cheb_roots, lnG
def fromroots_lnG(roots, X_scale=1):
    """Build real rotated-domain coefficients from *roots* (and lnG gain)."""
    rotated = np.asarray(roots, complex) * -1j
    c, lnG = chebychev.fromroots_lnG(rotated, X_scale=X_scale)
    if len(rotated) % 2 == 0:
        c[1::2] *= -1j
    else:
        c[0::2] *= +1j
    # Coefficients must come out purely real in this representation.
    assert np.all(c.imag == 0)
    return c.real, lnG
def val_lnG(X, c, X_scale=1, lnG=0):
    """Evaluate the rotated-domain series *c* at points *X*.

    Odd coefficients are made imaginary and the standard series is
    evaluated at X rotated by -90 degrees; for even-length series the
    resulting value's sign is flipped.
    """
    c2 = np.asarray(c, complex)
    c2 = np.copy(c2)
    c2[1::2] *= +1j
    if len(c2) % 2 == 1:
        return chebychev.val_lnG(X * -1j, c2, X_scale=X_scale, lnG=lnG)
    else:
        val, lnG = chebychev.val_lnG(X * -1j, c2, X_scale=X_scale, lnG=lnG)
        return -val, lnG
def vander_lnG(X, N, X_scale=1, lnG=0):
    """Vandermonde matrix of the rotated-domain basis at points *X*.

    Built from the standard chebychev vandermonde at +1j*X, with column
    rescalings chosen per parity of N to match the coefficient convention
    used by val_lnG.
    """
    V, lnG = chebychev.vander_lnG(X * +1j, N, X_scale=X_scale, lnG=lnG)
    V = V.astype(complex)
    if N % 2 == 1:
        V[:, 1::2] *= 1j
        V[:, 0::2] *= -1
    else:
        V[:, 1::2] *= -1j
    return V, lnG
def companion(c):
    """Companion matrix of the rotated-domain series *c* (rotated by 1j).

    NOTE(review): for even-length input this scales c2[0::2] by +1j whereas
    roots()/roots_lnG() use -1j — confirm the sign difference is intended.
    """
    c2 = np.asarray(c, complex)
    c2 = np.copy(c2)
    if len(c) % 2 == 1:
        c2[1::2] *= 1j
    else:
        c2[0::2] *= 1j
    return 1j * np.polynomial.chebyshev.chebcompanion(c2)
|
import pickle
import cv2 as cv
import numpy as np
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from config import im_size, pickle_file
class adict(dict):
    """Dict whose items are also reachable as attributes (d.key == d['key'])."""
    def __init__(self, *av, **kav):
        super().__init__(*av, **kav)
        # Aliasing __dict__ to the dict itself makes attribute and item
        # access interchangeable.
        self.__dict__ = self
def pad_collate(batch):
    """Right-pad each question and answer in *batch* with zeros up to the
    batch maxima, then delegate batching to torch's default_collate."""
    longest_question = float('-inf')
    longest_answer = float('-inf')
    for _, question, answer in batch:
        longest_question = max(longest_question, len(question))
        longest_answer = max(longest_answer, len(answer))
    for idx, (image, question, answer) in enumerate(batch):
        padded_question = np.pad(question, (0, longest_question - len(question)), 'constant', constant_values=0)
        padded_answer = np.pad(answer, (0, longest_answer - len(answer)), 'constant', constant_values=0)
        batch[idx] = (image, padded_question, padded_answer)
    return default_collate(batch)
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # Channel means / stds (ImageNet convention).
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
class MsCocoVqaDataset(Dataset):
    """MS-COCO VQA dataset yielding (image, question, answer) samples.

    Loads the preprocessed pickle (vocabulary plus train/val splits) once,
    then reads and resizes the matching COCO image on each access.
    """
    def __init__(self, mode='train'):
        self.mode = mode
        self.QA = adict()
        with open(pickle_file, 'rb') as file:
            data = pickle.load(file)
        self.QA.VOCAB = data['VOCAB']
        self.QA.IVOCAB = data['IVOCAB']
        self.train = data['train']
        self.val = data['val']
        # Bug fix: the original only set self.transformer for 'train', so
        # __getitem__ raised AttributeError in 'val' mode.
        self.transformer = data_transforms['train' if mode == 'train' else 'val']
    def set_mode(self, mode):
        """Switch between 'train' and 'val', keeping the transform in sync."""
        self.mode = mode
        self.transformer = data_transforms['train' if mode == 'train' else 'val']
    def __len__(self):
        if self.mode == 'train':
            return len(self.train[0])
        elif self.mode == 'val':
            return len(self.val[0])
    def __getitem__(self, index):
        if self.mode == 'train':
            images, questions, answers = self.train
            prefix = 'data/train2014/COCO_train2014_0000'
        else:  # self.mode == 'val'
            images, questions, answers = self.val
            prefix = 'data/val2014/COCO_val2014_0000'
        # COCO filenames zero-pad the image id to 12 digits total
        # (the prefix already carries the leading '0000').
        image_id = int(images[index])
        image_id = '{:08d}'.format(image_id)
        filename = prefix + image_id + '.jpg'
        img = cv.imread(filename)
        img = cv.resize(img, (im_size, im_size))
        img = transforms.ToPILImage()(img)
        img = self.transformer(img)
        question = questions[index]
        answer = answers[index]
        return img, question, answer
if __name__ == '__main__':
    # Smoke test: build one padded batch and report its shape and vocab size.
    dset_train = MsCocoVqaDataset()
    train_loader = DataLoader(dset_train, batch_size=2, shuffle=True, collate_fn=pad_collate)
    for batch_idx, data in enumerate(train_loader):
        images, questions, answers = data
        print('answers.size(): ' + str(answers.size()))
        break
    print(len(dset_train.QA.VOCAB))
|
'''Adjacency matrix of name co-occurrence built from an Excel sheet.'''
import numpy as np
import xlrd
data = xlrd.open_workbook('HLM.xlsx')
table = data.sheets()[1]  # second sheet holds the co-occurrence rows
m, n = table.nrows, table.ncols
# i = j = 0
# s: row cursor, t: first-column cursor, i: second-column cursor (used below)
s = t = i = 0
flag = 0
# print(m,n)
def cell(x, y):
    """Return the value of spreadsheet cell at (row x, column y)."""
    return table.cell(x, y).value
# Collect the set of all distinct, non-empty names in the sheet.
ss = set()
for i in range(m):
    for j in range(n):
        if cell(i, j) != "":
            ss.add(cell(i, j))
lst = [i for i in range(len(ss))]
lst1 = list(ss)  # index -> name lookup for the adjacency matrix
# print(lst1)
''''''
# Co-occurrence matrix over all collected names.
arr = np.zeros((len(ss), len(ss)))
# Bug fix: np.NaN is removed in NumPy 2 and threshold=NaN is rejected by
# newer set_printoptions; np.inf is the supported way to disable summarizing.
np.set_printoptions(threshold=np.inf)
# For every row, count co-occurrences of each ordered pair of distinct names.
while s < m:
    while t < n and cell(s, t) != "":
        while i < n and cell(s, i) != "":
            if i != t and cell(s, t) != cell(s, i):
                ii, jj = lst1.index(cell(s, t)), lst1.index(cell(s, i))
                # print(ii,jj)
                arr[ii][jj] += 1
                # arr[jj][ii] += 1
            i = i + 1
        i = 0
        t = t + 1
    t = 0
    s = s + 1
# Prune weak links: zero out co-occurrence counts below 9.
for ff in range(arr.shape[0]):
    for gg in range(arr.shape[0]):
        if arr[ff][gg] < 9:
            arr[ff][gg] = 0
# print(arr.shape)
# Mark names whose column sum is zero (no remaining incoming edges).
to_r = []
temp = [0 for i in range(arr.shape[0])]
for i in range(arr.shape[0]):
    if sum(arr[:, i]) == 0:
        to_r.append(i)
        temp[i] = 1
# Print the names that survive the pruning.
for i in range(arr.shape[0]):
    if temp[i] == 0:
        print(lst1[i], end='\t')
print()
# Drop the pruned rows/columns, then emit the matrix as tab-separated ints.
arr = np.delete(arr, to_r, 0)
arr = np.delete(arr, to_r, 1)
# print(arr.shape)
for ff in range(arr.shape[0]):
    for gg in range(arr.shape[0]):
        print(int(arr[ff][gg]), end='\t')
    print()
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from . import common
from .. import rw
from ..glossary import DEFAULT_TIMEOUT
from .base import BaseMessage
class CancelMessage(BaseMessage):
    """Cancellation message carrying a ttl, tracing info, and a reason."""
    __slots__ = BaseMessage.__slots__ + (
        'ttl',      # time budget passed through the wire format below
        'tracing',  # trace identifiers (common.Tracing)
        'why',      # human-readable cancellation reason string
    )
    def __init__(self, ttl=DEFAULT_TIMEOUT, tracing=None, why=None, id=0):
        super(CancelMessage, self).__init__(id)
        self.ttl = ttl
        # Default to an all-zero Tracing and an empty reason string.
        self.tracing = tracing or common.Tracing(0, 0, 0, 0)
        self.why = why or ''
# Wire-format serializer for CancelMessage; the trailing comments note the
# on-wire size of each field in bytes.
cancel_rw = rw.instance(
    CancelMessage,
    ('ttl', rw.number(4)),  # ttl:4
    ('tracing', common.tracing_rw),  # tracing:24
    ('why', rw.len_prefixed_string(rw.number(2))),  # why:2
)
|
from lego.apps.ical.models import ICalToken
from lego.apps.users.models import User
from lego.utils.test_utils import BaseTestCase
class TokenTestCase(BaseTestCase):
    """Tests for ICalToken creation and regeneration."""
    fixtures = ['test_abakus_groups.yaml', 'test_meetings.yaml', 'test_users.yaml']

    def setUp(self):
        self.user = User.objects.get(id=1)

    def test_generate_initial_token(self):
        """A freshly created token belongs to the user and is 64 chars long."""
        token, _ = ICalToken.objects.get_or_create(user=self.user)
        self.assertEqual(token.user, self.user)
        self.assertEqual(len(token.token), 64)

    def test_regenerate_token(self):
        """Deleting and recreating a token yields a different 64-char value."""
        original = ICalToken.objects.get_or_create(user=self.user)[0]
        previous_value = original.token
        original.delete()
        regenerated = ICalToken.objects.create(user=self.user)
        self.assertNotEqual(regenerated.token, previous_value)
        self.assertEqual(len(regenerated.token), 64)
|
"""
Name: arXiv Intelligence NER Web Service
Authors: Jonathan CASSAING
Web service specialized in Named Entity Recognition (NER), in Natural Language Processing (NLP)
"""
import json
import time
# PDF list used as references to compare
# the extracted named entities.
# The named entities used as references are saved
# in the folder tests/reference_json;
# those json files match this PDF list one-to-one.
pdf_list = ["https://arxiv.org/pdf/2203.10451.pdf",
            "https://arxiv.org/pdf/2203.10525.pdf",
            "https://arxiv.org/pdf/2203.08617.pdf",
            "https://arxiv.org/pdf/2203.07998.pdf",
            "https://arxiv.org/pdf/2203.07993.pdf",
            "https://arxiv.org/pdf/2203.07782.pdf",
            "https://arxiv.org/pdf/2203.07676.pdf",
            "https://arxiv.org/pdf/2203.07507.pdf",
            "https://arxiv.org/pdf/2203.08111.pdf",
            "https://arxiv.org/pdf/2203.08015.pdf"]
def get_medatadata(client, doc_url):
    """Upload *doc_url* to the service and poll until its metadata is ready.

    Returns the metadata dict once the status leaves PENDING, or None when
    a request fails or the upload is rejected (id == -1).
    NOTE(review): the name looks like a typo for 'get_metadata'; kept for
    compatibility with existing callers.
    """
    # Upload the document
    response = client.get("/?doc_url=" + doc_url)
    if response is None:
        # Bug fix: print() does not interpolate logging-style '%s' args;
        # format the message directly.
        print(f"GET /: Error while sending the file: {doc_url}")
        return None
    # Convert response to JSON
    message = json.loads(response.get_data(as_text=True))
    if message["id"] != -1:
        status = "PENDING"
        # Poll until the service reports something other than PENDING.
        while status == "PENDING":
            # Wait some seconds before the next request
            time.sleep(2)
            # Request metadata
            response = client.get("/document/metadata/" + str(message["id"]))
            if response is None:
                print(f"GET /document/metadata/: error while retrieving "
                      f"metadata of the file: {message['id']}")
                # Bug fix: the original set status='ERROR' and broke out,
                # then hit `return data` with `data` unbound (NameError).
                return None
            # Convert response to JSON
            data = json.loads(response.get_data(as_text=True))
            # Save the new status
            status = data["status"]
        # Polling finished: return the last payload
        return data
    return None
def test_performance_measurement(client):
    """This function compares the named entities from
    the json files in the folder tests/reference_json
    with the real responses of the Web Service."""
    for pdf in pdf_list:
        # For each PDF, we get metadata
        data = get_medatadata(client, pdf)
        if data is None:
            continue
        # Build the dataset to compare
        # We convert the JSON format
        # with the same format that
        # the JSON files in tests/reference_json
        data_to_compare = dict()
        data_to_compare["pdf_url"] = pdf
        data_to_compare["named_entities"] = []
        for named_entity in data["named_entities"]:
            if named_entity["type"] == "PERSON":
                data_to_compare["named_entities"].append(named_entity["text"])
        # We remove duplicates from the list
        # (dict.fromkeys keeps first-seen order, unlike set())
        data_to_compare["named_entities"] = list(dict.fromkeys(data_to_compare["named_entities"]))
        # Build the dataset as reference
        # We read all JSON in tests/reference_json
        filename = "tests/reference_json/" + pdf.rsplit('/', 1)[1] + ".json"
        with open(filename, 'r', encoding="utf-8") as file:
            ref_data = file.read()
        ref_data = json.loads(ref_data)
        # We compare both datasets
        named_entities_match = 0
        named_entities_error = 0
        for named_entity in data_to_compare["named_entities"]:
            if named_entity in ref_data["named_entities"]:
                # For each named entity found
                named_entities_match += 1
            else:
                # If the named entity is a false positive
                named_entities_error += 1
        # Computing the score
        # NOTE(review): raises ZeroDivisionError if a reference file has an
        # empty named_entities list — confirm references are never empty.
        accuracy = (named_entities_match * 100) / len(ref_data["named_entities"])
        result = dict()
        result["pdf_url"] = pdf
        result["named_entities_found"] = named_entities_match
        result["actual_named_entities"] = len(ref_data["named_entities"])
        result["accuracy"] = str(round(accuracy, 3)) + " %"
        result["accuracy_description"] = "Percentage based on number of named entities found "\
            "vs. reference"
        result["error"] = named_entities_error
        result["error_description"] = "Number of named entities as false positives"
        # Saving result in tests/reference_json
        filename = "tests/reference_json/result_" + pdf.rsplit('/', 1)[1] + ".json"
        with open(filename, 'w', encoding="utf-8") as file:
            json.dump(result, file)
def create_json_comparator(client):
    """Create reference JSON files in tests/reference_json.

    These files are later used as the ground truth for the named-entity
    performance measurement.
    """
    for pdf_url in pdf_list:
        # For each PDF, fetch its metadata from the Web Service.
        metadata = get_medatadata(client, pdf_url)
        if metadata is None:
            continue
        # Keep the PDF url plus the PERSON named entities, de-duplicated
        # while preserving first-seen order.
        persons = [
            entity["text"]
            for entity in metadata["named_entities"]
            if entity["type"] == "PERSON"
        ]
        reference = {
            "pdf_url": pdf_url,
            "named_entities": list(dict.fromkeys(persons)),
        }
        # The json files are saved in tests/reference_json.
        out_path = "tests/reference_json/" + pdf_url.rsplit('/', 1)[1] + ".json"
        with open(out_path, 'w', encoding="utf-8") as out_file:
            json.dump(reference, out_file)
|
from talon.voice import Context
from . import browser
from ...misc import audio
# Voice-command context that is active only while the browser URL matches
# netflix.com.
context = Context(
    "netflix", func=browser.url_matches_func("https://www.netflix.com/.*")
)
# "full screen": raise the system volume to 100, then send the "f" key to the
# page (presumably Netflix's fullscreen shortcut -- confirm).
context.keymap(
    {"full screen": [lambda m: audio.set_volume(100), browser.send_to_page("f")]}
)
|
# Q: Add all the natural numbers below one thousand that are multiples of 3 or 5.
# A: 233168
# FIX: the original used Python-2-only `xrange`, which raises NameError on
# Python 3. `range` plus a generator expression works on both and avoids
# materialising an intermediate list.
result = sum(i for i in range(1, 1000) if i % 3 == 0 or i % 5 == 0)
|
import sys
import subprocess as sp
# Name of the target Linux distribution, taken from the first CLI argument.
LinuxDis = sys.argv[1]
## Install python3-gdal via the distribution's package manager.
if LinuxDis == "Ubuntu":
    sp.call("sudo apt-get install -y python3-gdal", shell=True)
elif LinuxDis == "CentOS":
    sp.call("sudo yum install -y python3-gdal", shell=True)
## Install Python dependencies from requirements.txt with pip.
# NOTE(review): `library` is read but never used -- pip reads
# requirements.txt itself on the next line. Candidate for removal.
with open("./requirements.txt", "r") as f:
    library = f.read().split("\n")
sp.call("pip install -r requirements.txt", shell=True)
|
import requests
import logging
import pytz
from django.core.mail import EmailMessage
from vaccine_tracker.models import UsersData
from celery import shared_task
from tracker.celery import app
from vaccine_tracker.email import email_body, email_subject
from django.conf import settings
from django_celery_results.models import TaskResult
from datetime import datetime, timedelta
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Browser-like User-Agent sent with CoWIN API requests -- presumably because
# the API rejects default library user agents; confirm.
headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36"}
@app.task
def clean_up_task_result_every_15_mins():
    """Periodic task: delete celery TaskResult rows older than the threshold.

    NOTE(review): the task name says 15 mins but the threshold is 10
    minutes -- confirm which is intended.
    """
    time_threshold = datetime.now(tz=pytz.UTC) - timedelta(minutes=10)
    tasks = TaskResult.objects.filter(date_created__lt=time_threshold)
    try:
        tasks.delete()
    except Exception:
        # FIX: log the traceback instead of discarding the exception
        # (the original bound it to an unused `e` and logged nothing useful).
        logger.exception("[INFO] Cannot delete")
    else:
        logger.info("[INFO] Deleted tasks results")
@app.task
def check_for_slot_every_15_mins():
    """Periodic task: check vaccine slot availability for every registered
    user and queue a notification email when matching slots exist."""
    users = UsersData.objects.all()
    for user in users:
        # FIX: use the module logger instead of print so output goes to the
        # configured logging backend, consistent with the other tasks.
        logger.info(
            "[INFO] Checking %s %s %s %s %s",
            user.district_id, user.pincode, user.district_name,
            user.email_id, user.min_age_limit,
        )
        email_data = check_slot_available(
            user.district_id, user.district_name, user.pincode, user.min_age_limit
        )
        # Truthiness check replaces `len(email_data) > 0`.
        if email_data:
            SendEmailTask.delay(user.district_name, to_email=[user.email_id], email_data=email_data)
@shared_task
def SendEmailTask(district_name, to_email, email_data):
    """
    Celery task for sending email. uses default django mailer
    Please do configure email settings in settings.py file
    :param district_name: district used to build the email subject
    :param email_data: slot availability data used to build the email body
    :param to_email: list of recipient addresses
    :return: None
    """
    email = EmailMessage(subject=email_subject(district_name),
                         body=email_body(email_data),
                         from_email=settings.EMAIL_HOST_USER,
                         to=to_email)
    # Lazy %-style args avoid building the message when the level is disabled.
    logger.info('[INFO] Sending email to %s', to_email)
    email.send()
    # Plain literal -- the original used an f-string with no placeholders.
    logger.info('[INFO] Email has been successfully send')
def check_slot_available(district_id, district_name, pincode, age_limit=18):
    """Query the CoWIN public calendar API for a district and return the
    centers (matching pincode or district name) that have bookable sessions
    for the given minimum age limit.

    :param district_id: CoWIN district id used in the API query
    :param district_name: district name matched against returned centers
    :param pincode: pincode matched against returned centers
    :param age_limit: minimum age limit a session must advertise (default 18)
    :return: list of dicts with center name, pincode and per-session capacity
    """
    date = datetime.now().strftime("%d-%m-%Y")
    response = requests.get(f"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public"
                            f"/calendarByDistrict?district_id={district_id}&date={date}", headers=headers)
    email_data = []
    if response.status_code == 200:
        print("got the response now checking")
        # FIX: renamed from `json` to avoid shadowing the common module name.
        payload = response.json()
        # FIX: guard against a payload without "centers" (the original raised
        # TypeError when the key was absent or null).
        centers = payload.get('centers') or []
        for center in centers:
            if center.get('pincode') == pincode or center.get('district_name') == district_name:
                for session in center.get('sessions') or []:
                    capacity = []
                    # A session qualifies with >=2 doses at the requested age limit.
                    if session.get('available_capacity') >= 2:
                        if session.get('min_age_limit') == int(age_limit):
                            capacity.append({'available_capacity': session.get('available_capacity'), 'date': session.get('date')})
                    if capacity:
                        email_data.append({'center_name': center.get('name'), 'pincode': center.get('pincode'), 'available_capacity': capacity})
    return email_data
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_blobs
from sklearn.neighbors import kneighbors_graph
from scipy.sparse import *
from scipy import *
from autosp import predict_k
def consistent_labels(labels):
    """Achieve "some" consistency of color between true labels and pred labels.

    Maps each distinct label to 0, 1, 2, ... in order of first appearance.

    Parameters
    ----------
    labels : sequence of integers, shape: n_samples
        The labels of the clusters.

    Returns
    -------
    color_map : dict object {integer: integer}
        The map of labels.
    """
    # FIX: single pass over the labels. The original scanned with a manual
    # index until it had seen max(labels) + 1 distinct values, which runs off
    # the end (IndexError) whenever the label values are not consecutive
    # integers starting at 0.
    color_map = {}
    for label in labels:
        if label not in color_map:
            color_map[label] = len(color_map)
    return color_map
if __name__ == "__main__":
    # Generate artificial datasets.
    number_of_blobs = 6  # You can change this!!
    # 2-D coordinates of the six points, used only for plotting below.
    datax = [0.3, 0.4, 0.6, 0.2, 0.5, 0.4]
    datay = [0.2, 0.5, 0.4, 0.6, 0.2, 0.6]
    # Calculate affinity_matrix.
    # Hand-built symmetric sparse affinity matrix for the 6 points; larger
    # values mean stronger similarity.
    affinity_matrix = csr_matrix( (6,6), dtype=float )
    affinity_matrix[0,1] = 0.8
    affinity_matrix[1,0] = 0.8
    affinity_matrix[0,2] = 0.6
    affinity_matrix[2,0] = 0.6
    affinity_matrix[0,4] = 0.1
    affinity_matrix[4,0] = 0.1
    affinity_matrix[1,2] = 0.8
    affinity_matrix[2,1] = 0.8
    affinity_matrix[2,3] = 0.2
    affinity_matrix[3,2] = 0.2
    affinity_matrix[3,4] = 0.8
    affinity_matrix[4,3] = 0.8
    affinity_matrix[3,5] = 0.7
    affinity_matrix[5,3] = 0.7
    affinity_matrix[4,5] = 0.8
    affinity_matrix[5,4] = 0.8
    # NOTE: Python-2 print statements below -- this script targets Python 2.
    print affinity_matrix.todense()
    # auto_spectral_clustering
    # predict_k estimates the number of clusters from the affinity matrix.
    k = predict_k(affinity_matrix)
    sc = SpectralClustering(n_clusters=k,
                            affinity="precomputed",
                            assign_labels="kmeans").fit(affinity_matrix)
    labels_pred = sc.labels_
    print("%d blobs(artificial datasets)." % number_of_blobs)
    print("%d clusters(predicted)." % k)
    # Plot.
    from pylab import *
    labels_true = [0,0,0,1,1,1]
    # Remap both label sets to first-appearance order so colors line up
    # between the two scatter plots.
    t_map = consistent_labels(labels_true)
    t = [t_map[v] for v in labels_true]
    p_map = consistent_labels(labels_pred)
    p = [p_map[v] for v in labels_pred]
    print labels_pred
    print p_map
    print t
    print p
    subplot(211)
    title("%d blobs(artificial datasets)." % number_of_blobs)
    scatter(datax, datay, s=150, c=t)
    subplot(212)
    title("%d clusters(predicted)." % k)
    scatter(datax, datay, s=150, c=p)
    show()
|
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import azure_blob_helper
SAVE_DIR = "/data/mnist/checkpoints/"
tf.app.flags.DEFINE_integer('model_version', 2, 'version number of the model.')
FLAGS = tf.app.flags.FLAGS
class Model:
    """Softmax-regression MNIST model (TF1 graph mode) with helpers to train,
    predict, save/restore checkpoints and export a SavedModel for serving."""
    # The graph is built once at class-definition time and shared through a
    # single interactive session.
    sess = tf.InteractiveSession()
    # Serving input: serialized tf.Example protos carrying a 784-float "x".
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign name
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, w) + b
    values, indices = tf.nn.top_k(y, 10)
    # FIX: `range` replaces Python-2-only `xrange`, which raises NameError at
    # class-definition time under Python 3; behaviour is identical here.
    table = tf.contrib.lookup.index_to_string_table_from_tensor(
        tf.constant([str(i) for i in range(10)]))
    prediction_classes = table.lookup(tf.to_int64(indices))

    def train(self):
        """Train with SGD on MNIST for 1000 mini-batches, then print test accuracy."""
        # Import training data
        mnist = input_data.read_data_sets('/app/MNIST_data/', one_hot=True)
        # Define loss and optimizer
        y_ = tf.placeholder(tf.float32, [None, 10])
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=self.y))
        train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
        tf.global_variables_initializer().run()
        # Train
        for _ in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            self.sess.run(train_step, feed_dict={self.x: batch_xs, y_: batch_ys})
        # Test trained model
        correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print(self.sess.run(accuracy, feed_dict={self.x: mnist.test.images,
                                                 y_: mnist.test.labels}))

    def predict(self, x):
        """Return softmax class probabilities for the input batch `x`."""
        feed_dict = {self.x: x}
        prediction = self.sess.run(tf.nn.softmax(self.y), feed_dict)
        return prediction

    def save(self, toblob=False):
        """Save a checkpoint under SAVE_DIR; optionally upload it to Azure blob."""
        # Idiomatic truthiness test replaces `== False`.
        if not os.path.isdir(SAVE_DIR):
            os.makedirs(SAVE_DIR)
        saver = tf.train.Saver()
        save_path = saver.save(self.sess, os.path.join(SAVE_DIR, "model"))
        print("Model saved in file: %s" % save_path)
        if toblob:
            azure_blob_helper.upload_checkpoint_files(SAVE_DIR)
            print("Model saved to blob")

    def export(self, toblob=False):
        """Export the graph as a versioned SavedModel for TensorFlow Serving.

        NOTE(review): the `toblob` parameter is accepted but unused here --
        confirm whether the exported model should also be uploaded.
        """
        tf.global_variables_initializer().run()
        # Export model to tensorflow serving
        export_path = os.path.join(SAVE_DIR, str(FLAGS.model_version))
        print("Exporting trained model to %s" % export_path)
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)
        # Build the signature_def_map.
        classification_inputs = tf.saved_model.utils.build_tensor_info(
            self.serialized_tf_example)
        classification_outputs_classes = tf.saved_model.utils.build_tensor_info(
            self.prediction_classes)
        classification_outputs_scores = tf.saved_model.utils.build_tensor_info(self.values)
        classification_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={
                    tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                        classification_inputs
                },
                outputs={
                    tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
                        classification_outputs_classes,
                    tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
                        classification_outputs_scores
                },
                method_name=tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME))
        tensor_info_x = tf.saved_model.utils.build_tensor_info(self.x)
        tensor_info_y = tf.saved_model.utils.build_tensor_info(self.y)
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_x},
                outputs={'scores': tensor_info_y},
                method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
        # legacy_init_op initialises the lookup table when the model is loaded.
        legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
        builder.add_meta_graph_and_variables(
            self.sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict_images':
                    prediction_signature,
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    classification_signature,
            },
            legacy_init_op=legacy_init_op)
        builder.save()
        print("Done exporting!")

    def restore(self, fromblob=False):
        """Restore the latest checkpoint, optionally downloading it from Azure blob first."""
        if not os.path.isdir(SAVE_DIR):
            os.makedirs(SAVE_DIR)
        if fromblob:
            azure_blob_helper.download_checkpoint_files(SAVE_DIR)
        saver = tf.train.Saver()
        #saver = tf.train.import_meta_graph(os.path.join(save_dir, "model.meta"))
        saver.restore(self.sess, os.path.join(SAVE_DIR, "model"))
        print("Model restored from: %s" % os.path.join(SAVE_DIR, "model"))
|
class FileManager:
    """
    Class to implement file access to store inventory to maintain persistence
    """

    def __init__(self):
        # Last inventory contents read from disk.
        self.inventory = ''

    @staticmethod
    def clear_inventory_file():
        """
        Static method to clear inventory file after winning a game
        Returns:
            bool
        """
        # Writing an empty string in 'w' mode truncates the file.
        return FileManager.write_inventory_file('')

    @staticmethod
    def write_inventory_file(inventory_item):
        """
        Static method to write inventory item to inventory file upon picking it up
        Params:
            inventory_item: str
        Returns:
            bool
        """
        try:
            with open('inventory', 'w') as handle:
                handle.write(inventory_item)
        except OSError:
            return False
        return True

    def read_inventory_file(self):
        """
        Method to read inventory file and return its contents
        Return:
            str or bool
        """
        try:
            with open('inventory', 'r') as handle:
                self.inventory = handle.read()
        except OSError:
            return False
        return self.inventory
|
"""
You are given a dictionary/hash/object containing some languages and your test results in the given languages.
Return the list of languages where your test score is at least 60, in descending order of the results.
Note: the scores will always be unique (so no duplicate values)
"""
def my_languages(results: dict) -> list:
    """Returns all languages with score at least 60, best score first.

    Args:
        results (dict): mapped languages with scores

    Examples:
        >>> assert my_languages({"Java": 10, "Ruby": 80, "Python": 65}) == ["Ruby", "Python"]
    """
    # FIX: `score >= 60` states the documented "at least 60" rule directly;
    # the original's `score > 59` admits fractional scores such as 59.5.
    return [
        language
        for language, score in sorted(
            results.items(), key=lambda pair: pair[1], reverse=True
        )
        if score >= 60
    ]


if __name__ == "__main__":
    print(my_languages({"Java": 10, "Ruby": 80, "Python": 65}))
|
import time
import os
import re
import math
import keyboard
from datetime import datetime
import tkinter as tk
from tkinter import filedialog
def ClearConsole():
    """Clear the terminal, using the command appropriate for this platform."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
# HH:MM:SS timestamps of the run currently being measured.
startingTimeStamp = ""
endingTimeStamp = ""
#Regex for HH:MM:SS
timePattern = re.compile(r"\d{2}:\d{2}:\d{2}")
# Completed run durations (seconds) and start-time -> duration mapping.
runs = []
realRuns = {}
def RunIndex():
    """Return the next 1-based run number (stateful function-attribute counter)."""
    RunIndex.counter = RunIndex.counter + 1
    return RunIndex.counter
RunIndex.counter = 0
# a snippet grabbed from stackoverflow; an asynchronous approach would be
# nicer, but polling works for tailing a log file.
def Follow(thefile):
    """Block until a new line appears at the end of `thefile`, then return it."""
    thefile.seek(0, os.SEEK_END)  # start watching from the current end of file
    while True:
        new_line = thefile.readline()
        if new_line:
            return new_line
        time.sleep(0.1)  # nothing yet; poll again shortly
def CheckLog(thefile, endOfFile):
    """Re-seek `thefile` to its end when its current end differs from `endOfFile`."""
    current_end = thefile.seek(0, os.SEEK_END)
    if current_end != endOfFile:
        thefile.seek(0, os.SEEK_END)
# Returns the difference between two datetime objects in seconds
def TimeDifferenceInSeconds(timeStamp1, timeStamp2):
    """Seconds elapsed from timeStamp1 to timeStamp2, both "HH:MM:SS" strings.

    A negative difference wraps around midnight (timedelta.seconds semantics).
    """
    fmt = "%H:%M:%S"
    start = datetime.strptime(timeStamp1, fmt)
    end = datetime.strptime(timeStamp2, fmt)
    return (end - start).seconds
# Prints the run with the information given
def PrintRun(timeStarted, seconds):
    """Record a completed run and print its number, duration and running average.

    :param timeStarted: HH:MM:SS string for when the run began
    :param seconds: run duration in seconds
    """
    # Record the run in the module-level history.
    runs.append(seconds)
    realRuns[str(timeStarted)] = seconds
    # Build the string to print.
    timeStringText = "Run No: " + str(RunIndex()) + " | "
    timeStringText = timeStringText + str(seconds) + " second(s)\t"
    # FIX: removed the unreachable len(runs) == 0 guard -- `seconds` was
    # appended to `runs` just above, so the list can never be empty here.
    averageTime = sum(runs)/len(runs)
    timeStringText = timeStringText + " | Average time: " + str(round(averageTime,3)) + " seconds."
    print(timeStringText)
    return
def WatchLog(RunActive, logFile):
    """Tail the client log forever, timing runs between zone-entry lines.

    A run starts on entering an Ossuary/Blood zone and ends at the next
    matching entry line; the duration is recorded and printed via PrintRun.
    """
    while True:
        # FIX: the original stored the bound method (`logFile.tell` without
        # parentheses), so the fallback seek below would have raised
        # TypeError had it ever been reached.
        where = logFile.tell()
        line = Follow(logFile)
        if not line:
            logFile.seek(where)
        else:
            # `in` replaces the unidiomatic `__contains__` calls.
            if "You have entered " in line:
                if "Ossuary" in line or "Blood" in line:  # yeah I was recording blood aquaduct times, shoot me
                    if not RunActive:
                        RunActive = True
                        startingTimeStamp = timePattern.search(line).group(0)
                    elif RunActive:
                        RunActive = False
                        endingTimeStamp = timePattern.search(line).group(0)
                        runTimeInSeconds = TimeDifferenceInSeconds(startingTimeStamp, endingTimeStamp)
                        PrintRun(startingTimeStamp, runTimeInSeconds)
        #potential place to exit while for keyboard input
def main():
    """Entry point: ask for the client.txt log via a file dialog and watch it."""
    # Hide the root tk window; only the file-open dialog should be visible.
    root = tk.Tk()
    root.withdraw()
    # Ask for the client.txt location, and open it.
    logFileName = filedialog.askopenfilename()
    logFile = open(logFileName, 'r')
    runStarted = False
    stopped = False
    ClearConsole()
    while not stopped:
        WatchLog(runStarted, logFile)
        # if keyboard.is_pressed(keyboard._pressed_events['q']):
        #     stopped = True
        # print(keyboard._pressed_events)
if __name__ == "__main__":
    main()
"""
Translator from output neuron spike trains to actions
for the environment. Actioned determined based on neuron
firing rate greater than action_threshold or not, as
`output_range[firing_rate >= action_threshold]`.
"""
import numpy as np
from spikey.module import Module, Key
from spikey.snn.readout.template import Readout
class Threshold(Readout):
    """
    Translator from output neuron spike trains to actions
    for the environment. Actioned determined based on neuron
    firing rate greater than action_threshold or not, as
    `output_range[firing_rate >= action_threshold]`.

    Parameters
    ----------
    kwargs: dict
        Dictionary with values for each key in NECESSARY_KEYS.

    Examples
    --------
    .. code-block:: python

        config = {
            "n_outputs": 10,
            "magnitude": 2,
            "output_range": [-1, 1],
            "action_threshold": .5,
        }
        readout = Threshold(**config)
        readout.reset()
        action = readout(np.ones((10, config["n_outputs"])))

    .. code-block:: python

        class network_template(Network):
            keys = {
                "n_outputs": 10,
                "magnitude": 2,
                "output_range": [-1, 1],
                "action_threshold": .5,
            }
            parts = {
                "readout": Threshold
            }
    """

    NECESSARY_KEYS = Readout.extend_keys(
        [
            Key(
                "action_threshold",
                "float or 'mean' Output neuron rate threshold to trigger high state.",
            ),
            Key(
                "output_range",
                "list[float] Range of values output can produce.",
                default=[0, 1],
            ),
        ]
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # In 'mean' mode the threshold adapts to the history of observed rates.
        if self._action_threshold == "mean":
            self.rate_log = []

    # FIX: annotation changed from `np.bool` to `np.ndarray` -- the `np.bool`
    # alias was deprecated in NumPy 1.20 and removed in 1.24, making the
    # original raise AttributeError when this class is defined.
    def __call__(self, output_spike_train: np.ndarray) -> object:
        """
        Interpret the output neuron's spike train.
        Called once per game step.

        Parameters
        ----------
        output_spike_train: np.ndarray[t, n_neurons, dtype=bool]
            Spike train with train[-1] being the most recent time.

        Returns
        -------
        output_range[rate >= threshold] Selected action based on whether rate was
        greater than threshold or not.
        """
        if self._n_outputs == 0:
            return 0
        rate = np.mean(output_spike_train) / self._magnitude
        if self._action_threshold == "mean":
            # Compare against the mean of past rates (0 before any history),
            # then record the current rate for future steps.
            threshold = np.mean(self.rate_log) if self.rate_log else 0
            self.rate_log.append(rate)
        else:
            threshold = self._action_threshold
        action = self._output_range[bool(rate >= threshold)]
        return action
|
#!/usr/bin/python2
# Copyright (c) 2014, 2015 Mathias Laurin
# BSD 3-Clause License (http://opensource.org/licenses/BSD-3-Clause)
r"""Print all dependencies required to build a port as a graph.
Usage:
port_deptree.py [--min] PORTNAME [VARIANTS ...]
Example:
port_deptree.py irssi -perl | dot -Tpdf -oirssi.pdf
port_deptree.py --min $(port echo requested and outdated)\
| dot -Tpdf | open -fa Preview
"""
from __future__ import print_function
import sys
import subprocess
from itertools import product
from altgraph import Dot, Graph
__version__ = "0.9"
# Reserve the real stdout for the generated dot output; everything else the
# script prints goes to stderr.
_stdout, sys.stdout = sys.stdout, sys.stderr
class NodeData(object):
    """Per-node metadata: the node's role in the tree and its install status."""
    __slots__ = ("type", "status")

    def __init__(self, type):
        # Role of the node: one of (root, vertex, leaf).
        self.type = type
        # Install status: one of (installed, outdated, missing).
        self.status = "missing"
class EdgeData(object):
    """Per-edge metadata: the dependency section this edge belongs to."""
    __slots__ = ("section",)

    def __init__(self, section):
        self.section = section
def get_deps(portname, variants):
    """Return `section, depname` dependents of `portname` with `variants`."""
    command = ["port", "deps", portname] + list(variants)
    output_lines = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        universal_newlines=True,
    ).stdout.readlines()
    for line in output_lines:
        heading, _, children = line.partition(":")
        # Only lines like "Build Dependencies: a, b" are of interest.
        if not heading.endswith("Dependencies"):
            continue
        # e.g. "Build Dependencies" -> "build"
        section = heading.split()[0].lower()
        for child in children.split(","):
            child = child.strip()
            if child:
                yield section, child
def make_graph(graph, portname, variants):
    """Traverse dependency tree of `portname` with `variants`.

    Populates `graph` in place with NodeData/EdgeData entries.

    Args:
        graph (Graph.Graph): The graph to populate (mutated in place).
        portname (str): The name of a port.
        variants (list): The variants to apply to `portname`.
    """
    def call(cmd):
        # Run `cmd` and return its stdout lines.
        return subprocess.Popen(
            cmd.split(),
            stdout=subprocess.PIPE,
            universal_newlines=True,
        ).stdout.readlines()
    # Port names currently installed / outdated on this machine.
    installed = set(line.split()[0] for line in call("port echo installed"))
    outdated = set(line.split()[0] for line in call("port echo outdated"))
    # Nodes already in the graph (e.g. from a previous make_graph call)
    # are not traversed again.
    visited = set(node for node in graph)
    def traverse(parent):
        """Recursively traverse dependencies to `parent`."""
        if parent in visited:
            return
        else:
            visited.add(parent)
        node_data = graph.node_data(parent)
        # Outdated ports are checked first; default status stays "missing".
        if parent in outdated:
            node_data.status = "outdated"
        elif parent in installed:
            node_data.status = "installed"
        for section, child in get_deps(parent.strip('"'), variants):
            # Any non-root node with outgoing dependencies becomes a vertex.
            if node_data.type != "root":
                node_data.type = "vertex"
            if child not in graph:
                graph.add_node(child, NodeData("leaf"))
            graph.add_edge(
                parent, child, EdgeData(section), create_nodes=False
            )
            traverse(child)
    graph.add_node(portname, NodeData("root"))
    traverse(portname)
def reduce_graph(graph, root):
    """Keep only "missing" and "outdated" nodes and their parents.

    Installed nodes whose children are all installed are hidden; their
    parents are bridged directly to their children with "virtual" edges so
    the remaining paths stay connected.
    """
    for node in graph.forw_bfs(root):
        node_data = graph.node_data(node)
        # Roots and nodes that still need work are always kept.
        if node_data.type == "root" or node_data.status != "installed":
            continue
        children = set(graph.tail(edge) for edge in graph.out_edges(node))
        # Hide this installed node only when none of its children is
        # outdated or missing.
        if not set(("outdated", "missing")).intersection(
            data.status
            for data in (graph.node_data(child) for child in children)
        ):
            parents = set(graph.head(edge) for edge in graph.inc_edges(node))
            for parent, child in product(parents, children):
                if not graph.edge_by_node(parent, child):
                    graph.add_edge(parent, child, EdgeData("virtual"))
            graph.hide_node(node)
def make_dot(graph):
    """Convert the graph to a dot file.

    Node and edge styles is obtained from the corresponding data.

    Args:
        graph (Graph.Graph): The graph.

    Returns:
        Dot.Dot: The dot file generator.
    """
    dot = Dot.Dot(graph, graphtype="digraph")
    dot.style(overlap=False, bgcolor="transparent")
    for node in graph:
        node_data = graph.node_data(node)
        # Roots and leaves get a double circle; interior nodes a plain one.
        shape = "circle" if node_data.type == "vertex" else "doublecircle"
        # Colors encode install status; installed nodes stay black/white.
        color, fillcolor = dict(
            missing=("red", "moccasin"), outdated=("forestgreen", "lightblue")
        ).get(node_data.status, ("black", "white"))
        dot.node_style(
            node, shape=shape, style="filled", fillcolor=fillcolor, color=color
        )
    for edge, edge_data, head, tail in (
        graph.describe_edge(edge) for edge in graph.edge_list()
    ):
        section = edge_data.section
        # Edge color encodes the dependency section; unknown sections are black.
        color = dict(
            fetch="forestgreen",
            extract="darkgreen",
            build="blue",
            runtime="red",
            virtual="darkgray",
        ).get(section, "black")
        style = dict(virtual="dashed").get(section, "solid")
        dot.edge_style(
            head,
            tail,
            # "library" and "virtual" edges are left unlabeled to reduce noise.
            label=section if section not in ("library", "virtual") else "",
            style=style,
            color=color,
            fontcolor=color,
        )
    return dot
def make_stats(graph):
    """Return counts of missing/installed/outdated nodes plus the total."""
    stats = {
        "missing": 0,
        "installed": 0,
        "outdated": 0,
        "total": graph.number_of_nodes(),
    }
    for node in graph:
        stats[graph.node_data(node).status] += 1
    return stats
if __name__ == "__main__":
    graph = Graph.Graph()
    reduce = False
    commandline = {}
    try:
        if not sys.argv[1:]:
            raise RuntimeError
        for arg in sys.argv[1:]:
            if arg.startswith("@"):
                # "@..." tokens are skipped -- presumably version specifiers
                # from `port echo` output; confirm.
                continue
            elif arg.startswith("--min"):
                reduce = True
            elif not (arg.startswith("+") or arg.startswith("-")):
                portname = arg
                commandline[portname] = []
            else:
                # A +variant/-variant applies to the last seen portname; one
                # before any portname raises NameError, handled below.
                commandline[portname].append(arg)
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit propagate; bad usage still prints the help text.
        print(__doc__, file=sys.stderr)
        exit(1)
    for portname, variants in commandline.items():
        print(
            "Calculating dependencies for",
            portname,
            *variants,
            file=sys.stderr
        )
        make_graph(graph, portname, variants)
    # Stats are computed before reduction so totals reflect the full tree.
    stats = make_stats(graph)
    if reduce:
        for portname in commandline:
            reduce_graph(graph, portname)
    print(
        "Total:",
        stats["total"],
        "(%i" % stats["outdated"],
        "upgrades,",
        stats["missing"],
        "new)",
        file=sys.stderr,
    )
    for line in make_dot(graph).iterdot():
        print(line, file=_stdout)
    _stdout.flush()
|
from .endpoint import Endpoint
class File(Endpoint):
def __init__(self, filename):
self.filename = filename
def read(self):
with open(self.filename, 'r') as f:
return f.readlines()
def write(self, data):
with open(self.filename, 'w+') as f:
f.writelines(data)
def add(self, data):
with open(self.filename, 'a+') as f:
f.writelines(data)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from pyramid import viewderivers
from pyramid.i18n import Localizer
from warehouse import i18n
class TestInvalidLocalizer:
    # InvalidLocalizer is a guard object: any attempt to use it must raise.
    @pytest.mark.parametrize(
        "method",
        [
            # Our custom methods.
            "pluralize",
            "translate",
        ],
    )
    def test_methods_raise(self, method):
        # Calling any localizer method on the invalid localizer must raise.
        localizer = i18n.InvalidLocalizer()
        with pytest.raises(RuntimeError):
            getattr(localizer, method)()
    @pytest.mark.parametrize("name", ["locale_name"])
    def test_propery_raises(self, name):
        # Mere attribute access must also raise.
        localizer = i18n.InvalidLocalizer()
        with pytest.raises(RuntimeError):
            getattr(localizer, name)
class TestTranslatedView:
    def test_has_options(self):
        # The deriver must advertise exactly the option it consumes.
        assert set(i18n.translated_view.options) == {"has_translations"}
    @pytest.mark.parametrize("has_translations", [False, None])
    def test_invalid_localizer(self, has_translations):
        # Views without translations (option False or absent) must see an
        # InvalidLocalizer so accidental localizer use raises.
        context = pretend.stub()
        request = pretend.stub(localizer=pretend.stub())
        response = pretend.stub()
        @pretend.call_recorder
        def view(context, request):
            assert isinstance(request.localizer, i18n.InvalidLocalizer)
            return response
        info = pretend.stub(options={}, exception_only=False)
        if has_translations is not None:
            info.options["has_translations"] = has_translations
        derived_view = i18n.translated_view(view, info)
        assert derived_view(context, request) is response
        assert view.calls == [pretend.call(context, request)]
    def test_valid_localizer(self, monkeypatch):
        # Translated views keep the real localizer and add a Vary callback.
        add_vary_cb = pretend.call_recorder(lambda fn: fn)
        add_vary = pretend.call_recorder(lambda vary: add_vary_cb)
        monkeypatch.setattr(i18n, "add_vary", add_vary)
        context = pretend.stub()
        request = pretend.stub(localizer=Localizer(locale_name="en", translations=[]))
        response = pretend.stub()
        @pretend.call_recorder
        def view(context, request):
            assert isinstance(request.localizer, Localizer)
            return response
        info = pretend.stub(options={"has_translations": True})
        derived_view = i18n.translated_view(view, info)
        assert derived_view(context, request) is response
        assert view.calls == [pretend.call(context, request)]
        # The response must vary on (and the view be wrapped for) PyPI-Locale.
        assert add_vary.calls == [pretend.call("PyPI-Locale")]
        assert add_vary_cb.calls == [pretend.call(view)]
def test_sets_locale(monkeypatch):
    # _locale must return the KNOWN_LOCALES entry for the request's locale name.
    expected_locale = pretend.stub()
    locale_name = pretend.stub()
    known = {locale_name: expected_locale, "en": pretend.stub()}
    monkeypatch.setattr(i18n, "KNOWN_LOCALES", known)
    request = pretend.stub(locale_name=locale_name)
    assert i18n._locale(request) is expected_locale
def test_when_locale_is_missing(monkeypatch):
    # With no request locale name, _locale must fall back to "en".
    english = pretend.stub()
    monkeypatch.setattr(i18n, "KNOWN_LOCALES", {"en": english})
    assert i18n._locale(pretend.stub(locale_name=None)) is english
def test_negotiate_locale(monkeypatch):
    # Priority 1: an explicit _LOCALE_ attribute on the request wins.
    request = pretend.stub(_LOCALE_="fake-locale-attr")
    assert i18n._negotiate_locale(request) == "fake-locale-attr"
    # Priority 2: the _LOCALE_ query parameter.
    request = pretend.stub(params={"_LOCALE_": "fake-locale-param"})
    assert i18n._negotiate_locale(request) == "fake-locale-param"
    # Priority 3: the _LOCALE_ cookie.
    request = pretend.stub(params={}, cookies={"_LOCALE_": "fake-locale-cookie"})
    assert i18n._negotiate_locale(request) == "fake-locale-cookie"
    # Priority 4: without an Accept-Language header, the default negotiator.
    request = pretend.stub(params={}, cookies={}, accept_language=None)
    default_locale_negotiator = pretend.call_recorder(lambda r: "fake-locale-default")
    monkeypatch.setattr(i18n, "default_locale_negotiator", default_locale_negotiator)
    assert i18n._negotiate_locale(request) == "fake-locale-default"
    # Priority 5: the best match from the Accept-Language header.
    request = pretend.stub(
        params={},
        cookies={},
        accept_language=pretend.stub(
            best_match=pretend.call_recorder(lambda *a, **kw: "fake-locale-best-match")
        ),
    )
    assert i18n._negotiate_locale(request) == "fake-locale-best-match"
def test_localize(monkeypatch):
    # localize() defers translation until str(); the stub localizer returns
    # a canned string regardless of its input.
    request = pretend.stub(
        localizer=pretend.stub(
            translate=pretend.call_recorder(lambda ts: "fake translated string")
        )
    )
    get_current_request = pretend.call_recorder(lambda: request)
    monkeypatch.setattr(i18n, "get_current_request", get_current_request)
    assert str(i18n.localize("foo")) == "fake translated string"
def test_includeme():
    # includeme must wire translation dirs, the locale negotiator, the
    # request helpers, the view deriver, and the jinja2 filters/globals.
    config_settings = {}
    config = pretend.stub(
        add_translation_dirs=pretend.call_recorder(lambda s: None),
        set_locale_negotiator=pretend.call_recorder(lambda f: None),
        add_request_method=pretend.call_recorder(lambda f, name, reify=False: None),
        get_settings=lambda: config_settings,
        add_view_deriver=pretend.call_recorder(lambda f, over, under: None),
    )
    i18n.includeme(config)
    assert config.add_translation_dirs.calls == [pretend.call("warehouse:locale/")]
    assert config.set_locale_negotiator.calls == [pretend.call(i18n._negotiate_locale)]
    assert config.add_request_method.calls == [
        pretend.call(i18n._locale, name="locale", reify=True),
        pretend.call(i18n._localize, name="_"),
    ]
    assert config.add_view_deriver.calls == [
        pretend.call(
            i18n.translated_view, over="rendered_view", under=viewderivers.INGRESS
        )
    ]
    # Jinja2 must receive the date/number formatting filters and locale map.
    assert config_settings == {
        "jinja2.filters": {
            "format_date": "warehouse.i18n.filters:format_date",
            "format_datetime": "warehouse.i18n.filters:format_datetime",
            "format_rfc822_datetime": "warehouse.i18n.filters:format_rfc822_datetime",
            "format_number": "warehouse.i18n.filters:format_number",
        },
        "jinja2.globals": {"KNOWN_LOCALES": "warehouse.i18n:KNOWN_LOCALES"},
    }
def test_lazy_string():
    # __json__ must resolve the lazy string via its stringify callable.
    def identity(value, *args, **kwargs):
        return value
    lazy = i18n.LazyString(identity, "test_string")
    assert lazy.__json__(None) == "test_string"
|
"""
Extract local maxima from an SPM; return a table (DataFrame) with columns:
- x-axis array index (i)
- y-axis array index (j)
- z-axis array index (k)
- peak z-value
- peak p-value
"""
import numpy as np
import pandas as pd
def PeakTable(spm, exc, mask):
    """
    Identify local maxima above z-value threshold in masked statistical
    image in array form.

    Parameters
    ----------
    spm : :obj:`numpy.ndarray`
        Z-statistic map in array form.
    exc : :obj:`float`
        Voxel-wise z-value threshold (i.e., excursion threshold or cluster-
        defining threshold) to apply to ``spm``.
    mask : :obj:`numpy.ndarray`
        Boolean mask in array form.

    Returns
    -------
    peak_df : :obj:`pandas.DataFrame`
        DataFrame with local maxima (peaks) from statistical map. Each peak is
        provided with i, j, and k indices, z-value, and peak-level p-value.
    """
    r = 1  # radius of cube in voxels
    # Zero-pad so the (2r+1)^3 neighbourhood is defined at the borders.
    spm_ext = np.pad(spm, r, 'constant')
    msk_ext = np.pad(mask, r, 'constant')
    spm_ext = spm_ext * msk_ext
    shape = spm.shape
    labels = ['i', 'j', 'k', 'zval']
    # FIX: collect rows in a list and build the DataFrame once at the end.
    # The original used DataFrame.append per peak, which was quadratic and
    # was removed entirely in pandas 2.0.
    rows = []
    # A voxel is a peak when it exceeds the threshold and is strictly greater
    # than every other voxel in its surrounding cube.
    for m in range(r, shape[0]+r):
        for n in range(r, shape[1]+r):
            for o in range(r, shape[2]+r):
                if spm_ext[m, n, o] > exc:
                    surroundings = spm_ext[m-r:m+r+1, n-r:n+r+1, o-r:o+r+1].copy()
                    surroundings[r, r, r] = 0
                    if spm_ext[m, n, o] > np.max(surroundings):
                        rows.append([m-r, n-r, o-r, spm_ext[m, n, o]])
    peak_df = pd.DataFrame(data=rows, columns=labels)
    # Peak-level p-values (not the same as simple z-to-p conversion),
    # floored at 1e-6.
    p_values = np.exp(-float(exc) * (np.array(peak_df['zval']) - float(exc)))
    p_values[p_values < 10**-6] = 10**-6
    peak_df['pval'] = p_values
    peak_df = peak_df.sort_values(by=['zval'], ascending=False)
    peak_df.index = range(len(peak_df))
    return peak_df
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from telemetry import story
from telemetry.core import platform
from telemetry.page import page
from telemetry.internal.util import binary_manager
HTTP_EXAMPLE = 'http://www.example.com'
HTTPS_EXAMPLE = 'https://www.example.com'
def FetchExampleDomainArchive():
  ''' Return the path to wpr go archive of example.com page.

  This may involve fetching the archives from cloud storage if it doesn't
  exist on local file system.
  '''
  # Archive paths are keyed by OS name and architecture of the host.
  p = platform.GetHostPlatform()
  return binary_manager.FetchPath(
      'example_domain_wpr_go_archive', p.GetOSName(), p.GetArchName())
class ExampleDomainPageSet(story.StorySet):
  """Story set containing example.com over both HTTP and HTTPS."""
  def __init__(self):
    super(ExampleDomainPageSet, self).__init__(
        archive_data_file='data/example_domain.json',
        cloud_storage_bucket=story.PUBLIC_BUCKET)
    # Same page, once per scheme.
    self.AddStory(page.Page(HTTP_EXAMPLE, self, name=HTTP_EXAMPLE))
    self.AddStory(page.Page(HTTPS_EXAMPLE, self, name=HTTPS_EXAMPLE))
|
# this is a file, otherwise known as a "module" in python
# the __future__ module can help make in built functions backwards compatible
# https://docs.python.org/2/library/__future__.html
from __future__ import print_function
import os
from sys import platform
import json
def function():
    """Report this module's file name and parent package, then read file.json."""
    # Absolute path of the directory that contains this module (file).
    file_path = os.path.dirname(os.path.abspath(__file__))
    # Path separator appropriate for the current file system.
    slash = syscheck()
    # The last two path components are the package and the module directories.
    *_, package_name, module_name = file_path.split(slash)
    print("Inside of file: {} \nInside the package: {}".format(module_name, package_name))
    read_file(file_path, slash)
def read_file(path, slash):
    """Load <path><slash>file.json and print the value stored under 'key'.

    Works even after the package has been pip installed (see MANIFEST.in).
    """
    with open(path + slash + 'file.json', 'r') as myfile:
        parsed_data = json.loads(myfile.read())
    print('accessing the dictionary stored in the .json-> key:{}'.format(parsed_data['key']))
def syscheck():
    """
    Jack C. Cook
    Saturday, February 15, 2020
    Check the operating system to determine what slash is used
    :return: '/' on macOS/Linux, '\\' on Windows
    :raises ValueError: for any platform this function does not handle
    """
    unix_like = ("darwin" in platform) or ("linux" in platform)
    if unix_like:
        return r'/'
    if platform in ("win32", "win64"):
        return '\\'
    raise ValueError("The platform: {} is not currently handled by this function.".format(platform))
|
import numpy as np
import torch
import torch.nn as nn
import torchvision
import pandas as pd
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn import metrics
import torchvision.transforms as transforms
from Dataset.Utils import get_target_label_idx,global_contrast_normalization,OneClass
from Dataset.DatasetLoader import MNIST_loader,FMNIST_loader, CIFAR_loader,Speech_loader,PIMA_loader
from Network.CIFARNet import AE_CIFAR
from Network.MNISTNet import AE_MNIST
from Network.PIMANet import AE_PIMA
from Network.SpeechNet import AE_Speech
from Source.GammaTune import tune_gamma
from Source.Tester import DASVDD_test
from Source.Trainer import DASVDD_trainer
# Build train/test loaders for one-class anomaly detection on MNIST
# (class 0 is treated as the "normal" class).
train_loader,test_loader,labels = MNIST_loader(train_batch=200,test_batch=1,Class=0)
in_shape = 28*28  # flattened MNIST image size
code_size = 256  # dimensionality of the autoencoder latent code
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Autoencoder network operating on flattened images.
model = AE_MNIST(input_shape=in_shape).to(device)
params = list(model.parameters())
optimizer = torch.optim.Adam(params,lr=1e-3)
# SVDD hypersphere center; optimized by its own Adagrad optimizer.
C = torch.randn(code_size,device = device,requires_grad=True)
update_center = torch.optim.Adagrad([C],lr=1,lr_decay=0.01)
criterion=nn.MSELoss()
# Gamma balances the loss terms; tuned over T trials before training.
Gamma = tune_gamma(AE_MNIST,in_shape,criterion,train_loader,device = torch.device("cuda" if torch.cuda.is_available() else "cpu"),T=10)
DASVDD_trainer(model,in_shape,code_size,C,train_loader,optimizer,update_center,criterion,Gamma,device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
              num_epochs = 300,K=0.9)
# NOTE(review): C is passed twice here — verify DASVDD_test's signature;
# presumably only one argument is meant to be the hypersphere center.
DASVDD_test(model,C,in_shape,Gamma,test_loader,labels,criterion,C)
|
"""
Title: Simple custom layer example: Antirectifier
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2016/01/06
Last modified: 2020/04/20
Description: Demonstration of custom layer creation.
"""
"""
## Introduction
This example shows how to create custom layers, using the Antirectifier layer
(originally proposed as a Keras example script in January 2016), an alternative
to ReLU. Instead of zeroing-out the negative part of the input, it splits the negative
and positive parts and returns the concatenation of the absolute value
of both. This avoids loss of information, at the cost of an increase in dimensionality.
To fix the dimensionality increase, we linearly combine the
features back to a space of the original size.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## The Antirectifier layer
"""
class Antirectifier(layers.Layer):
    """Layer that keeps both the positive and the negative parts of its input.

    The centered input is split into ReLU(x) and ReLU(-x); their concatenation
    is projected back to the original feature size with a trainable kernel,
    so no information is lost to rectification.
    """

    def __init__(self, initializer="he_normal", **kwargs):
        super(Antirectifier, self).__init__(**kwargs)
        self.initializer = keras.initializers.get(initializer)

    def build(self, input_shape):
        dim = input_shape[-1]
        # Projects the doubled feature space back down to `dim`.
        self.kernel = self.add_weight(
            shape=(dim * 2, dim),
            initializer=self.initializer,
            name="kernel",
            trainable=True,
        )

    def call(self, inputs):
        centered = inputs - tf.reduce_mean(inputs, axis=-1, keepdims=True)
        halves = tf.concat([tf.nn.relu(centered), tf.nn.relu(-centered)], axis=-1)
        return tf.matmul(halves, self.kernel)

    def get_config(self):
        # Optional: makes the layer serializable.
        config = {"initializer": keras.initializers.serialize(self.initializer)}
        base_config = super(Antirectifier, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
"""
## Let's test-drive it on MNIST
"""
# Training parameters
batch_size = 128
num_classes = 10
epochs = 20
# The data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784)
x_test = x_test.reshape(-1, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Build the model
model = keras.Sequential(
[
keras.Input(shape=(784,)),
layers.Dense(256),
Antirectifier(),
layers.Dense(256),
Antirectifier(),
layers.Dropout(0.5),
layers.Dense(10),
]
)
# Compile the model
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train the model
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.15)
# Test the model
model.evaluate(x_test, y_test)
|
""" Implementation of a node in linked lists and binary search trees. """
from typing import TypeVar, Generic
# Type variables for the generic node classes: I = item type, K = key type,
# T = spare generic type.
I = TypeVar('I')
K = TypeVar('K')
T = TypeVar('T')
__author__ = 'Maria Garcia de la Banda and Brendon Taylor. Modified by Alexey Ignatiev'
__docformat__ = 'reStructuredText'
class TreeNode(Generic[K, I]):
    """ Node class represent BST nodes. """

    def __init__(self, key: K, item: I = None) -> None:
        """
        Initialises the node with a key and optional item
        and sets the left and right pointers to None
        :complexity: O(1)
        """
        self.key = key
        self.item = item
        self.left = None
        self.right = None

    def __str__(self) -> str:
        """
        Returns the string representation of a node;
        string keys/items are wrapped in single quotes.
        :complexity: O(N) where N is the size of the item
        """
        # isinstance is the idiomatic type check (also handles str subclasses).
        key = str(self.key) if not isinstance(self.key, str) else "'{0}'".format(self.key)
        item = str(self.item) if not isinstance(self.item, str) else "'{0}'".format(self.item)
        return '({0}, {1})'.format(key, item)
class AVLTreeNode(TreeNode, Generic[K, I]):
    """ Node class for AVL trees.
    Objects of this class have an additional variable - height.
    """

    def __init__(self, key: K, item: I = None) -> None:
        """
        Initialises the node via the TreeNode constructor and
        starts the subtree height at 1 (a fresh node is a leaf)
        :complexity: O(1)
        """
        super().__init__(key, item)
        # Height of the subtree rooted at this node; a leaf has height 1.
        self.height = 1
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Classes to manage priors
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.stats as stats
from scipy.interpolate import interp1d
from scipy.integrate import quad
from dmsky.utils import stat_funcs
from dmsky.utils import tools
from dmsky.library import FileLibrary
# Module-level file library shared by FileFuncPrior instances.
# NOTE: `global` at module scope is a no-op; kept only for emphasis.
global filelib
filelib = FileLibrary()
class PriorFunctor(object):
    """A functor class that wraps simple functions we use to
    make priors on parameters.
    """

    def __init__(self, funcname, scale=1.0):
        """C'tor

        Parameters
        ----------
        funcname : str
            Name for this function, used for bookeeping
        scale : float
            Scale factor applied to input values.
        """
        self._funcname = funcname
        self._scale = scale

    def __call__(self, x):
        """Return the Prior value

        Parameters
        ----------
        x :`numpy.ndarray`
            Input values

        Returns
        -------
        y : `numpy.ndarray`
            Output values, same shape as x
        """
        raise NotImplementedError("PriorFunctor.__call__")

    def log_value(self, x):
        """Return the log of the function value

        Parameters
        ----------
        x :`numpy.ndarray`
            Input values

        Returns
        -------
        y : `numpy.ndarray`
            Output values, same shape as x
        """
        # BUGFIX: concrete __call__ implementations already multiply their
        # argument by self.scale, so passing self._scale*x here applied the
        # scale factor twice.
        return np.log(self.__call__(x))

    def normalization(self):
        """Normalization, i.e. the integral of the function
        over the normalization_range.
        """
        norm_r = self._normalization_range()
        return quad(self, norm_r[0]*self.scale, norm_r[1]*self.scale)[0]

    def _normalization_range(self):
        """Normalization range; default is the non-negative real line.
        """
        return 0, np.inf

    def mean(self):
        """Mean value of the function.
        """
        raise NotImplementedError("prior_functor.mean")

    def sigma(self):
        """The 'width' of the function.
        What this means depend on the function being used.
        """
        raise NotImplementedError("prior_functor.sigma")

    @property
    def funcname(self):
        """A string identifying the function.
        """
        return self._funcname

    @property
    def scale(self):
        """The scale factor applied to input values
        """
        return self._scale

    def marginalization_bins(self):
        """Binning to use to do the marginalization integrals
        Default is to marginalize over two decades,
        centered on mean, using 1000 bins
        """
        log_mean = np.log10(self.mean())
        return np.logspace(-1. + log_mean, 1. + log_mean, 1001)/self._scale

    def profile_bins(self):
        """The binning to use to do the profile fitting
        Default is to profile over +-5 sigma,
        Centered on mean, using 100 bins
        """
        log_mean = np.log10(self.mean())
        log_half_width = max(5. * self.sigma(), 3.)
        return np.logspace(log_mean - log_half_width,
                           log_mean + log_half_width, 101)/self._scale
class FunctionPrior(PriorFunctor):
    """Prior that simply wraps an externally supplied function.
    """

    def __init__(self, funcname, mu, sigma, fn, lnfn=None, scale=1.0):
        """C'tor

        Parameters
        ----------
        funcname : str
            Name for this function, used for bookeeping
        mu : float
            Central value of the Prior, used to get scan ranges
        sigma : float
            Width of the Prior, used to get scan ranges
        fn : function
            Function that returns the Prior value
        lnfn : function or None
            Optional function that returns the log of the Prior value
        scale : float
            Scale factor applied to input values.
        """
        # FIXME, why doesn't super(FunctionPrior, self) work here?
        PriorFunctor.__init__(self, funcname, scale)
        self._mu = mu
        self._sigma = sigma
        self._fn = fn
        self._lnfn = lnfn

    def normalization(self):
        """The normalization, i.e. the integral of the wrapped
        function over the normalization range.
        """
        lo, hi = self._normalization_range()
        return quad(self, lo * self.scale, hi * self.scale)[0]

    def mean(self):
        """Central value of the Prior."""
        return self._mu

    def sigma(self):
        """Width of the Prior; its meaning depends on the wrapped function."""
        return self._sigma

    def log_value(self, x):
        """Return the log of the Prior value at the (scaled) inputs,
        using the dedicated log function when one was supplied.

        Parameters
        ----------
        x :`numpy.ndarray`
            Input values

        Returns
        -------
        y : `numpy.ndarray`
            Output values, same shape as x
        """
        scaled = x * self.scale
        if self._lnfn is not None:
            return self._lnfn(scaled, self._mu, self._sigma)
        return np.log(self._fn(scaled, self._mu, self._sigma))

    def __call__(self, x):
        """Evaluate the wrapped function at the (scaled) inputs.

        Parameters
        ----------
        x :`numpy.ndarray`
            Input values

        Returns
        -------
        y : `numpy.ndarray`
            Output values, same shape as x
        """
        return self._fn(x * self.scale, self._mu, self._sigma)
class GaussPrior(FunctionPrior):
    """Prior given by a Gaussian distribution.
    """

    def __init__(self, mu, sigma, scale=1.0):
        """C'tor

        Parameters
        ----------
        mu : float
            Central value of the Gaussian
        sigma : float
            Sigma of the Gaussian
        scale : float
            Scale factor applied to input values.
        """
        super(GaussPrior, self).__init__(
            "gauss", mu, sigma,
            fn=stat_funcs.gauss, lnfn=stat_funcs.lngauss, scale=scale)
class LGaussPrior(FunctionPrior):
    """Prior given by a Gaussian in log-space.
    """

    def __init__(self, mu, sigma, scale=1.0):
        """C'tor

        Parameters
        ----------
        mu : float
            Central value of the log Gaussian
        sigma : float
            Sigma of the Gaussian
        scale : float
            Scale factor applied to input values.
        """
        super(LGaussPrior, self).__init__(
            "lgauss", mu, sigma,
            fn=stat_funcs.lgauss, lnfn=stat_funcs.lnlgauss, scale=scale)
class LGaussLikePrior(FunctionPrior):
    """Prior given by the inverse of the log of a Gaussian
    (i.e., the x and y axes are swapped).
    """

    def __init__(self, mu, sigma, scale=1.0):
        """C'tor

        Parameters
        ----------
        mu : float
            Central value of the underlying Gaussian
        sigma : float
            Sigma of the underlying Gaussian
        scale : float
            Scale factor applied to input values.
        """
        def swapped(x, y, s):
            """lgauss evaluated with its first two arguments exchanged."""
            return stat_funcs.lgauss(y, x, s)

        def swapped_ln(x, y, s):
            """lnlgauss evaluated with its first two arguments exchanged."""
            return stat_funcs.lnlgauss(y, x, s)

        super(LGaussLikePrior, self).__init__(
            "lgauss_like", mu, sigma, fn=swapped, lnfn=swapped_ln, scale=scale)
class LGaussLogPrior(FunctionPrior):
    """Prior given by the inverse of the log of a Gaussian
    (i.e., the x and y axes are swapped), implemented in log-space.
    """

    def __init__(self, mu, sigma, scale=1.0):
        """C'tor

        Parameters
        ----------
        mu : float
            Central value of the underlying Gaussian
        sigma : float
            Sigma of the underlying Gaussian
        scale : float
            Scale factor applied to input values.
        """
        def logspace_fn(x, y, s):
            """lgauss evaluated in log space."""
            return stat_funcs.lgauss(x, y, s, logpdf=True)

        def logspace_lnfn(x, y, s):
            """lnlgauss evaluated in log space."""
            return stat_funcs.lnlgauss(x, y, s, logpdf=True)

        super(LGaussLogPrior, self).__init__(
            "lgauss_log", mu, sigma,
            fn=logspace_fn, lnfn=logspace_lnfn, scale=scale)
class LognormPrior(PriorFunctor):
    """ A wrapper around the lognormal function.
    A note on the highly confusing scipy.stats.lognorm function...
    The three inputs to this function are:
    s : This is the variance of the underlying
        gaussian distribution
    scale = 1.0 : This is the mean of the linear-space
        lognormal distribution.
        The mean of the underlying normal distribution
        occurs at ln(scale)
    loc = 0 : This linearly shifts the distribution in x (DO NOT USE)
    The convention is different for numpy.random.lognormal
    mean : This is the mean of the underlying
        normal distribution (so mean = log(scale))
    sigma : This is the standard deviation of the
        underlying normal distribution (so sigma = s)
    For random sampling:
    numpy.random.lognormal(mean, sigma, size)
    mean : This is the mean of the underlying
        normal distribution (so mean = exp(scale))
    sigma : This is the standard deviation of the
        underlying normal distribution (so sigma = s)
    scipy.stats.lognorm.rvs(s, scale, loc, size)
    s : This is the standard deviation of the
        underlying normal distribution
    scale : This is the mean of the generated
        random sample scale = exp(mean)
    Remember, pdf in log space is
    plot( log(x), stats.lognorm(sigma,scale=exp(mean)).pdf(x)*x )
    Parameters
    ----------
    mu : float
        Mean value of the function
    sigma : float
        Variance of the underlying gaussian distribution
    """

    def __init__(self, mu, sigma, scale=1.0):
        """C'tor

        Parameters
        ----------
        mu : float
            Mean value of the function
        sigma : float
            Variance of the underlying gaussian distribution
        scale : float
            Scale factor applied to input values.
        """
        # BUGFIX: the scale argument was previously dropped (the base class
        # defaulted to 1.0), so LognormPrior silently ignored `scale`.
        # Forward it, consistent with NormPrior.
        super(LognormPrior, self).__init__('lognorm', scale)
        self._mu = mu
        self._sigma = sigma

    def normalization(self):
        """Normalization, i.e. the integral of the function
        over the normalization_range. A PDF integrates to one.
        """
        return 1.

    def mean(self):
        """Mean value of the function.
        """
        return self._mu

    def sigma(self):
        """ The 'width' of the function.
        What this means depend on the function being used.
        """
        return self._sigma

    def __call__(self, x):
        """Return the Prior value

        Parameters
        ----------
        x :`numpy.ndarray`
            Input values

        Returns
        -------
        y : `numpy.ndarray`
            Output values, same shape as x
        """
        return stats.lognorm(self._sigma, scale=self._mu).pdf(x*self.scale)
class NormPrior(PriorFunctor):
    """ A wrapper around the normal function.
    Parameters
    ----------
    mu : float
        Mean value of the function
    sigma : float
        Variance of the underlying gaussian distribution
    """

    def __init__(self, mu, sigma, scale=1.0):
        """C'tor

        Parameters
        ----------
        mu : float
            Mean value of the function
        sigma : float
            Variance of the underlying gaussian distribution
        scale : float
            Scale factor applied to input values.
        """
        super(NormPrior, self).__init__('norm', scale)
        self._mu = mu
        self._sigma = sigma

    def normalization(self):
        """Normalization of the prior; a PDF integrates to one."""
        return 1.

    def mean(self):
        """Mean value of the function."""
        return self._mu

    def sigma(self):
        """The 'width' of the function."""
        return self._sigma

    def __call__(self, x):
        """Evaluate the normal PDF at the (scaled) input values.

        Parameters
        ----------
        x :`numpy.ndarray`
            Input values

        Returns
        -------
        y : `numpy.ndarray`
            Output values, same shape as x
        """
        dist = stats.norm(loc=self._mu, scale=self._sigma)
        return dist.pdf(x * self.scale)
class FileFuncPrior(PriorFunctor):
    """A wrapper around the interpolated function.
    Parameters
    ----------
    filename : string
        File with the function parameters
    """

    def __init__(self, filename, scale=1.0):
        """C'tor

        Parameters
        ----------
        filename : string
            File with the function parameters
        scale : float
            Scale factor applied to input values.
        """
        # BUGFIX: the scale argument was previously dropped (the base class
        # defaulted to 1.0), so FileFuncPrior silently ignored `scale`.
        super(FileFuncPrior, self).__init__('file', scale)
        self._filename = filename
        self._fullpath = filelib.get_filepath(filename)
        if self._fullpath is None:
            raise ValueError("Could not find file %s in path %s " % (filename, filelib.paths))
        # The YAML file supplies mean/sigma plus tabulated (x, y) samples
        # that are interpolated to evaluate the prior.
        d = tools.yaml_load(self._fullpath)
        self._mu = d['mean']
        self._sigma = d['sigma']
        self._x = d['x']
        self._y = d['y']
        self._kind = d.get('kind', 'linear')
        self._fill_value = d.get('fill_value', 0)
        self._interpfunc = interp1d(self._x, self._y, kind=self._kind,
                                    bounds_error=False, fill_value=self._fill_value)

    def mean(self):
        """Mean value of the function.
        """
        return self._mu

    def sigma(self):
        """ The 'width' of the function.
        What this means depend on the function being used.
        """
        return self._sigma

    def __call__(self, x):
        """Return the Prior value

        Parameters
        ----------
        x :`numpy.ndarray`
            Input values

        Returns
        -------
        y : `numpy.ndarray`
            Output values, same shape as x
        """
        return self._interpfunc(x*self.scale)
def create_prior_functor(d):
    """Build a prior from a dictionary.

    Parameters
    ----------
    d : A dictionary, it must contain:
        d['functype'] : a recognized function type
        and all of the required parameters for the
        prior_functor of the desired type

    Returns
    ----------
    A sub-class of '~fermipy.stats_utils.prior_functor'

    Recognized types are:
    'lognorm' : Scipy lognormal distribution
    'norm' : Scipy normal distribution
    'gauss' : Gaussian truncated at zero
    'lgauss' : Gaussian in log-space
    'lgauss_like' : Gaussian in log-space, with arguments reversed.
    'lgauss_log' : Gaussian in log-space, evaluated as a log-pdf
    'interp' : interpolated function read from a file
    """
    functype = d.get('functype', 'lgauss_like')
    scale = d.get('scale', 1.0)
    # The file-based prior takes a filename rather than (mu, sigma).
    if functype == 'interp':
        return FileFuncPrior(d['filename'], scale)
    dispatch = {
        'norm': NormPrior,
        'lognorm': LognormPrior,
        'gauss': GaussPrior,
        'lgauss': LGaussPrior,
        'lgauss_like': LGaussLikePrior,
        'lgauss_lik': LGaussLikePrior,
        'lgauss_log': LGaussLogPrior,
    }
    if functype not in dispatch:
        raise KeyError("Unrecognized prior_functor type %s" % functype)
    return dispatch[functype](d['mu'], d['sigma'], scale)
def factory(ptype, **kwargs):
    """Factory method to create Priors.
    Keyword arguments are passed through to the class c'tor.

    Parameters
    ----------
    ptype : str
        Prior type

    Returns
    -------
    prior : `PriorFunctor`
        Newly created object
    """
    # Imported lazily to avoid a circular import at module load time.
    import dmsky.factory
    return dmsky.factory.factory(ptype, module=__name__, **kwargs.copy())
|
import os
from invoke import task
from .vars import conf
@task
def clean(
    c,
    build=False,
    test=False,
    sonar=False,
):
    """Remove generated artifacts selected by the boolean flags."""
    patterns = []
    if build:
        # Packaging byproducts.
        patterns.extend(["build", f"{conf.name}.egg-info", "**/*.pyc"])
    if sonar:
        # SonarQube scanner state, kept under the package directory.
        for entry in (".sonar", ".scannerwork", "sonar-project.properties"):
            patterns.append(os.path.join(conf.name, entry))
    if test:
        # pytest / coverage output.
        patterns.extend(
            [
                ".pytest_cache",
                ".coverage",
                "**/.coverage",
                "htmlcov",
                "**/coverage.xml",
            ]
        )
    for pattern in patterns:
        c.run(f"rm -rf {pattern}")
@task
def purge(c):
    """Remove every category of generated artifact."""
    clean(c, build=True, test=True, sonar=True)
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import json
import time
import boto3
from aws_lambda_powertools import Logger
from template_evaluation import eval_expression, eval_template
logger = Logger()
# Boto3 clients are created once per Lambda container and reused across invocations.
personalize = boto3.client('personalize')
event_bridge = boto3.client('events')
# Feature flag: publish EventBridge events on filter create/delete (default: yes).
publish_filter_events = os.environ.get('PUBLISH_FILTER_EVENTS', 'yes').lower() == 'yes'
def put_event(detail_type: str, detail: str, resources=None):
    """
    Called to publish an event to the default EventBridge event bus when a filter
    is created or deleted. Allows applications to synchronize their configuration
    such as switching from an old filter to a newly created filter.

    Parameters
    ----------
    detail_type : str
        EventBridge DetailType for the entry.
    detail : str
        JSON-encoded event detail payload.
    resources : list or None
        Optional list of resource ARNs to attach to the event.
    """
    # BUGFIX: a mutable default argument ([]) is shared across calls;
    # use None as the sentinel and create a fresh list per call.
    if resources is None:
        resources = []
    logger.info({
        'detail_type': detail_type,
        'detail': detail,
        'resources': resources
    })
    event_bridge.put_events(
        Entries=[
            {
                'Source': 'personalize.filter.rotator',
                'Resources': resources,
                'DetailType': detail_type,
                'Detail': detail
            }
        ]
    )
@logger.inject_lambda_context(log_event=True)
def lambda_handler(event, _):
    """Rotate Amazon Personalize filters for a dataset group.

    Step 1 scans existing filters to see whether the "current" filter
    (named by evaluating currentFilterNameTemplate) already exists and
    queues filters matching deleteFilterMatchTemplate for deletion.
    Step 2 creates the current filter if missing, optionally waiting for
    it to become active and publishing an EventBridge event.
    Step 3 deletes the queued filters, optionally publishing events.

    Expected event keys: datasetGroupArn, currentFilterNameTemplate,
    currentFilterExpressionTemplate, deleteFilterMatchTemplate.
    """
    dataset_group_arn = event["datasetGroupArn"]
    current_filter_name_template = event["currentFilterNameTemplate"]
    current_filter_expression_template = event["currentFilterExpressionTemplate"]
    delete_filter_match_template = event["deleteFilterMatchTemplate"]
    current_filter_name = eval_template(current_filter_name_template)
    logger.info('Current filter resolved name: %s', current_filter_name)
    current_filter_exists = False
    filters_to_delete = []
    # Step 1: Iterate over existing filters for the dataset group to determine if a new filter
    # needs to be created and to collect filters that should be deleted.
    paginator = personalize.get_paginator('list_filters')
    for paginate_result in paginator.paginate(datasetGroupArn = dataset_group_arn):
        for filter in paginate_result['Filters']:
            if filter['name'] == current_filter_name:
                logger.info('Current filter %s already exists; skipping creation', current_filter_name)
                current_filter_exists = True
            elif delete_filter_match_template:
                delete_match = eval_expression(delete_filter_match_template, {'filter': filter})
                if delete_match:
                    logger.info('Filter %s matched the delete filter template; queueing for deletion', filter['filterArn'])
                    filters_to_delete.append(filter)
    # Step 2: If the current filter does not exist, create it and send an event when it's active (if configured to do so).
    if not current_filter_exists:
        logger.info('Current filter %s does not exist; creating', current_filter_name)
        expression = eval_template(current_filter_expression_template)
        response = personalize.create_filter(
            datasetGroupArn = dataset_group_arn,
            filterExpression = expression,
            name = current_filter_name
        )
        filter_arn = response['filterArn']
        logger.info('Filter %s created', filter_arn)
        if publish_filter_events:
            # FUTURE: move this logic into Step Functions for efficiency and robustness.
            logger.info('Waiting for new filter to become active so we can publish filter created event')
            status = None
            start_time = time.time()
            max_time = start_time + 60*12 # 12 minutes
            # Poll every 10s until the filter is ACTIVE/failed or the deadline passes.
            while time.time() < max_time:
                describe_filter_response = personalize.describe_filter(filterArn = filter_arn)
                status = describe_filter_response["filter"]["status"]
                if status == "ACTIVE" or status == "CREATE FAILED":
                    break
                time.sleep(10)
                logger.info('Waiting for new filter to become active; status is %s; %d seconds elapsed', status, int(time.time() - start_time))
            elapsed_time = time.time() - start_time
            if status == "CREATE FAILED":
                logger.error('Filter %s status is %s', filter_arn, status)
                put_event(
                    detail_type='PersonalizeFilterCreateFailed',
                    detail = json.dumps({
                        'datasetGroupArn': dataset_group_arn,
                        'filterName': current_filter_name,
                        'filterExpression': expression,
                        'filterStatus': status,
                        'failureReason': describe_filter_response['filter'].get('failureReason'),
                        'waitTimeSeconds': int(elapsed_time)
                    }),
                    resources = [ filter_arn ]
                )
            else:
                # Filter status may be ACTIVE or still PENDING/IN PROGRESS (if we timed out).
                logger.info('Filter %s status is %s', filter_arn, status)
                put_event(
                    detail_type='PersonalizeFilterCreated',
                    detail = json.dumps({
                        'datasetGroupArn': dataset_group_arn,
                        'filterName': current_filter_name,
                        'filterExpression': expression,
                        'filterStatus': status,
                        'waitTimeSeconds': int(elapsed_time)
                    }),
                    resources = [ filter_arn ]
                )
    # Step 3: Delete any filters eligible for delection according to the match template and send events (if configured).
    if len(filters_to_delete) > 0:
        logger.info('%s filters marked for deletion', len(filters_to_delete))
        for filter in filters_to_delete:
            logger.info('Deleting filter %s', filter['filterArn'])
            personalize.delete_filter(filterArn = filter['filterArn'])
            if publish_filter_events:
                put_event(
                    detail_type='PersonalizeFilterDeleted',
                    detail = json.dumps({
                        'datasetGroupArn': dataset_group_arn,
                        'filterName': filter['name'],
                        'filterArn': filter['filterArn']
                    }),
                    resources = [ filter['filterArn'] ]
                )
|
import os
import re
from setuptools import find_packages, setup
def read(f):
    """Return the contents of text file *f*, decoded as UTF-8.

    Uses a context manager so the handle is closed promptly (the original
    left the file open for the garbage collector to reclaim).
    """
    with open(f, 'r', encoding='utf-8') as fh:
        return fh.read()
def get_version(package):
    """Return the __version__ string declared in *package*/__init__.py.

    Raises AttributeError (via .group on None) if no assignment is found,
    matching the original behavior.
    """
    # Close the file deterministically instead of leaking the handle.
    with open(os.path.join(package, '__init__.py')) as fh:
        init_py = fh.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
# Package metadata and install configuration for the djangoxform distribution.
setup(
    name='djangoxform',
    version=get_version('xform'),  # read from xform/__init__.py
    url='https://github.com/znc-sistemas/Django-XForm',
    license='MIT',
    description='OpenRosa for Django.',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    author='NECTO',
    author_email='contato@nectosystems.com.br',  # SEE NOTE BELOW (*)
    packages=find_packages(exclude=['tests*']),
    include_package_data=True,
    # Pinned runtime dependencies.
    install_requires=[
        'requests==2.21.0',
        'pyxform==0.13.1',
        'xlrd==1.2.0',
        'djangorestframework==3.12.2',
        'djangorestframework-xml==1.4.0',
    ],
    python_requires=">=3.5",
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 2.0',
        'Framework :: Django :: 2.1',
        'Framework :: Django :: 2.2',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import asyncio
import uuid
from azure.iot.device.aio import IoTHubDeviceClient
from azure.iot.device import Message
messages_to_send = 10
async def main():
    """Connect a downstream device through an IoT Edge gateway and send
    `messages_to_send` test telemetry messages in parallel."""
    # The connection string for a device should never be stored in code. For the sake of simplicity we're using an environment variable here.
    # NOTE: connection string must contain ;GatewayHostName=<hostname of your iot edge device>
    # make sure your IoT Edge box is setup as a 'transparent gateway' per the IOT Edge documentation
    conn_str = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING")
    # path to the root ca cert used on your iot edge device (must copy the pem file to this downstream device)
    # example: /home/azureuser/edge_certs/azure-iot-test-only.root.ca.cert.pem
    ca_cert = os.getenv("IOTEDGE_ROOT_CA_CERT_PATH")
    # BUGFIX: the certificate file was opened without ever being closed;
    # a context manager guarantees the handle is released.
    with open(ca_cert) as certfile:
        root_ca_cert = certfile.read()
    # The client object is used to interact with your Azure IoT Edge device.
    device_client = IoTHubDeviceClient.create_from_connection_string(
        connection_string=conn_str, server_verification_cert=root_ca_cert
    )
    # Connect the client.
    await device_client.connect()

    async def send_test_message(i):
        """Send one numbered test message with a few sample properties."""
        print("sending message #" + str(i))
        msg = Message("test wind speed " + str(i))
        msg.message_id = uuid.uuid4()
        msg.correlation_id = "correlation-1234"
        msg.custom_properties["tornado-warning"] = "yes"
        await device_client.send_message(msg)
        print("done sending message #" + str(i))

    # send `messages_to_send` messages in parallel
    await asyncio.gather(*[send_test_message(i) for i in range(1, messages_to_send + 1)])
    # Finally, shut down the client
    await device_client.shutdown()
if __name__ == "__main__":
asyncio.run(main())
# If using Python 3.6 or below, use the following code instead of asyncio.run(main()):
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# loop.close()
|
import argparse
import sys
import yaml
from source.utils import visualize_filters, printconfig
from source.train import training
from source.test_image import testing_image
from source.test_video import testing_video
if __name__ == "__main__":
""" Main script - runs everything from here
main.py is the primary command console for all operations. The argparser takes command line arguments to run different modes of this project. Run "python3 main.py --help" for automatic description of the arguments.
The arguments only indicate the mode of operation, configuration values are instead taken from a .yaml file (default = config.yaml). Each mode has its own dictionary in the .yaml file.
All functions/classes present in files in the source/ folder.
"""
# Initialize argparser
parser = argparse.ArgumentParser(description='ESPCN')
parser.add_argument('-pc', '--print-config', dest= 'print_config', default=None, action='store_true', help= 'print configuration file')
parser.add_argument('-c', '--config-file', dest= 'config_file', default='config.yaml', action='store_true', help= 'path to configuration file')
parser.add_argument('-t', '--train', dest= 'train', default=None, action='store_true', help= 'train the model')
parser.add_argument('-im','--test-image', dest= 'test_image', default=None, action='store_true', help= 'test an image using ESPCN')
parser.add_argument('-vi','--test-video', dest= 'test_video', default=None, action='store_true', help= 'test a video using ESPCN')
parser.add_argument('-b','--batch', dest= 'batch_mode', default=None, action='store_true', help= 'process entire directory of images/videos')
parser.add_argument('-f','--filters-vis', dest= 'filters_vis', default=None, action='store_true', help= 'visualize filters of each conv layer')
parser.add_argument('-p','--plot', dest= 'plot', default=None, action='store_true', help= 'plot psnr for image batches or videos')
args = parser.parse_args()
# Load configuration dictionary
with open(args.config_file) as f:
config_dict = yaml.safe_load(f)
# if conditions on command line arguments decide operation(s)
printconfig(config_dict) if args.print_config else None
visualize_filters(config_dict['visualize filters']) if args.filters_vis else None
if not (args.train or args.test_image or args.test_video):
print('Please provide argument to train/test')
sys.exit()
training(config_dict['training']) if args.train else None
testing_image(config_dict['test image'], args.batch_mode, args.plot) if args.test_image else None
testing_video(config_dict['test video'], args.batch_mode, args.plot) if args.test_video else None |
import re
from string import ascii_lowercase
from PyQt5.QtCore import QMutex, QCoreApplication
from PyQt5.QtWidgets import QWidget, QTabWidget, QTabBar, QLineEdit
from pyntpg.dataset_tabs.dataset_tab import DatasetTab
from pyntpg.datasets_container import DatasetsContainer
class DatasetTabs(QTabWidget):
    """ High level wrapper widget which will
    contain all the tabs.
    Holds one DatasetTab per open dataset, plus a trailing "+" tab that
    spawns a new dataset tab when selected.
    """
    # Signal on which to emit a dict of dataset: nc_obj when any dataset updated
    def __init__(self):
        super(DatasetTabs, self).__init__()
        self.setTabPosition(QTabWidget.North)
        self.setMinimumHeight(150)
        self.setMaximumHeight(325)
        # Mutex used to protect from tab_changed firing
        # itself again when the "+" is clicked and we add new
        # tab and setCurrentIndex
        self.number_tabs_added = 0
        self.mutex = QMutex()
        self.setTabBar(DatasetTabBar())
        # First dataset tab; publishes its dataset when the tab signals ready.
        dataset_tab = DatasetTab(self)
        dataset_tab.dataset_ready.connect(lambda path: self.publish_dataset(path, dataset_tab))
        self.addTab(dataset_tab, "dataset")
        # Add the "+" tab and make sure it has no close button
        # make sure that the + tab has no close button
        plus_tab = QWidget()
        self.addTab(plus_tab, "+")
        index = self.indexOf(plus_tab)
        self.tabBar().setTabButton(index, QTabBar.RightSide, None)
        self.currentChanged.connect(self.tab_changed)
        self.tabCloseRequested.connect(self.close_tab)
        self.datasets = QCoreApplication.instance().datasets  # type: DatasetsContainer
    def tab_changed(self, index):
        # Selecting the last ("+") tab inserts a fresh dataset tab just before
        # it; the mutex stops setCurrentIndex below from re-entering this slot.
        maxindex = self.count() - 1
        if (index == maxindex or index == -1) and self.mutex.tryLock():
            dataset_tab = DatasetTab(self)
            dataset_tab.dataset_ready.connect(lambda path: self.publish_dataset(path, dataset_tab))
            # New tabs are suffixed a, b, c, ... cycling through the alphabet.
            self.insertTab(maxindex, dataset_tab, "dataset_"
                           + ascii_lowercase[self.number_tabs_added % len(ascii_lowercase)])
            self.number_tabs_added += 1
            self.setCurrentIndex(maxindex)
            self.mutex.unlock()
    def close_tab(self, index):
        # If closing the last real tab (just before "+"), move selection left
        # first so the "+" tab does not become current and spawn a new tab.
        if index == self.count() - 2:
            self.setCurrentIndex(index - 1)
        self.datasets.close(self.tabText(index))  # Broadcast the remove event
        to_remove = self.widget(index)
        self.removeTab(index)
        to_remove.deleteLater()
    def publish_dataset(self, path, tab):
        # Register the dataset under the tab's current title.
        index = self.indexOf(tab)
        if index == -1:
            return  # hmm, tab wasn't found
        # Here, ok to pass on empty path, DatasetContainer.open delegates properly
        self.datasets.open(self.tabText(index), path)
class DatasetTabBar(QTabBar):
    """ The QTabBar controls the actual tabs
    in the tab bar. In here, we are setting the
    behavior for edit tab name on double click.
    """
    # credits of http://stackoverflow.com/a/30269356

    def __init__(self):
        QTabBar.__init__(self)
        # Mutex to keep from editing another tab
        # while one is already being edited
        self.mutex = QMutex()
        self.setTabsClosable(True)
        self.datasets = QCoreApplication.instance().datasets

    def mouseDoubleClickEvent(self, event=None):
        """Start an inline rename of the double-clicked tab.

        The last tab is the "+" placeholder and must not be renamed.
        Bug fix: check the index BEFORE tryLock() — the original acquired
        the mutex first and leaked it (never unlocked) whenever the "+"
        tab was double-clicked, permanently disabling renaming.
        """
        if event is not None:
            tab_index = self.tabAt(event.pos())
        else:
            tab_index = self.currentIndex()
        if tab_index != self.count() - 1 and self.mutex.tryLock():
            self.start_rename(tab_index)

    def start_rename(self, tab_index):
        """Overlay a QLineEdit on top of the tab label for in-place editing."""
        self.__edited_tab = tab_index
        rect = self.tabRect(tab_index)
        top_margin = 3
        left_margin = 6
        self.__edit = QLineEdit(self)
        self.__edit.show()
        self.__edit.move(rect.left() + left_margin, rect.top() + top_margin)
        self.__edit.resize(rect.width() - 2 * left_margin, rect.height() - 2 * top_margin)
        self.__edit.setText(self.tabText(tab_index))
        self.__edit.selectAll()
        self.__edit.setFocus()
        self.__edit.editingFinished.connect(self.finish_rename)

    def finish_rename(self):
        """Commit the edited name and broadcast the rename.

        Bug fix: disconnect first — editingFinished can fire twice (Return
        followed by focus-out), which would double-unlock the mutex and
        touch the already-deleted editor.
        """
        self.__edit.editingFinished.disconnect(self.finish_rename)
        oldtext = self.tabText(self.__edited_tab)
        # Sanitize: spaces become underscores, then strip any char outside
        # [A-Za-z0-9_] and trailing underscores.
        text = re.sub(r" ", "_", str(self.__edit.text()).rstrip())
        text = re.sub(r"[^A-Za-z0-9_]+", "", text).rstrip("_")
        # TODO: possible to rename to same name as another dataset... prevent this.
        self.setTabText(self.__edited_tab, text)
        # emit signal that tab name was changed so configured tabs can change
        self.datasets.rename(oldtext, text)
        self.__edit.deleteLater()
        self.mutex.unlock()
|
import cv2  # pip install opencv-python

print("Versão do OpenCV:", cv2.__version__)
# Camera index 1 (secondary camera); change to 0 for the default device.
webCam = cv2.VideoCapture(1)
try:
    while True:
        conectou, imagem = webCam.read()
        # Bug fix: only display a frame when the read succeeded; passing an
        # invalid frame to imshow raises an error (e.g. camera unplugged).
        if not conectou:
            break
        cv2.imshow("Rosto", imagem)
        teclou = cv2.waitKey(1) & 0xFF
        if teclou == ord('q') or teclou == 27:  # pressed q or ESC
            break
finally:
    # Always release the camera and close windows, even on an error above.
    webCam.release()
    cv2.destroyAllWindows()
|
# Generated by Django 3.0.11 on 2020-11-26 16:35
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-type answer-validation bounds to Question; relax Seat.product.

    NOTE(review): the decimal bound fields use null=True without blank=True,
    unlike the date/datetime bounds — confirm whether blank form input
    should also be allowed there.
    """

    dependencies = [
        ('pretixbase', '0170_remove_hidden_urls'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='valid_date_max',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='valid_date_min',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='valid_datetime_max',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='valid_datetime_min',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='valid_number_max',
            field=models.DecimalField(decimal_places=6, max_digits=16, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='valid_number_min',
            field=models.DecimalField(decimal_places=6, max_digits=16, null=True),
        ),
        # Seats keep their row when the linked product is deleted.
        migrations.AlterField(
            model_name='seat',
            name='product',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='seats', to='pretixbase.Item'),
        ),
    ]
|
# Read 2 numbers and compare them - report which is larger or smaller
|
import enum
from typing import Dict, List, Optional, Tuple
from .fluent_dict import FluentDict, FluentList
from .geometry import GeoPoint
from .request import RegionQuery, MapRegion, PayloadKind, RegionQueryBuilder, IgnoringStrategyKind, MapRegionBuilder
from .request import Request, GeocodingRequest, ExplicitRequest, RequestBuilder, RequestKind, ReverseGeocodingRequest
from .response import LevelKind, GeoRect
# Version of the geocoding wire protocol implemented by this module.
PROTOCOL_VERSION = 2


class Field(enum.Enum):
    """JSON field names for geocoding requests (enum value = wire key).

    Note: ``reverse_level`` shares the value 'level' with ``level`` and is
    therefore an enum alias of it.
    """
    # Common envelope fields
    version = 'version'
    mode = 'mode'
    requested_payload = 'feature_options'
    resolution = 'resolution'
    view_box = 'view_box'
    fetched_ids = 'fetched_ids'
    option_kind = 'kind'
    geo_object_list = 'ids'
    # Geocoding-request fields
    region_queries = 'region_queries'
    region_query_names = 'region_query_names'
    region_query_parent = 'region_query_parent'
    level = 'level'
    map_region_kind = 'kind'
    map_region_values = 'values'
    match = 'match'
    namesake_example_limit = 'namesake_example_limit'
    allow_ambiguous = 'allow_ambiguous'
    # Ambiguity-resolver fields
    ambiguity_resolver = 'ambiguity_resolver'
    ambiguity_ignoring_strategy = 'ambiguity_resolver_ignoring_strategy'
    ambiguity_closest_coord = 'ambiguity_resolver_closest_coord'
    ambiguity_box = 'ambiguity_resolver_box'
    # Reverse-geocoding fields
    reverse_level = "level"
    reverse_coordinates = "reverse_coordinates"
    reverse_parent = "reverse_parent"
    # Coordinate / bounding-box components
    coord_lon = 'lon'
    coord_lat = 'lat'
    min_lon = 'min_lon'
    min_lat = 'min_lat'
    max_lon = 'max_lon'
    max_lat = 'max_lat'
class RequestFormatter:
    """Serializes Request objects into protocol-v2 JSON (as FluentDict)."""

    @staticmethod
    def format(request: Request) -> FluentDict:
        """Dispatch on the concrete request type; raise ValueError otherwise."""
        if isinstance(request, GeocodingRequest):
            return RequestFormatter._format_geocoding_request(request)
        elif isinstance(request, ExplicitRequest):
            return RequestFormatter._format_explicit_request(request)
        elif isinstance(request, ReverseGeocodingRequest):
            return RequestFormatter._format_reverse_geocoding_request(request)
        else:
            raise ValueError('Unknown request kind: ' + str(request))

    @staticmethod
    def _format_geocoding_request(request: 'GeocodingRequest') -> FluentDict:
        """Common envelope plus region queries, level and ambiguity options."""
        return RequestFormatter \
            ._common(RequestKind.geocoding, request) \
            .put(Field.region_queries, RequestFormatter._format_region_queries(request.region_queries)) \
            .put(Field.level, request.level) \
            .put(Field.namesake_example_limit, request.namesake_example_limit) \
            .put(Field.allow_ambiguous, request.allow_ambiguous)

    @staticmethod
    def _format_explicit_request(request: 'ExplicitRequest') -> FluentDict:
        """Common envelope plus the explicit list of geo-object ids."""
        return RequestFormatter \
            ._common(RequestKind.explicit, request) \
            .put(Field.geo_object_list, request.geo_object_list)

    @staticmethod
    def _format_reverse_geocoding_request(request: 'ReverseGeocodingRequest'):
        """Common envelope plus coordinates, level and optional parent scope."""
        return RequestFormatter \
            ._common(RequestKind.reverse, request) \
            .put(Field.reverse_coordinates, [RequestFormatter._format_coord(coord) for coord in request.coordinates]) \
            .put(Field.reverse_level, request.level) \
            .put(Field.reverse_parent, RequestFormatter._format_map_region(request.scope))

    @staticmethod
    def _common(request_kind: RequestKind, request: Request) -> FluentDict:
        """Fields shared by every request kind (view_box/fetched_ids unused)."""
        return FluentDict() \
            .put(Field.version, PROTOCOL_VERSION) \
            .put(Field.mode, request_kind.value) \
            .put(Field.requested_payload, request.requested_payload) \
            .put(Field.resolution, request.resolution) \
            .put(Field.view_box, None) \
            .put(Field.fetched_ids, None)

    @staticmethod
    def _format_region_queries(region_queires: List[RegionQuery]) -> List[Dict]:
        """Serialize each RegionQuery: name, ambiguity resolver, parent scope."""
        result = []
        for query in region_queires:
            result.append(
                FluentDict()
                    .put(Field.region_query_names, [] if query.request is None else [query.request])
                    .put(Field.ambiguity_resolver, None if query.ambiguity_resolver is None else FluentDict()
                        .put(Field.ambiguity_ignoring_strategy, query.ambiguity_resolver.ignoring_strategy)
                        .put(Field.ambiguity_box, RequestFormatter._format_box(query.ambiguity_resolver.box))
                        .put(Field.ambiguity_closest_coord, RequestFormatter._format_coord(query.ambiguity_resolver.closest_coord))) \
                    .put(Field.region_query_parent, RequestFormatter._format_map_region(query.scope))
                    .to_dict()
            )
        return result

    @staticmethod
    def _format_map_region(parent: Optional[MapRegion]) -> Optional[Dict]:
        """Serialize a MapRegion to {kind, values}, or None when absent."""
        if parent is None:
            return None
        return FluentDict() \
            .put(Field.map_region_kind, parent.kind.value) \
            .put(Field.map_region_values, parent.values) \
            .to_dict()

    @staticmethod
    def _format_coord(closest_coord: GeoPoint) -> Optional[Tuple[float, float]]:
        """Serialize a GeoPoint to a (lon, lat) tuple, or None when absent."""
        if closest_coord is None:
            return None
        return closest_coord.lon, closest_coord.lat

    @staticmethod
    def _format_box(rect: GeoRect) -> Optional[Dict]:
        """Serialize a GeoRect to its min/max lon/lat dict, or None when absent."""
        if rect is None:
            return None
        return FluentDict() \
            .put(Field.min_lon, rect.min_lon) \
            .put(Field.min_lat, rect.min_lat) \
            .put(Field.max_lon, rect.max_lon) \
            .put(Field.max_lat, rect.max_lat) \
            .to_dict()
class RequestParser:
    """Deserializes protocol-v2 JSON dicts back into Request objects."""

    @staticmethod
    def parse(request_json: Dict) -> Request:
        """Build a Request from JSON; raises ValueError for unknown modes."""
        request = RequestBuilder()
        request_dict = FluentDict(request_json) \
            .visit_enum(Field.mode, RequestKind, request.set_request_kind) \
            .visit_enums(Field.requested_payload, PayloadKind, request.set_requested_payload) \
            .visit_int_optional(Field.resolution, request.set_resolution)
        if request.request_kind == RequestKind.explicit:
            request_dict.visit_str_list(Field.geo_object_list, request.set_ids)
        elif request.request_kind == RequestKind.geocoding:
            request_dict \
                .visit_enum_existing(Field.level, LevelKind, request.set_level) \
                .visit_int(Field.namesake_example_limit, request.set_namesake_limit) \
                .visit_bool(Field.allow_ambiguous, request.set_allow_ambiguous) \
                .visit_list(Field.region_queries, lambda regions: request.set_queries(regions.map(RequestParser._parse_region_query).list()))
        elif request.request_kind == RequestKind.reverse:
            request_dict \
                .visit_enum_existing(Field.reverse_level, LevelKind, request.set_level) \
                .visit_list(Field.reverse_coordinates, lambda coords: request.set_reverse_coordinates(RequestParser._parse_coordinates(coords))) \
                .visit_object_optional(Field.reverse_parent,
                                       lambda parent: request.set_reverse_scope(RequestParser._parse_map_region(parent)))
        else:
            raise ValueError('Unknown request kind: ' + str(request))
        return request.build()

    @staticmethod
    def _parse_region_query(region_query: dict) -> RegionQuery:
        """Parse one region query (name, ambiguity resolver, parent scope)."""
        region_q = FluentDict(region_query)
        # Only zero or one name per query is supported by this protocol client.
        assert len(region_q.get_list(Field.region_query_names)) in [0, 1], 'Multirequests are not supported'
        builder = RegionQueryBuilder()
        FluentDict(region_query) \
            .visit_str_list(Field.region_query_names, lambda names: builder.set_request(names[0] if len(names) == 1 else None)) \
            .visit_object_optional(Field.ambiguity_resolver, lambda resolver: resolver
                .visit_list_optional(Field.ambiguity_closest_coord,
                                     lambda coord: builder.set_closest_coord(RequestParser._parse_coord(coord)))
                .visit_object_optional(Field.ambiguity_box,
                                       lambda jsonBox: builder.set_box(RequestParser._parse_geo_rect(jsonBox)))
                .visit_enum_existing(Field.ambiguity_ignoring_strategy, IgnoringStrategyKind, builder.set_ignoring_strategy)) \
            .visit_object_optional(Field.region_query_parent, lambda parent: builder.set_scope(RequestParser._parse_map_region(parent)))
        return builder.build()

    @staticmethod
    def _parse_map_region(json: FluentDict) -> MapRegion:
        """Parse a {kind, values} map-region object.

        NOTE(review): map_region_kind is read with visit_bool although the
        formatter writes an enum value — confirm the wire type is boolean.
        """
        builder = MapRegionBuilder()
        json \
            .visit_str_list(Field.map_region_values, builder.set_parent_values) \
            .visit_bool(Field.map_region_kind, builder.set_parent_kind)
        return builder.build()

    @staticmethod
    def _parse_coord(jsonCoord: FluentList) -> GeoPoint:
        """Parse a [lon, lat] pair into a GeoPoint."""
        return GeoPoint(jsonCoord.list()[0], jsonCoord.list()[1])

    @staticmethod
    def _parse_geo_rect(jsonBox: FluentDict) -> GeoRect:
        """Parse a min/max lon/lat dict into a GeoRect."""
        return GeoRect(
            min_lon=jsonBox.get_float(Field.min_lon),
            min_lat=jsonBox.get_float(Field.min_lat),
            max_lon=jsonBox.get_float(Field.max_lon),
            max_lat=jsonBox.get_float(Field.max_lat),
        )

    @staticmethod
    def _parse_coordinates(coordinates: FluentList) -> List[GeoPoint]:
        """Parse a list of [lon, lat] pairs into GeoPoints."""
        return [GeoPoint(coord[0], coord[1]) for coord in coordinates.list()]
|
from rest_framework.serializers import ModelSerializer
from .models import SimData
class SimSerializer(ModelSerializer):
    """Serializes SimData simulation-parameter records for the REST API."""
    class Meta:
        model = SimData
        # Cohort sizes, vaccine supply, and the simulated time span.
        fields = ['id', 'healthyYoung', 'healthyYoungFreerider',
                  'sickYoung', 'healthyElderly', 'healthyElderlyFreerider',
                  'sickElderly', 'vaccines', 'timeSpan']
import argparse as arg
import markov as markov
import sys
import re
import os
import glob
import pickle
"""
This module processes the text and builds Markov
model. The interaction is through CLI
"""
def process_console():
    """
    Build the CLI argument parser and parse the command line.

    :return: the parsed argument namespace (not the parser itself)
    """
    parser = arg.ArgumentParser()
    # NOTE: the original set ``parser.add_help = False`` AFTER construction,
    # which is a no-op (the -h action is installed in __init__). The
    # ineffective line was removed; -h/--help keeps working as before.
    parser.add_argument("--model",
                        help="path to store the model as a binary file")
    parser.add_argument("--input-dir",
                        help="specifies the input dir with source .txt UTF-8 files, optional")
    parser.add_argument("--lc", help="lowercase the input", action="store_true")
    args_list = parser.parse_args()
    return args_list
class ModelTrainer:
    """
    Contains all the operations to train the Markov model.

    Fields are plain attributes so unit tests can override key parameters.
    """

    def __init__(self, model_dir, input_dir, is_lower=False):
        self.model = markov.MarkovModel()  # Markov model dictionary
        self.model_dir = model_dir         # where to store the pickled model
        self.input_dir = input_dir         # input directory with sources
        self.is_lower = is_lower           # lowercase input before training?
        self.files_list = []               # sources to build the model from
        self.model_order = 4               # number of n-gram orders trained

    def process_sources(self):
        """Collect .txt files from input_dir, or fall back to stdin."""
        # Script accepts only UTF-8 .txt files.
        if self.input_dir is not None:
            for entry in os.listdir(self.input_dir):
                if entry.endswith('.txt'):
                    self.files_list.append(entry)
            if len(self.files_list) == 0:
                raise RuntimeError("Empty sources directory!!!")
        else:
            # Console input is triggered
            self.files_list.append(sys.stdin)

    def build_model(self):
        """Feed every source line into the model, once per n-gram order."""
        for file_name in self.files_list:
            if file_name is sys.stdin:
                file = file_name
            else:
                try:
                    # Bug fix: files are now closed (see finally below); the
                    # original opened every file and leaked the handles.
                    file = open(self.input_dir + "/" + file_name, "r")
                except OSError:
                    # Narrowed from a bare except, which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    raise RuntimeError("Problems with sources input")
            try:
                while True:
                    try:
                        line = file.readline()
                    except (OSError, UnicodeDecodeError):
                        # str() fix: concatenating sys.stdin itself raised
                        # TypeError and masked the real error.
                        raise RuntimeError("Failed to read the line in " + str(file_name))
                    if not line:
                        break
                    if self.is_lower:
                        line = line.lower()
                    # Words: runs of Cyrillic, Latin or Hebrew letters plus
                    # apostrophes.
                    raw_data = re.findall("[А-Яа-яa-zA-Zא-ת']+", line)
                    for order in range(self.model_order):
                        self.model.update_model(data=raw_data, model_order=order)
            finally:
                if file is not sys.stdin:
                    file.close()

    def save_model(self):
        """Pickle the model to model_dir, creating its parent dir if needed.

        Bug fix: the original derived the directory from ``input_dir`` (the
        wrong variable), created it only AFTER the dump had already failed,
        and never retried — so the trained model was silently lost.
        """
        target_dir = os.path.dirname(str(self.model_dir))
        if target_dir and not os.path.exists(target_dir):
            print("Directory does not exist. Creating directory")
            os.makedirs(target_dir)
        try:
            with open(str(self.model_dir), "wb+") as model_file:
                pickle.dump(self.model, model_file, pickle.HIGHEST_PROTOCOL)
        except FileNotFoundError:
            print("There are some problems with your model file path")
def main():
    """CLI entry point: parse arguments, then train and persist the model.

    Bug fix: the original built the trainer but never ran the pipeline,
    making main() a dead stub.
    """
    args_list = process_console()
    trainer = ModelTrainer(model_dir=args_list.model, input_dir=args_list.input_dir, is_lower=args_list.lc)
    trainer.process_sources()
    trainer.build_model()
    trainer.save_model()
if __name__ == "__main__":
    # Parse CLI options, then run the full train-and-save pipeline.
    cli_args = process_console()
    model_trainer = ModelTrainer(
        model_dir=cli_args.model,
        input_dir=cli_args.input_dir,
        is_lower=cli_args.lc,
    )
    model_trainer.process_sources()
    model_trainer.build_model()
    model_trainer.save_model()
|
from django.apps import apps
from django.contrib.auth import models as auth
import graphene
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from aristotle_mdr import models as mdr
from aristotle_dse import models as dse
from aristotle_mdr.contrib.identifiers import models as identifiers
from aristotle_mdr.contrib.links import models as links
from aristotle_mdr.contrib.slots import models as slots
from .filters import MetadataFilter
class baseMetadataTypeMixin(DjangoObjectType):
    """Shared base for metadata node types, adding permission-aware lookup."""
    class Meta:
        model = mdr._concept

    @classmethod
    def get_node(cls, id, context, info):
        # This doesn't actually get called ?
        # NOTE(review): filter(...) returns a queryset, not a single node,
        # and never raises DoesNotExist — the except branch is dead. Confirm
        # intended behavior if this lookup is ever exercised.
        try:
            return cls._meta.model.objects.filter(id=id).visible(context.user)
        except cls._meta.model.DoesNotExist:
            return None
class ConceptType(baseMetadataTypeMixin):
    """
    Use this to get metadata
    """
    class Meta:
        model = mdr._concept
        # Relay node interface enables global-id lookups and connections.
        interfaces = (graphene.relay.Node, )


# Plain object types exposing registration statuses and organisations 1:1.
class StatusType(DjangoObjectType):
    class Meta:
        model = mdr.Status


class OrganizationType(DjangoObjectType):
    class Meta:
        model = mdr.Organization


class RegistrationAuthorityType(OrganizationType):
    # A registration authority is an organisation subtype in the MDR models.
    class Meta:
        model = mdr.RegistrationAuthority
def issubclass_strict(cls, base):
    """Return True when ``cls`` is a strict (proper) subclass of ``base``.

    Bug fix: the original expression ``cls and issubclass(...)`` returned
    ``cls`` itself (e.g. None) instead of a boolean when ``cls`` was falsy.
    """
    return bool(cls) and issubclass(cls, base) and cls is not base
def makeTypes():
    """Declare a DjangoObjectType for every concrete concept/value model.

    HACK: builds class source from ContentType names and exec()s it. The
    generated classes are created for their side effect (graphene type
    registration) and are not bound to any accessible name here. The input
    comes from the project's own ContentType table, not from users, but
    exec-based codegen is fragile — prefer type() if this is ever reworked.
    """
    from django.contrib.contenttypes.models import ContentType
    for ct in ContentType.objects.all():
        klass = ct.model_class()
        if issubclass_strict(klass, mdr._concept):
            exec(
                "\n".join([
                    "class {app}_{model}Type(DjangoObjectType):",
                    " __doc__ = apps.get_model('{app}','{model}').__doc__.replace(' ','') ",
                    " class Meta:",
                    " model = apps.get_model('{app}','{model}')",
                    " @classmethod",
                    # NOTE(review): get_node raises ZeroDivisionError on
                    # purpose(?) — looks like leftover debugging; confirm.
                    " def get_node(*args, **kwargs): 1/0"
                ]).format(app=ct.app_label,model=ct.model,)
            )
        if issubclass_strict(klass, mdr.AbstractValue):
            exec(
                "\n".join([
                    "class {app}_{model}Type(DjangoObjectType):",
                    " __doc__ = apps.get_model('{app}','{model}').__doc__.replace(' ','') ",
                    " class Meta:",
                    " model = apps.get_model('{app}','{model}')",
                ]).format(app=ct.app_label,model=ct.model,)
            )
# Link, slot, identifier and DSE object types exposed 1:1 from their models.
class LinkEndType(DjangoObjectType):
    class Meta:
        model = links.LinkEnd


class LinkType(DjangoObjectType):
    class Meta:
        model = links.Link


class RelationRoleType(DjangoObjectType):
    class Meta:
        model = links.RelationRole


class SlotType(DjangoObjectType):
    class Meta:
        model = slots.Slot


class IdentifierType(DjangoObjectType):
    class Meta:
        model = identifiers.ScopedIdentifier


class NamespaceType(DjangoObjectType):
    class Meta:
        model = identifiers.Namespace


class DSSDEInclusionType(DjangoObjectType):
    class Meta:
        model = dse.DSSDEInclusion


# Register the dynamically generated concept/value types at import time.
makeTypes()
class Query(graphene.AbstractType):
    """Root query fields: permission-filtered metadata and registration authorities."""
    all_metadata = DjangoFilterConnectionField(ConceptType, filterset_class=MetadataFilter)
    all_registrationauthorities = graphene.List(RegistrationAuthorityType)

    def resolve_all_metadata(self, args, context, info):
        # Restrict to metadata visible to the requesting user.
        return mdr._concept.objects.all().visible(context.user)

    def resolve_all_registrationauthorities(self, args, context, info):
        # We can easily optimize query count in the resolve method
        return mdr.RegistrationAuthority.objects.all().visible(context.user)


class AristotleQuery(Query, graphene.ObjectType):
    """My Cool GraphQL Endpoint"""
    # This class will inherit from multiple Queries
    # as we begin to add more apps to our project
    pass


# Module-level schema consumed by the GraphQL view.
schema = graphene.Schema(query=AristotleQuery)
|
# Generated by Django 3.1.7 on 2021-05-02 18:13
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles two divergent 'profiles' migration branches.

    No schema changes; it exists only to join the dependency graph.
    """

    dependencies = [
        ('profiles', '0015_auto_20180423_0727'),
        ('profiles', '0011_accountstatus_user_name'),
    ]

    operations = [
    ]
|
from scoring_engine.models.property import Property
from tests.scoring_engine.helpers import generate_sample_model_tree
from tests.scoring_engine.unit_test import UnitTest
class TestProperty(UnitTest):
    """Unit tests for the Property model: construction, persistence, visibility."""

    def test_init_property(self):
        # An unsaved Property has no id and no environment linkage yet.
        property_obj = Property(name="testname", value="testvalue")
        assert property_obj.id is None
        assert property_obj.name == "testname"
        assert property_obj.value == "testvalue"
        assert property_obj.environment is None
        assert property_obj.environment_id is None

    def test_basic_property(self):
        # Saving assigns an id and wires the environment FK; properties
        # default to hidden (visible is False).
        environment = generate_sample_model_tree('Environment', self.db)
        property_obj = Property(name="ip", value="127.0.0.1", environment=environment)
        self.db.save(property_obj)
        assert property_obj.id is not None
        assert property_obj.environment == environment
        assert property_obj.environment_id == environment.id
        assert property_obj.visible is False

    def test_nonhidden_property(self):
        # An explicit visible=True must be persisted as given.
        environment = generate_sample_model_tree('Environment', self.db)
        property_obj = Property(name="ip", value="127.0.0.1", environment=environment, visible=True)
        self.db.save(property_obj)
        assert property_obj.visible is True
|
from django.apps import AppConfig
class VivaappConfig(AppConfig):
    """Django application configuration for the 'vivaapp' app."""
    name = 'vivaapp'
|
import os
import shutil
from pathlib import Path
import random
class TestSupport:
    """
    Macros, utility functions for tests.
    """
    # Anchor all paths on this file's location so tests run from any cwd.
    THIS_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
    PROJECT_DIR = THIS_DIR.parent
    SUBJECTS_DIR = PROJECT_DIR / "test-subjects"

    class get_playground_path:
        """Context manager: create a scratch dir, chdir into it, remove it on exit."""
        # Patched after the class body (below) to TestSupport.SUBJECTS_DIR;
        # the enclosing class-body namespace is not visible from here.
        SUBJECTS_DIR = None

        def __init__(self, path: Path = None):
            if path is None:
                # Random suffix keeps concurrent test runs from colliding.
                path = self.SUBJECTS_DIR / "playground_{}".format(random.randint(100000, 999999))
            # end if
            self.path = path  # Path
            # Start from a clean directory even if a previous run left litter.
            if self.path.exists():
                shutil.rmtree(self.path)
            # end if
            self.path.mkdir(exist_ok=True)
            self.old_path = Path.cwd()  # Path
            return

        def __enter__(self):
            os.chdir(self.path)
            # NOTE(review): returns None, so ``with ... as p`` binds None —
            # confirm callers use the instance, not the 'as' target.
            return

        def __exit__(self, type, value, tb):
            # Remove the playground and restore the original working dir.
            shutil.rmtree(self.path)
            os.chdir(self.old_path)
            return
    # end class

    # Give the nested class access to the outer class attribute.
    get_playground_path.SUBJECTS_DIR = SUBJECTS_DIR
|
from abc import ABC, abstractmethod
class AbstractComparator(ABC):
    """Marker base class for comparator implementations (no interface yet)."""
    pass
|
# coding=utf-8
import base64
from data import s, rs, Rcon
# A key block is a four-character string.
# Forward S-box substitution.
def S(key):
    """Map every character of ``key`` through the AES S-box table ``s``."""
    return ''.join(
        chr(int(s["0x{:02x}".format(ord(ch))], 16)) for ch in key
    )
# Inverse S-box substitution.
def RS(key):
    """Map every character of ``key`` through the inverse S-box table ``rs``."""
    return ''.join(
        chr(int(rs["0x{:02x}".format(ord(ch))], 16)) for ch in key
    )
# Key-schedule core function: rotate, substitute, then XOR the round constant.
def T(key, order):
    """Transform a 4-char key word for round ``order`` of the key schedule."""
    # Rotate left by one character.
    rotated = key[1:] + key[0]
    # S-box substitution.
    substituted = S(rotated)
    # XOR each byte with the matching byte of the round constant Rcon[order].
    return ''.join(
        chr(ord(ch) ^ int(Rcon[order][i], 16))
        for i, ch in enumerate(substituted)
    )
# Byte-wise XOR of two key blocks (b must be at least as long as a).
def xor(a, b):
    """Return the character-wise XOR of strings ``a`` and ``b``."""
    return ''.join(chr(ord(a[i]) ^ ord(b[i])) for i in range(len(a)))
# keys is a 16-char key string; returns the 44-word expanded key schedule.
def key_ext(keys):
    """Expand a 16-character AES key into 44 four-character words.

    NOTE(review): ``T(W[pos - 1], i / 4)`` relies on Python 2 integer
    division; under Python 3 this yields a float index into Rcon — confirm
    the target interpreter before porting.
    """
    W = []
    cur = ''
    # Split the initial key into four 4-char words.
    for i in range(0, len(keys), 4):
        cur = ''.join(keys[i:i + 4])
        W.append(cur)
    # Forty more words, four per round for ten rounds.
    pos = 4
    for i in range(40):
        t1 = W[pos - 4]
        if pos % 4 == 0:
            # Every fourth word gets the special T transformation.
            t2 = T(W[pos - 1], i / 4)
        else:
            t2 = W[pos - 1]
        tmp = xor(t1, t2)
        W.append(tmp)
        pos += 1
    return W
# AddRoundKey: XOR the 16-char state with round ``cir``'s four key words.
def cir_key_add(mess, cir, W):
    """XOR state ``mess`` with the expanded key words of round ``cir``."""
    parts = [xor(mess[4 * i:4 * i + 4], W[4 * cir + i]) for i in range(4)]
    return ''.join(parts)
# ShiftRows: permute the 16-char state (column-major AES row shift).
def row_change(mess):
    """Apply the AES ShiftRows permutation to a 16-character state."""
    return ''.join(
        mess[(base + 5 * j) % 16]
        for base in range(0, 16, 4)
        for j in range(4)
    )
# Inverse ShiftRows: undo the row_change permutation.
def re_row_change(mess):
    """Apply the inverse AES ShiftRows permutation to a 16-character state."""
    order = (0, 13, 10, 7,
             4, 1, 14, 11,
             8, 5, 2, 15,
             12, 9, 6, 3)
    return ''.join(mess[i] for i in order)
# GF(2^8) multiplication (AES field), recursive; addition in the field is XOR.
def muti(a, b):
    """Multiply ``a`` and ``b`` in GF(2^8) modulo the AES polynomial 0x11b.

    ``a`` is decomposed into powers of two; each doubling is the xtime
    operation (shift left, conditionally XOR 0x1b).
    Bug fix: ``cur /= 2`` became float division under Python 3, propagating
    floats through the recursion; ``//=`` is identical under Python 2 and
    correct under Python 3.
    """
    t = 0
    res = 0
    if a == 2:
        # xtime: shift left one bit; reduce with 0x1b if the high bit was set.
        tag = "{:08b}".format(b)
        if tag[0] == '0':
            res = int(tag[1:] + '0', 2)
        else:
            res = int(tag[1:] + '0', 2) ^ int('00011011', 2)
    elif a == 1:
        res = b
    else:
        # Find the largest power of two <= a, double b that many times,
        # then recurse on the remainder.
        cur = 1
        cnt = 0
        while cur <= a:
            cnt += 1
            cur *= 2
        cur //= 2  # fix: was ``cur /= 2`` (float under Python 3)
        cnt -= 1
        r = (a - cur)
        t = muti(2, b)
        for i in range(cnt - 1):
            t = muti(2, t)
        if r != 0:
            res = t ^ muti(r, b)
        else:
            res = t
    return res
# MixColumns: multiply each 4-byte column by the {02,03,01,01} circulant
# matrix over GF(2^8).
def col_mix(mess):
    """Apply the AES MixColumns step to the 16-char state ``mess``."""
    mess = list(mess)
    ans = ''
    for i in range(0, len(mess), 4):
        cur = mess[i:i + 4]
        # Each output byte is a fixed matrix row dotted with the column.
        ans += chr(muti(2, ord(cur[0])) ^ muti(3, ord(cur[1])) ^ ord(cur[2]) ^ ord(cur[3]))
        ans += chr(ord(cur[0]) ^ muti(2, ord(cur[1])) ^ muti(3, ord(cur[2])) ^ ord(cur[3]))
        ans += chr(ord(cur[0]) ^ ord(cur[1]) ^ muti(2, ord(cur[2])) ^ muti(3, ord(cur[3])))
        ans += chr(muti(3, ord(cur[0])) ^ ord(cur[1]) ^ ord(cur[2]) ^ muti(2, ord(cur[3])))
    return ans
# Inverse MixColumns: multiply each column by the inverse matrix
# {0e,0b,0d,09} over GF(2^8).
def re_col_mix(mess):
    """Apply the inverse AES MixColumns step to the 16-char state."""
    mess = list(mess)
    ans = ''
    for i in range(0, len(mess), 4):
        cur = mess[i:i + 4]
        ans += chr(
            muti(0x0e, ord(cur[0])) ^ muti(0x0b, ord(cur[1])) ^ muti(0x0d, ord(cur[2])) ^ muti(0x09, ord(cur[3])))
        ans += chr(
            muti(0x09, ord(cur[0])) ^ muti(0x0e, ord(cur[1])) ^ muti(0x0b, ord(cur[2])) ^ muti(0x0d, ord(cur[3])))
        ans += chr(
            muti(0x0d, ord(cur[0])) ^ muti(0x09, ord(cur[1])) ^ muti(0x0e, ord(cur[2])) ^ muti(0x0b, ord(cur[3])))
        ans += chr(
            muti(0x0b, ord(cur[0])) ^ muti(0x0d, ord(cur[1])) ^ muti(0x09, ord(cur[2])) ^ muti(0x0e, ord(cur[3])))
    return ans
# AES-128-ECB encryption with PKCS#5-style padding.
def AES_128_ECB(message, keys):
    """Encrypt ``message`` with the 16-char key ``keys``; return raw ciphertext.

    Padding: chr(left) repeated ``left`` times; a full chr(16)*16 block is
    appended when the length is already a multiple of 16.
    """
    # Split into 128-bit (16-char) blocks.
    tot = len(message) // 16
    record = []
    for i in range(tot):
        record.append(message[16 * i: 16 * i + 16])
    left = 16 * (tot + 1) - len(message)
    # NOTE(review): ``left`` is 16 (never 0) when the length is a multiple
    # of 16, so the branch below is dead — both paths then append the same
    # full chr(16)*16 pad block, which is the correct PKCS#5 behavior.
    if left == 0:
        record.append(chr(16) * 16)
    else:
        tmp = message[tot * 16:] + chr(left) * left
        record.append(tmp)
    enc = ''
    # Key expansion.
    W = key_ext(keys)
    # Encrypt each block.
    for mess in record:
        # Initial round-key addition (round 0).
        mess = cir_key_add(mess, 0, W)
        # Rounds 1-9: SubBytes, ShiftRows, MixColumns, AddRoundKey.
        for i in range(0, 9):
            mess = S(mess)
            mess = row_change(mess)
            mess = col_mix(mess)
            mess = cir_key_add(mess, i + 1, W)
        # Round 10: no MixColumns.
        mess = S(mess)
        mess = row_change(mess)
        mess = cir_key_add(mess, 10, W)
        enc += mess
    return enc
# Decrypt one 128-bit (16-char) block using expanded key schedule W.
def decry_block(block,W):
    """Invert the ten AES rounds on a single block."""
    i = block
    # Undo round 10 (the final round has no MixColumns).
    i = cir_key_add(i, 10, W)
    i = re_row_change(i)
    i = RS(i)
    # Undo rounds 9..1: AddRoundKey, InvMixColumns, InvShiftRows, InvSubBytes.
    for r in range(9, 0, -1):
        i = cir_key_add(i, r, W)
        i = re_col_mix(i)
        i = re_row_change(i)
        i = RS(i)
    # Undo the initial round-key addition (round 0).
    i = cir_key_add(i, 0, W)
    return i
# Decrypt AES-128-ECB ciphertext and strip the PKCS#5 padding.
def decry_AES_128_ECB(enc, key):
    """Decrypt ``enc`` (a multiple of 16 chars) with 16-char ``key``.

    Bug fix: padding removal now reads the pad length from the last byte
    (PKCS#5) and strips exactly that many characters. The original scanned
    backwards for the first byte differing from the last one, which
    corrupts any plaintext whose final character equals the pad byte, and
    its single-block path was additionally off by one.
    """
    # Key expansion.
    W = key_ext(key)
    # Split into 128-bit (16-char) blocks.
    tot = len(enc) // 16
    record = [enc[16 * i: 16 * i + 16] for i in range(tot)]
    # Decrypt every block and concatenate.
    message = ''
    for block in record:
        message += decry_block(block, W)
    # PKCS#5: the last byte encodes how many pad bytes were appended
    # (the encryptor always pads, so this is in 1..16).
    pad = ord(message[-1])
    return message[:-pad]
keys = 'YELLOW SUBMARINE'
# mess = 'happy_2020aaaaaaaaaa'
# t = 't8is_Is a teSt!!'
# enc = AES_128_ECB(mess, keys)
# print enc
# print decry_AES_128_ECB(enc,keys)
## '74e3a6263a5a56d86553904f151b3b18'
# Decrypt the base64-encoded challenge file (Python 2 print syntax).
file = open('7.txt','r').read()
from base64 import b64decode
print decry_AES_128_ECB(b64decode(file),keys)
## Reading the file line by line did not work.
|
import micropython
import gc
import os
def df():
    """Return free filesystem space on '//' as an 'X MB' string (MicroPython)."""
    s = os.statvfs('//')
    # f_bsize * f_bfree, converted from bytes to megabytes.
    return ('{0} MB'.format((s[0]*s[3])/1048576))
def free(full=False):
    """Report heap usage via gc (MicroPython's mem_free/mem_alloc).

    Returns just the free percentage string by default, or a full
    'Total/Free' summary when ``full`` is True.
    """
    free_bytes = gc.mem_free()
    alloc_bytes = gc.mem_alloc()
    total = free_bytes + alloc_bytes
    percent = '{0:.2f}%'.format(free_bytes / total * 100)
    if full:
        return ('Total:{0} Free:{1} ({2})'.format(total, free_bytes, percent))
    return percent
def printHeapInfo():
    """Print the free-heap percentage plus the interpreter's mem_info dump."""
    # mem_info() prints its own report and returns None, so its output
    # appears inline and the trailing printed value is None.
    print("Free:", free(), " mem_info:", micropython.mem_info())
# print(micropython.mem_info())
# print(free())
# printHeapInfo()
|
from cytomine import Cytomine
import cytomine.models
import time
import shapely
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import pylab as plt
import openslide as ops
from fig2img import fig2img,SaveFigureAsImage
from Histupload import HistUpload
# Replace XXX values by your values
# the web url of cytomine instance, always without the protocol
# your public & private keys of your account on Cytomine (can be found on your Account details on Cytomine)
# SECURITY(review): API keys are hard-coded below — move them to environment
# variables or an untracked config file, and rotate these exposed keys.
protocol = 'http://'
cytomine_core_path="robbin.eecs.qmul.ac.uk"
cytomine_public_key="2f4648cb-a3bb-4d0a-9d98-bfcf1d8e2854"
cytomine_private_key="8a6c2440-0964-4948-9bad-4c9dd5d1edf6"
test_histo = HistUpload()
# check connection to the Cytomine instance
core_conn = Cytomine(cytomine_core_path,cytomine_public_key,cytomine_private_key, verbose= False)
# check that the storage exists
tproject = core_conn.get_projects().data() # Targeted_project_id = 28282
project_id = 28282
test_instance = core_conn.get_project_image_instances(project_id)
tonto = core_conn.get_terms(id_ontology=27995)
# Map term id -> term name, used to label the uploaded crops.
terms_dict = {}
for tterm in tonto.data():
    terms_dict[tterm.id] = tterm.name
#image_index = 0
# For every image in the project: export each annotation as a PNG crop with
# its polygon drawn on top, then upload it to the histology store (Python 2).
for image_index in range(len(test_instance)):
    timage = test_instance[image_index]
    image_id = timage.id
    tannotations = core_conn.get_annotations(id_project=project_id,id_image=image_id)
    #anno_index = 0
    tannos = tannotations.data()
    print "working on %s"%timage.fullPath+'number of annotaions in %d'%len(tannos)
    for anno_index in range(len(tannos)):
        try:
            tanno_id = tannos[anno_index].id
            tannotation = core_conn.get_annotation(id_annotation=tanno_id)
            tlocation = tannotation.location
            # Annotation geometry arrives as WKT; parse it into a polygon.
            g1 = shapely.wkt.loads(tlocation)
            polygons =[g1]
            int_coords = lambda x: np.array(x).round().astype(np.int32)
            exteriors = [int_coords(poly.exterior.coords) for poly in polygons]
            interiors = [int_coords(pi.coords) for poly in polygons
                         for pi in poly.interiors]
            # Bounding box of the exterior ring. Cytomine's y-axis points up,
            # hence max() for sy and the (wy - sy) flip when reading below.
            sx,sy,w,h = exteriors[0][:,0].min(),exteriors[0][:,1].max(),exteriors[0][:,0].max()-exteriors[0][:,0].min(),exteriors[0][:,1].max()-exteriors[0][:,1].min(),
            wsi = ops.open_slide(timage.fullPath.replace('DATA_CYTO','usbflash'))
            wx,wy = wsi.dimensions
            # Pick a pyramid level so very large regions stay manageable.
            if w*h >10000*10000:
                level = 2
            elif w*h >3000*3000:
                level = 1
            else:
                level = 0
            ttimage = wsi.read_region((sx,wy-sy),level,(w/(2**level),h/(2**level)))
            fig,ax = plt.subplots()
            ax.imshow(ttimage)
            ax.axis('image')
            ax.axis('off')
            # Draw exterior and interior rings in yellow over the crop.
            ax.plot( (exteriors[0][:,0]-sx)/(2**level),(sy-exteriors[0][:,1])/(2**level),'y-',linewidth =5)
            for tinter_index in range(len(interiors)):
                ax.plot((interiors[tinter_index][:,0]-sx)/(2**level),(sy-interiors[tinter_index][:,1])/(2**level),'y-',linewidth =5)
            image_path = '/tmp/%d-%d-%d.png'%(project_id,image_id,tanno_id)
            img = fig2img(fig)
            img.save(image_path)
            # Close the figure to keep matplotlib from accumulating memory.
            plt.close(fig)
            # Upload under the annotation's first term name, with a comment
            # linking back to the source annotation.
            test_histo.Upload(image_path=image_path,\
                              subfolder=terms_dict[tannos[anno_index].term[0]],\
                              comment=u'robbin.eecs.qmul.ac.uk/#tabs-image-%d-%d-%d'%(project_id,image_id,tanno_id))
        except Exception as e:
            # Best-effort export: log the failure and continue with the
            # next annotation (Python 2 print statement).
            print e
from pydantic import BaseModel
from decimal import Decimal
class productBase(BaseModel):
    """Pydantic schema for a product record.

    NOTE(review): the name breaks PascalCase convention; kept as-is because
    callers import it by this name.
    """
    product_code: str
    product_name: str
    price: Decimal
|
"""
Prediction Processor:
This adaptor act as the interface between the prediction process's output and the activity you want to perform on that.
As this is an independent adaptor, we can replicate the same for many multiple output methods.
"""
from __future__ import print_function, division, with_statement
from AnalyticalEngine import AnalyticalEngine
class PredictionProcessor(object):
    """Adapter between the prediction process's output and downstream actions.

    Wraps an AnalyticalEngine built for ``analytical_method`` and keeps the
    most recent prediction in ``self.output`` for later retrieval.
    """

    def __init__(self, analytical_method):
        print('Prediction Processor Invoked!')
        self.analytical_method = analytical_method
        self.prediction = None  # last raw prediction from the engine
        self.output = None      # last published output (mirrors prediction)
        self.model_object = AnalyticalEngine(analytical_method)
        print('This prediction can be directly sent and can be stored temporarily!')

    def outputs(self):
        """Return the most recent prediction output (None before any call)."""
        return self.output

    def make_prediciton(self, inputs):
        """Run the engine on ``inputs``, cache and return the prediction.

        Misspelled name kept for backward compatibility with existing
        callers; new code should use :meth:`make_prediction`.
        """
        self.prediction = self.model_object.get_prediction(inputs)
        print('We get prediction as: ', self.prediction)
        self.output = self.prediction
        return self.prediction

    # Correctly-spelled alias (fixes the typo without breaking call sites).
    make_prediction = make_prediciton
|
# https://tjkendev.github.io/procon-library/python/math/gcd.html
# Euclidean Algorithm
def gcd(m, n):
    """Greatest common divisor of m and n (recursive Euclidean algorithm).

    Bug fix: the original computed ``m % n`` unconditionally and raised
    ZeroDivisionError for n == 0; by definition gcd(m, 0) == m.
    """
    if n == 0:
        return m
    r = m % n
    return gcd(n, r) if r else n
# Euclidean Algorithm (non-recursive)
def gcd2(m, n):
    """Greatest common divisor of m and n, computed iteratively."""
    a, b = m, n
    while b:
        a, b = b, a % b
    return a
# Extended Euclidean Algorithm
def extgcd(a, b):
    """Return (g, x, y) such that a*x + b*y == g == gcd(a, b)."""
    if not b:
        # Base case: gcd(a, 0) = a = a*1 + 0*0.
        return a, 1, 0
    d, y, x = extgcd(b, a % b)
    y -= (a // b) * x
    return d, x, y
# lcm (least common multiple)
def lcm(m, n):
    """Least common multiple of m and n.

    NOTE(review): relies on the module-level name ``gcd``, which this
    snippet file redefines several times below — at import time the final
    (list-taking) definition wins, so calling this lcm as-is would fail.
    Confirm which gcd is intended.
    """
    return m//gcd(m, n)*n
# greatest common divisor of a and b
def gcd(a, b):
    """Iterative Euclidean algorithm (handles b == 0 by returning a)."""
    while b != 0:
        a, b = b, a % b
    return a
# least common multiple of a and b
def lcm(a, b):
    """lcm via the identity a * b == gcd(a, b) * lcm(a, b)."""
    return (a * b) // gcd(a, b)
# greatest common divisor of multiple numbers
import functools
def euclid(a, b):
    """Two-argument gcd; used as the folding step in gcd(nums) below."""
    return a if b == 0 else euclid(b, a % b)
def gcd(nums):
    """Fold the two-argument euclid() over *nums* to get their common gcd."""
    return functools.reduce(euclid, nums)
import cv2
import numpy as np
# Draw a closed green diamond outline on a white canvas and display it
# until a key is pressed.
side = 400
canvas = np.full((side, side, 3), 255, dtype=np.uint8)
# Four corner points of the diamond, reshaped to OpenCV's (N, 1, 2) contour layout.
diamond = np.array([[200, 50], [300, 200], [200, 350], [100, 200]], np.int32)
diamond = diamond.reshape(-1, 1, 2)
cv2.polylines(canvas, [diamond], True, (0, 255, 0), 8)
winname = "Demo19.01"
cv2.namedWindow(winname)
cv2.imshow(winname, canvas)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
# Set up analysis and PATHS
# imports
import platform
import os
import pandas as pd
import numpy as np
import json
import sys
from sklearn.preprocessing import OneHotEncoder
from .utils.fmri_core import analysis as pa
from .utils.fmri_core import utils as pu
from .utils.fmri_core import vis as pv
from .utils.fmri_core import rsa
# if sys.platform == 'darwin':
# home = os.path.join("/Users", "njchiang", "GitHub")
# plat = "osx"
# elif sys.platform == "linux":
# import platform
# if platform.linux_distribution()[0] == "debian":
# home = os.path.join("/home", "njchiang", "data", "CloudStation", "Grad",
# "Research", "montilab-ucla")
# plat = "linux"
# else:
# home = os.path.join("/u", "project", "monti", "Analysis", "Analogy",
# "code")
# plat = "hoff"
# else:
# home = os.path.join("D:\\", "GitHub")
# plat = "win"
# home = os.path.join("/u", "project", "monti", "Analysis", "Analogy", "code")
# Active platform tag; selects which path set to read from project.json.
plat = "hoff"
# cfg = os.path.join(home, "analogy-fmri", "config", "project.json")
# not sure if this will work...
# NOTE(review): relative path -- assumes the working directory is the repo root.
cfg = "config/project.json"
with open(cfg, "r") as f:
    projectSettings = json.load(f)
# # sys.path.append(paths["github"])
# # sys.path.append(paths["code"])
projecttitle="Analogy"
# Platform-specific file paths, e.g. projectSettings["filepaths"]["hoffPaths"].
PATHS = projectSettings["filepaths"]["{}Paths".format(plat)]
# sys.path.append(PATHS["github"])
# load analysis settings
# analysisSettings = pu.load_config(os.path.join(PATHS["code"], "config", "analyses.json"))
# contrastSettings = pu.load_config(os.path.join(PATHS["code"], "config", "contrasts.json"))
analysisSettings = pu.load_config("config/analyses.json")
contrastSettings = pu.load_config("config/contrasts.json")
# trial order
# order = pu.load_labels(os.path.join(PATHS["code"], "labels", "trialorder_rsa_absorted.csv"))
order = pu.load_labels(os.path.join("labels", "trialorder_rsa_absorted.csv"))
def compile_models(write=False):
    """Assemble every candidate model's feature vectors keyed by word pair.

    Loads rating/label CSVs and .mat posterior-probability files from the
    ``labels`` directory and returns a dict mapping model name to
    {wordpair: feature vector} (plus the ordered ``wordpairs`` list).

    Fix: ``np.float`` (removed in NumPy 1.24) replaced with the builtin
    ``float`` -- behaviorally identical, since ``np.float`` was an alias
    for the builtin.

    :param write: accepted but unused here; kept for interface compatibility.
    :returns: dict of model-name -> {wordpair: np.ndarray} (and "wordpairs").
    """
    # return dictionary of dictionaries
    typicality = pu.load_labels(os.path.join("labels", "typicality.csv"))
    w2vdiffs = pu.load_labels(os.path.join("labels", "word2vec_diffs.csv"))
    humanratings = pu.load_labels(os.path.join("labels", "humanratings.csv"), skiprows=2)
    rstpostprob9 = pu.load_mat_data(os.path.join("labels", "rstpostprob9.mat"))
    rstpostprob79 = pu.load_mat_data(os.path.join("labels", "rstpostprob79.mat"))
    rstpostprob79thresh = pu.load_mat_data(os.path.join("labels", "rstpostprob79.thresh.mat"))
    rstpostprob79norm = pu.load_mat_data(os.path.join("labels", "rst.BART79norm.mat"))
    rstpostprob79power = pu.load_mat_data(os.path.join("labels", "rst.BART79normpower.mat"))
    rstpostprob270 = pu.load_mat_data(os.path.join("labels", "rst.postprobSP.MAT"))
    concatword = pu.load_mat_data(os.path.join("labels", "w2vconcat.mat"))
    accuracies = pu.load_labels(os.path.join("labels", "group_accuracy.csv"))
    # "Trial" is formatted "ABTag::CDTag"; split it into separate columns.
    accuracies["ABTag"] = accuracies["Trial"].apply(lambda x: x.split("::")[0])
    accuracies["CDTag"] = accuracies["Trial"].apply(lambda x: x.split("::")[1])
    # NOTE(review): .mean() over non-numeric columns errors on modern pandas;
    # confirm the pinned pandas version before upgrading.
    accuracies = accuracies.groupby("ABTag").mean()
    mat_accuracy = {}
    mat_concatword = {}
    mat_rstpostprob79 = {}
    mat_rstpostprob79thresh = {}
    mat_rstpostprob79norm = {}
    mat_rstpostprob79power = {}
    mat_humanratings = {}
    mat_rstpostprob9 = {}
    mat_mainrel = {}
    mat_subrel = {}
    mat_rel = {}
    mat_sem = {}
    mat_intuit = {}
    mat_w2v = {}
    wordpairs = []
    mat_typicality = {}
    # NOTE(review): positional arg is the legacy sklearn `n_values`; removed
    # in modern sklearn (would need OneHotEncoder(categories=...)). Confirm
    # the pinned sklearn version before changing.
    mainenc = OneHotEncoder(4)
    subenc = OneHotEncoder(10)
    for i in range(len(rstpostprob79["wordpair"])):
        wordpair = rstpostprob79["wordpair"][i, 0][0]
        wordpairs.append(wordpair)
        # row index of this word pair in the concatenated-word2vec matrix
        ci = np.where(concatword["wordpair"][:, :] == wordpair)[0][0]
        mat_accuracy[wordpair] = accuracies.loc[wordpair].astype(float)
        mat_concatword[wordpair] = concatword["concwordmat"].astype(float)[ci]
        mat_rstpostprob9[wordpair] = rstpostprob9["rstpostprob_sm"].astype(float)[i]
        mat_rstpostprob79[wordpair] = rstpostprob79["rstpostprob"].astype(float)[i]
        mat_rstpostprob79thresh[wordpair] = rstpostprob79thresh["rstpostprob"].astype(float)[i]
        mat_humanratings[wordpair] = humanratings[humanratings.wordpair == wordpair].values[0, 1:].astype(float)
        mat_typicality[wordpair] = typicality[typicality.wordpair == wordpair].values[0, 1:].astype(float)
        mat_w2v[wordpair] = w2vdiffs[w2vdiffs.wordpair == wordpair].values[0, 1:].astype(float)
        # One-hot encode the (1-based) main/sub relation codes; drop the last
        # column to avoid collinearity.
        mat_mainrel[wordpair] = mainenc.fit_transform(rstpostprob79["wordpair"][i, 1] -
                                                      1).toarray()[0, :-1]
        mat_subrel[wordpair] = subenc.fit_transform(rstpostprob79["wordpair"][i, 2] -
                                                    1).toarray()[0, :-1]
        mat_rel[wordpair] = np.hstack([mat_mainrel[wordpair], mat_subrel[wordpair]])
        mat_sem[wordpair] = np.hstack([mat_mainrel[wordpair][0] +
                                       mat_mainrel[wordpair][1],
                                       mat_mainrel[wordpair][2]])
        mat_intuit[wordpair] = np.hstack([mat_sem[wordpair],
                                          mat_rel[wordpair]])
    mat_rstpostprob270 = {wordpair[0][0]: rstpostprob270["pred_prob_pos1temp"][0][0][i] for i, wordpair in enumerate(rstpostprob270["all_test_names"])}
    for i in range(len(rstpostprob79norm["wordpairname"])):
        wordpair = rstpostprob79norm["wordpairname"][i, 0][0]
        mat_rstpostprob79norm[wordpair] = rstpostprob79norm["pred_prob"].astype(float)[i]
        mat_rstpostprob79power[wordpair] = rstpostprob79power["pred_prob"].astype(float)[i]
    models = {
        "wordpairs": wordpairs,
        "humanratings": mat_humanratings,
        "accuracy": mat_accuracy,
        "rstpostprob79": mat_rstpostprob79,
        "rstpostprob79thresh": mat_rstpostprob79thresh,
        "rstpostprob79norm": mat_rstpostprob79norm,
        "rstpostprob79power": mat_rstpostprob79power,
        "rstpostprob270": mat_rstpostprob270,
        "rstpostprob9": mat_rstpostprob9,
        "w2vdiff": mat_w2v,
        "mainrel": mat_mainrel,
        "subrel": mat_subrel,
        "rel": mat_rel,
        "sem": mat_sem,
        "intuit": mat_intuit,
        "concatword": mat_concatword,
        "typicality": mat_typicality
    }
    return models
def load_rois(t="cope-LSS", logger=None):
    """Load each subject's cached ROI beta archive and label table.

    :param t: beta type tag used in the cached filenames.
    :param logger: optional logger passed to pu.write_to_logger.
    :returns: (betas, labels) -- two dicts keyed by subject id.
    """
    betas, labels = {}, {}
    for subject in projectSettings["subjects"]:
        pu.write_to_logger("Loading {} betas".format(subject), logger)
        roi_dir = os.path.join(PATHS["root"], "derivatives", subject, "rois")
        betas[subject] = np.load(
            os.path.join(roi_dir, "{}_{}-betas.npz".format(subject, t)))
        labels[subject] = pu.load_labels(
            os.path.join(roi_dir, "{}_{}_labels.csv".format(subject, t)))
    return betas, labels
def save_rois(masks_dict, t="tstat-LSS", logger=None):
    """Mask each subject's beta image with every ROI mask and cache to disk.

    Writes one compressed .npz (masked betas per ROI key) and one labels CSV
    per subject under derivatives/<sub>/rois.

    :param masks_dict: mapping of output key -> mask template name.
    :param t: beta type tag; "condensed" in *t* selects the condensed loader.
    :param logger: optional logger passed through to the pu helpers.
    """
    for sub in projectSettings["subjects"]:
        pu.write_to_logger("Writing {} betas".format(sub), logger)
        # choose the loader that matches the beta format
        if "condensed" in t:
            img, labels, _ = load_condensed_betas(projectSettings, sub, t,
                                                  logger=logger)
        else:
            img, labels, _ = load_betas(projectSettings, sub, t, logger=logger)
        betas = {}
        for mask, maskname in masks_dict.items():
            # mask the full beta image with this ROI's mask volume
            betas[mask] = pu.mask_img(img, pu.load_img(
                os.path.join(
                    PATHS['root'], 'derivatives', sub, 'masks',
                    projectSettings["templates"]["masks"].format(maskname)),
                logger=logger
            ))
        labels.to_csv(os.path.join(PATHS["root"], "derivatives", sub, "rois",
                                   "{}_{}_labels.csv".format(sub, t)), index=False)
        np.savez_compressed(os.path.join(PATHS["root"], "derivatives", sub, "rois",
                                         "{}_{}-betas.npz".format(sub, t)), **betas)
def load_condensed_betas(settings, sub, t="LSS-condensed", logger=None):
    """Load the condensed beta image and matching labels for one subject.

    :param settings: project settings dict (uses "templates").
    :param sub: subject identifier.
    :param t: beta type tag embedded in the filename.
    :returns: (fmri_data, labels, bg_image)
    """
    # whiten the data
    # Labels take every other row of the global trial `order` -- presumably
    # the AB half of each trial; confirm against the trial-order CSV.
    labels = order[::2].loc[:, ["ABTag",
                                "ABMainRel",
                                "ABSubRel"]].reset_index(drop=True)
    labels["AB"] = 1
    labels["TrialTag"] = labels["ABTag"]
    fmri_data = pu.load_img(os.path.join(
        PATHS['root'], 'derivatives', sub, 'betas',
        "{}_task-analogy_betas-{}.nii.gz".format(sub, t)),
        logger=logger)
    # background/anatomical reference image for visualization
    bg_image = pu.load_img(
        os.path.join(PATHS['root'], 'derivatives', sub,
                     'reg', settings["templates"]["reg"]),
        logger=logger
    )
    return fmri_data, labels, bg_image
def load_betas(settings, sub, t="tstat-LSS", center=True, scale=False,
               logger=None):
    """Load per-run beta images and event labels for one subject.

    :param settings: project settings dict (uses "subjects" and "templates").
    :param sub: subject identifier.
    :param t: beta type tag; "subrel" in *t* selects the subrel event files.
    :param center: if True, mean-center each run's image via pu.center_img.
    :param scale: accepted but not used in this function.
    :returns: (fmri_data, labels, bg_image)
    """
    labels = []
    imgs = []
    # whiten the data
    for ri, r in enumerate(settings["subjects"][sub]):
        if center:
            imgs.append(
                pu.center_img(os.path.join(
                    PATHS['root'], 'derivatives', sub, 'betas',
                    settings["templates"]["betas"].format(sub, r, t)),
                    logger=logger))
        else:
            # uncentered: pass the path through; presumably resolved downstream
            imgs.append(os.path.join(PATHS['root'], 'derivatives', sub, 'betas',
                                     settings["templates"]["betas"].format(sub, r, t)))
        if "subrel" in t:
            # NOTE(review): path components are passed as separate positional
            # args -- presumably pu.load_labels joins them internally; confirm.
            labels.append(pu.load_labels(PATHS["root"],
                                         'derivatives', sub, 'betas',
                                         "{}_task-analogy_{}_events-subrel.tsv".format(sub, r),
                                         sep='\t', logger=logger))
            # tag every row with a 1-based run ("chunks") index
            labels[ri]["chunks"] = pd.Series([ri+1 for _ in range(len(labels[
                                                                       ri]))])
        else:
            labels.append(pu.load_labels(PATHS["root"],
                                         'derivatives', sub, 'func',
                                         settings["templates"]["events"].format(sub, r),
                                         logger=logger))
    labels = pd.concat(labels).reset_index(drop=True)
    fmri_data = pu.concat_imgs(imgs, logger=logger)
    # background/anatomical reference image for visualization
    bg_image = pu.load_img(
        os.path.join(PATHS['root'], 'derivatives', sub,
                     'reg', settings["templates"]["reg"]),
        logger=logger
    )
    return fmri_data, labels, bg_image
#
# def load_data_pymvpa(sub, maskname, normalize=True, logger=None):
# imgFile = os.path.join(PATHS['root'], 'derivatives', sub, 'betas',
# pu.format_bids_name(sub, 'task-analogy',
# 'betas-pymvpa.nii.gz'))
# mask = pu.load_img(PATHS['root'],
# 'derivatives', sub, 'masks',
# maskname + '.nii.gz', logger=logger)
# labels = pu.load_labels(PATHS['root'],
# 'derivatives', sub, 'betas',
# pu.format_bids_name(sub, 'task-analogy',
# 'events-pymvpa.tsv'),
# sep='\t', index_col=0, logger=logger)
# maskedImg = pa.mask_img(imgFile, mask)
# # conditionSelector = np.where(labels['ab'] == 1)
#
# if normalize:
# resids = []
# # whiten the data
# for r in range(8):
# residFile = os.path.join(PATHS['root'], 'derivatives', sub, 'func',
# pu.format_bids_name(sub, 'task-analogy',
# "run-0{}".format(r+1),
# 'resids-pymvpa.nii.gz'))
# resids.append(pa.mask_img(residFile, mask, logger=logger))
#
# fmri_data = rsa.noise_normalize_beta(maskedImg,
# np.vstack(resids), logger=logger)
# # fmri_data = rsa.noise_normalize_beta(maskedImg[conditionSelector],
# # np.vstack(resids), logger=logger)
# else:
# # fmri_data = maskedImg[conditionSelector]
# fmri_data = maskedImg
#
# # these_labels = labels.iloc[conditionSelector]
# bg_image = pu.load_img(os.path.join(PATHS['root'],
# 'derivatives',
# sub, 'reg', 'BOLD_template.nii.gz'), logger=logger)
# return fmri_data, labels, bg_image, mask
#
#
# subrel utils
# def load_data_subrel(sub, maskname, t="cope", normalize=False, logger=None):
# mask = pu.load_img(PATHS['root'],
# 'derivatives', sub, 'masks',
# maskname + '.nii.gz', logger=logger)
#
# labels = []
# imgs = []
# # whiten the data
# for r in range(8):
# imgFile = os.path.join(PATHS['root'], 'derivatives', sub, 'betas',
# "{}_task-analogy_run-0{}_betas-{}-subrel.nii.gz".format(sub, r+1, t))
# labels.append(pu.load_labels(PATHS["root"],
# 'derivatives', sub, 'betas',
# "{}_task-analogy_run-0{}_events-subrel.tsv".format(sub, r+1),
# sep='\t', logger=logger))
# labels[r]["chunks"] = pd.Series([r+1 for i in range(len(labels[r]))])
#
# if normalize:
# residFile = os.path.join(PATHS['root'], 'derivatives', sub, 'func',
# pu.format_bids_name(sub, 'task-analogy',
# "run-0{}".format(r+1),
# 'resids-subrel.nii.gz'))
# maskedImg = pa.mask_img(imgFile, mask, logger=logger)
# resids = pa.mask_img(residFile, mask, logger=logger)
# imgs.append(rsa.noise_normalize_beta(maskedImg, resids, logger=logger))
# else:
# imgs.append(pa.mask_img(imgFile, mask, logger=logger))
#
# # resids.append(pa.mask_img(residFile, mask))
# # run_data = noise_normalize_beta()
# labels = pd.concat(labels).reset_index(drop=True)
#
# fmri_data = np.vstack(imgs)
#
# bg_image = pu.load_img(os.path.join(PATHS['root'],
# 'derivatives',
# sub, 'reg', 'BOLD_template.nii.gz'), logger=logger)
# return fmri_data, labels, bg_image, mask
#
#
# # LSS utils
# def load_data_lss(sub, maskname, t="cope", normalize=False, logger=None):
# mask = pu.load_img(PATHS['root'],
# 'derivatives', sub, 'masks',
# maskname + '.nii.gz', logger=logger)
#
# labels = []
# imgs = []
# # whiten the data
# for r in range(8):
# imgFile = os.path.join(PATHS['root'], 'derivatives', sub, 'betas',
# "{}_task-analogy_run-0{}_betas-{}-LSS.nii.gz".format(sub, r+1, t))
# labels.append(pu.load_labels(PATHS["root"],
# 'derivatives', sub, 'func',
# "{}_task-analogy_run-0{}_events.tsv".format(sub, r+1),
# sep='\t', logger=logger))
# labels[r]["chunks"] = pd.Series([r+1 for i in range(len(labels[r]))])
#
# residFile = os.path.join(PATHS['root'], 'derivatives', sub, 'func',
# pu.format_bids_name(sub, 'task-analogy',
# "run-0{}".format(r+1),
# 'resids-lss.nii.gz'))
# if normalize:
# maskedImg = pa.mask_img(imgFile, mask, logger=logger)
# resids = pa.mask_img(residFile, mask, logger=logger)
# imgs.append(rsa.noise_normalize_beta(maskedImg, resids, logger=logger))
# else:
# imgs.append(pa.mask_img(imgFile, mask, logger=logger))
#
# # resids.append(pa.mask_img(residFile, mask))
# # run_data = noise_normalize_beta()
# labels = pd.concat(labels).reset_index(drop=True)
#
# mainrels = []
# subrels = []
# for _, r in labels.iterrows():
# if r["CD"] == 1:
# mainrels.append(r["CDMainRel"])
# subrels.append(r["CDSubRel"])
# elif r["AB"] == 1:
# mainrels.append(r["ABMainRel"])
# subrels.append(r["ABSubRel"])
# else:
# mainrels.append("None")
# subrels.append("None")
#
# labels["MainRel"] = mainrels
# labels["SubRel"] = subrels
# fmri_data = np.vstack(imgs)
#
# bg_image = pu.load_img(os.path.join(PATHS['root'],
# 'derivatives',
# sub, 'reg', 'BOLD_template.nii.gz'), logger=logger)
# return fmri_data, labels, bg_image, mask
#
# def reorder_data_pymvpa(data, old_order, new_order):
# return np.vstack([data[old_order.trialtag == v] for v in new_order.TrialTag])
#
#
# def reorder_data_lss(data, old_order, new_order):
# return np.vstack([data[old_order.TrialTag == v] for v in new_order.TrialTag])
#
#
# def reorder_data_subrel(data, old_order=None, new_order=None):
# return data[old_order.index]
#
#
# def generate_rdms(method, maskname, order,
# loadfunc, loadfunc_args=None, reorderfunc=reorder_data_lss,
# logger=None):
# full = []
# for sub in projectSettings["subjects"].keys():
# fmri_data, these_labels, _, _ = loadfunc(sub, maskname, **loadfunc_args)
# fmri_reordered_data = reorderfunc(fmri_data, these_labels, order)
# if method == "avg":
# full.append(pa.rdm((fmri_reordered_data[::2] +
# fmri_reordered_data[1::2])/2,
# metric="euclidean", logger=logger))
# else:
# full.append(pa.rdm(fmri_reordered_data, metric="euclidean", logger=logger))
# return np.array(full)
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
from .chartsheet import Chartsheet
|
import random
import cv2
import numpy as np
from vision.find_car_number import FindCarNumber
class Vision:
    """Locate, crop, and segment a licence plate from an input image."""

    def __init__(self, image_path):
        # Load the image from disk (OpenCV reads BGR).
        self.image = cv2.imread(image_path)

    def get_car_number_pos(self):
        """Get the licence-plate position."""
        c = FindCarNumber(image=self.image).find_card_pos()
        return c["pos"]

    def cropped_image(self, pos):
        """Crop the image down to the licence plate."""
        try:
            pos = pos[0]
        except IndexError:
            raise ValueError("Plate not found!")
        # NumPy slicing is rows then columns: [y0:y1, x0:x1].
        cropped = self.image[pos[0][1]:pos[1][1], pos[0][0]:pos[1][0]]
        return cropped

    @staticmethod
    def divided_image(cropped):
        """Split the licence plate into individual character images."""
        try:
            divided = FindCarNumber.cut_text_peak(cropped)
        except IndexError as e:  # peak method failed -- fall back to plain cutting
            print(e)
            divided = FindCarNumber.cut_text(cropped)
        for key in divided:
            # resize (disabled)
            # divided[key] = cv2.resize(
            #     divided[key],
            #     (32, 32),
            #     interpolation=cv2.INTER_CUBIC
            # )
            # Gaussian blur (disabled)
            # divided[key] = cv2.GaussianBlur(
            #     divided[key],
            #     (3, 3), 1
            # )
            # keep only the blue region (HSV threshold)
            lower_blue = np.array([0, 30, 180])
            higher_blue = np.array([360, 200, 360])
            divided[key] = cv2.cvtColor(divided[key], cv2.COLOR_BGR2HSV)
            divided[key] = cv2.inRange(divided[key], lower_blue, higher_blue)
            # save each segment under a random filename
            if key == "region":
                cv2.imwrite("demo_images/regions/%s.jpg" % random.randint(1, 999999999), divided[key])
            else:
                cv2.imwrite("demo_images/chars/%s.jpg" % random.randint(1, 999999999), divided[key])
        return divided
|
# Reparameterization
# Goal: make MCMC sampling more efficient.
# Rescaling the data is itself a form of reparameterization.
# Start with an extreme example: "Neal's funnel".
import numpy as np
import seaborn as sns
import pandas
import matplotlib.pyplot as plt
import mcmc_tools
import scipy.stats as stats
from scipy.stats import norm
import random
# Neal's funnel
# There is no data, so the prior is itself the posterior.
# An example of how sampling fails when the log posterior has an awkward shape;
# since sampling is driven by the log-posterior value, it is understandable
# that the shape has an effect.
# Remedy:
# separate the scale out of the distribution.
stan_data = {}
filename = '../model/model10-3-1'
mcmc_result_b = mcmc_tools.sampling(filename, stan_data, n_jobs=4, seed=123)
mcmc_sample = mcmc_result_b.extract()
# Refresher on building a 2-D mesh
X, Y = np.mgrid[-5:6, -5:6]
print(X.shape)
print(X)
print(Y.shape)
print(Y)
# print(X.ravel())
pts = np.c_[X.ravel(), Y.ravel()]
# print(pts[:,0])
data = X+2*Y
# print(data.shape)
# print(data)
plt.contourf(X, Y, data)
plt.show()
# Build a 2-D mesh to plot against the sampling results
xx, yy = np.mgrid[-5:5:30j, -5:5:30j]
x = xx.ravel()
y = yy.ravel()
# Flatten the mesh coordinates into one array per axis
print(x)
# Compute the log posterior probability at each grid point
lp = np.log(stats.norm.pdf(yy, loc=0, scale=3)) + np.log(stats.norm.pdf(xx, loc=0, scale=np.exp(yy/2)))
lp[lp < -15] = -15
plt.contourf(xx, yy, lp, vmin=-15, vmax=0)
plt.scatter(mcmc_sample['r'][:, 0], mcmc_sample['a'], s=1, c='k')
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.show()
plt.close()
# Reparameterized version:
# the original parameters are sampled via independent standard normals, so the
# sampling is not skewed by the magnitude of the log posterior computed while
# sampling.
filename = '../model/model10-3-1-b'
mcmc_result_b = mcmc_tools.sampling(filename, stan_data, n_jobs=4, seed=123)
mcmc_sample_b = mcmc_result_b.extract()
lp = np.log(stats.norm.pdf(yy, loc=0, scale=3)) + np.log(stats.norm.pdf(xx, loc=0, scale=np.exp(yy/2)))
lp[lp < -15] = -15
plt.contourf(xx, yy, lp, vmin=-15, vmax=0)
plt.scatter(mcmc_sample_b['r'][:, 0], mcmc_sample_b['a'], s=1, c='k')
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.show()
|
# Puzzle input: one mass per spacecraft module (Advent of Code 2019, day 1).
masses = [
    54755,
    96495,
    111504,
    53923,
    118158,
    118082,
    137413,
    135315,
    87248,
    127646,
    79201,
    52399,
    77966,
    129568,
    63880,
    128973,
    55491,
    111226,
    126447,
    87017,
    112469,
    83975,
    51280,
    60239,
    120524,
    57122,
    136517,
    117378,
    93629,
    55125,
    68990,
    70336,
    115119,
    68264,
    148122,
    70075,
    106770,
    54976,
    123852,
    61813,
    113373,
    53924,
    59660,
    67111,
    52825,
    81568,
    110842,
    134870,
    135529,
    78689,
    129451,
    96041,
    91627,
    70863,
    100098,
    121908,
    96623,
    143752,
    149936,
    116283,
    149488,
    126158,
    106499,
    124927,
    109574,
    70711,
    139078,
    67212,
    124251,
    123803,
    73569,
    145668,
    96045,
    59748,
    123238,
    68005,
    121412,
    97236,
    104800,
    86786,
    141680,
    123807,
    82310,
    76593,
    146092,
    82637,
    92339,
    93821,
    56247,
    58328,
    90159,
    105700,
    57317,
    69011,
    125544,
    102372,
    63797,
    92127,
    111207,
    77596,
]
def fuel_need(mass: int):
    """Fuel required to launch a module of the given mass (never negative)."""
    required = mass // 3 - 2
    return required if required > 0 else 0
def fuel_need_recursive(mass: int, current: int):
    """Total fuel for *mass*, counting the fuel needed to carry the fuel itself.

    Iterative equivalent of the tail recursion: keep adding fuel for the
    previously added fuel until the increment bottoms out at zero.
    """
    total = current
    step = fuel_need(mass)
    while step > 0:
        total += step
        step = fuel_need(step)
    return total
if __name__ == "__main__":
    # Part 1: sum the launch fuel for every module mass.
    fuel = sum([fuel_need(x) for x in masses])
    print("Part 1: Required fuel: ", fuel)
    # Part 2, also take into account the mass of fuel
    fuel2 = sum([fuel_need_recursive(x, 0) for x in masses])
    print("Part 2: Required fuel: ", fuel2)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class SuningBookPipeline(object):
    """Scrapy pipeline that buffers items and appends them to book.txt in batches.

    Items accumulate in a class-level buffer; once more than 200 are pending
    they are flushed to disk. ``close_spider`` flushes the remainder, fixing a
    bug where up to 200 trailing items were silently dropped at crawl end.
    """

    # Class-level buffer shared by all pipeline instances.
    # NOTE(review): the attribute name shadows the builtin `list` inside the
    # class namespace; kept for backward compatibility with external readers.
    list = []

    def process_item(self, item, spider):
        """Collect *item*; flush the buffer to disk once it exceeds 200 entries."""
        SuningBookPipeline.list.append(item)
        if len(SuningBookPipeline.list) > 200:
            self.write_file(SuningBookPipeline.list)
            SuningBookPipeline.list.clear()
        # print(str(item))
        return item

    def close_spider(self, spider):
        """Scrapy hook: flush any items still buffered when the spider closes."""
        if SuningBookPipeline.list:
            self.write_file(SuningBookPipeline.list)
            SuningBookPipeline.list.clear()

    def write_file(self, items):
        """Append each buffered item to book.txt, one per line."""
        # explicit utf-8 keeps output stable across platform default encodings
        with open("book.txt", "a", encoding="utf-8") as f:
            for i in items:
                f.write(str(i))
                f.write("\r\n")
|
from flask_restplus import Resource
from flask_restplus.namespace import Namespace
from restplus.api.v1.helpers import safe_user_output
from restplus.models import users_list
users_ns = Namespace('users')
class AllUsers(Resource):
    """Collection endpoint exposing every registered user."""

    def get(self):
        """
        View all users
        """
        safe_users = [safe_user_output(self, user) for user in users_list]
        return dict(users=safe_users)
|
#
# PySNMP MIB module SYMMCOMMONPPSTOD (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/neermitt/Dev/kusanagi/mibs.snmplabs.com/asn1/SYMMCOMMONPPSTOD
# Produced by pysmi-0.3.4 at Tue Jul 30 11:34:54 2019
# On host NEERMITT-M-J0NV platform Darwin version 18.6.0 by user neermitt
# Using Python version 3.7.4 (default, Jul 9 2019, 18:13:23)
#
# Runtime symbol imports from other MIB modules (pysmi-generated boilerplate).
# NOTE(review): `mibBuilder` is not defined in this file -- presumably injected
# into the module globals by the pysnmp MIB loader at exec time; confirm.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
ifNumber, ifIndex = mibBuilder.importSymbols("IF-MIB", "ifNumber", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
iso, TimeTicks, ObjectIdentity, Integer32, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Unsigned32, MibIdentifier, NotificationType, ModuleIdentity, Bits, IpAddress, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "TimeTicks", "ObjectIdentity", "Integer32", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Unsigned32", "MibIdentifier", "NotificationType", "ModuleIdentity", "Bits", "IpAddress", "Gauge32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
symmPhysicalSignal, = mibBuilder.importSymbols("SYMM-COMMON-SMI", "symmPhysicalSignal")
# Module identity for the Symmetricom common PPS/TOD MIB subtree.
symmPPSTOD = ModuleIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3))
if mibBuilder.loadTexts: symmPPSTOD.setLastUpdated('201509301433Z')
if mibBuilder.loadTexts: symmPPSTOD.setOrganization('Symmetricom')
if mibBuilder.loadTexts: symmPPSTOD.setContactInfo('Symmetricom Technical Support 1-888-367-7966 toll free USA 1-408-428-7907 worldwide Support@symmetricom.com ')
if mibBuilder.loadTexts: symmPPSTOD.setDescription('Symmetricom, Inc. Common PPS/TOD input/output status and configuration ')
# --- Enumerated Integer32 subtypes (pysmi-generated; do not edit by hand) ---

# enable(1) / disable(2)
class EnaValue(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("enable", 1), ("disable", 2))

# TOD port state: disable(0) / normal(1) / error(2)
class TODPortType(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
    namedValues = NamedValues(("disable", 0), ("normal", 1), ("error", 2))

# Hardware module slot identifier (sys, imc, ioc1/2, exp0-9)
class TPModuleID(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))
    namedValues = NamedValues(("sys", 1), ("imc", 2), ("ioc1", 3), ("ioc2", 4), ("exp0", 5), ("exp1", 6), ("exp2", 7), ("exp3", 8), ("exp4", 9), ("exp5", 10), ("exp6", 11), ("exp7", 12), ("exp8", 13), ("exp9", 14))

# on(1) / off(2)
class OnValue(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("on", 1), ("off", 2))

# Output signal category
class TPOutputType(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("outputGeneral", 1), ("output10Mhz", 2), ("outputPPS", 3))

# Output generation state (warmup -> freerun -> fastlock -> normal)
class TPOutputGeneration(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("warmup", 1), ("freerun", 2), ("fastlock", 3), ("normal", 4))

# Output framing / frequency type
class OutputFrameType(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("freq1544khz", 1), ("freq2048khz", 2), ("ccs", 3), ("cas", 4), ("d4", 5), ("esf", 6))

# apply(1) / nonapply(2)
class ActionApply(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("apply", 1), ("nonapply", 2))

# auto(1) / manual(2)
class OpMode(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("auto", 1), ("manual", 2))

# active(1) / inactive(2)
class ActiveValue(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("active", 1), ("inactive", 2))

# Input clock quality level (SSM-style ordering)
class InputQualityLevel(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))
    namedValues = NamedValues(("prcprs", 1), ("unkstu", 2), ("typeiist2", 3), ("typei", 4), ("typevtnc", 5), ("typeiiist3e", 6), ("typeivst3", 7), ("opt3smc", 8), ("dus", 9))

# Input priority, 1 (highest) .. 4
class InputPriority(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 4)

# yes(1) / no(2)
class YesValue(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("yes", 1), ("no", 2))

# ok(1) / fault(2)
class OkValue(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("ok", 1), ("fault", 2))

# valid(1) / invalid(2) / nurture(3)
class ValidValue(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
    namedValues = NamedValues(("valid", 1), ("invalid", 2), ("nurture", 3))

# TOD input framing: chinaMobile(1) / ntp4(2)
class TODInputFrameType(Integer32):
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("chinaMobile", 1), ("ntp4", 2))
# --- OctetString/Integer32 textual conventions (pysmi-generated) ------------

# SNMP DateAndTime: 8 octets (local time) or 11 (with UTC offset).
class DateAndTime(TextualConvention, OctetString):
    description = "A date-time specification. field octets contents range ----- ------ -------- ----- 1 1-2 year* 0..65536 2 3 month 1..12 3 4 day 1..31 4 5 hour 0..23 5 6 minutes 0..59 6 7 seconds 0..60 (use 60 for leap-second) 7 8 deci-seconds 0..9 8 9 direction from UTC '+' / '-' 9 10 hours from UTC* 0..13 10 11 minutes from UTC 0..59 * Notes: - the value of year is in network-byte order - daylight saving time in New Zealand is +13 For example, Tuesday May 26, 1992 at 1:30:15 PM EDT would be displayed as: 1992-5-26,13:30:15.0,-4:0 Note that if only local time is known, then timezone information (fields 8-10) is not present."
    status = 'current'
    displayHint = '2d-1d-1d,1d:1d:1d.1d,1a1d:1d'
    subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(11, 11), )

# Antenna latitude/longitude, fixed 5 octets: sign, deg, min, sec, sec-fraction.
class TLatAndLon(TextualConvention, OctetString):
    description = "antenna latitude and longitude specification. field octets contents range ----- ------ -------- ----- 1 1 +/-180 deg '+' / '-' 2 2 degree 0..180 3 3 minute 0..59 4 4 second 0..59 5 5 second fraction 0..99 +/- dd:mm:ss.ss "
    status = 'current'
    displayHint = '1a1d:1d:1d.1d'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(5, 5)
    fixedLength = 5

# Antenna height, fixed 4 octets: sign, meters (2 octets), meter fraction.
class TAntHeight(TextualConvention, OctetString):
    description = "antenna height specification. field octets contents range ----- ------ -------- ----- 1 1 +/- '+' / '-' 2 2-3 meter 0..10000 3 4 meter fraction 0..99 +/- hh.hh "
    status = 'current'
    displayHint = '1a2d.1d'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
    fixedLength = 4

# Local UTC offset, fixed 3 octets: sign, hours, minutes.
class TLocalTimeOffset(TextualConvention, OctetString):
    description = "A local time offset specification. field octets contents range ----- ------ -------- ----- 1 1 direction from UTC '+' / '-' 2 2 hours from UTC* 0..13 3 3 minutes from UTC 0..59 * Notes: - the value of year is in network-byte order - The hours range is 0..13 For example, the -6 local time offset would be displayed as: -6:0 "
    status = 'current'
    displayHint = '1a1d:1d'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 3)
    fixedLength = 3

# SSM code, displayed as hex, 0..255.
class TSsm(TextualConvention, Integer32):
    description = 'The ssm hex code'
    status = 'current'
    displayHint = 'x'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 255)
# --- ppstodInput subtree: input status table (read-only columns) ------------
ppstodInput = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1))
ppstodInputStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1))
ppstodInputStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1), )
if mibBuilder.loadTexts: ppstodInputStatusTable.setStatus('current')
if mibBuilder.loadTexts: ppstodInputStatusTable.setDescription('Description.')
# Rows are indexed by (ifIndex, ppstodInputStatusIndex).
ppstodInputStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMMCOMMONPPSTOD", "ppstodInputStatusIndex"))
if mibBuilder.loadTexts: ppstodInputStatusEntry.setStatus('current')
if mibBuilder.loadTexts: ppstodInputStatusEntry.setDescription('Description.')
ppstodInputStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10000)))
if mibBuilder.loadTexts: ppstodInputStatusIndex.setStatus('current')
if mibBuilder.loadTexts: ppstodInputStatusIndex.setDescription('Description.')
ppstodInputPortStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 2), TODPortType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ppstodInputPortStatus.setStatus('current')
if mibBuilder.loadTexts: ppstodInputPortStatus.setDescription('Description.')
ppstodInputPPSStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ppstodInputPPSStatus.setStatus('current')
if mibBuilder.loadTexts: ppstodInputPPSStatus.setDescription('Description.')
ppstodInputPhaseOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ppstodInputPhaseOffset.setStatus('current')
if mibBuilder.loadTexts: ppstodInputPhaseOffset.setDescription('Description.')
ppstodInputClockSourceType = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ppstodInputClockSourceType.setStatus('current')
if mibBuilder.loadTexts: ppstodInputClockSourceType.setDescription('Description.')
ppstodInputClockSourceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ppstodInputClockSourceStatus.setStatus('current')
if mibBuilder.loadTexts: ppstodInputClockSourceStatus.setDescription('Description.')
ppstodInputAccuracy = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ppstodInputAccuracy.setStatus('current')
if mibBuilder.loadTexts: ppstodInputAccuracy.setDescription('Description.')
ppstodInputAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 1, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ppstodInputAlarm.setStatus('current')
if mibBuilder.loadTexts: ppstodInputAlarm.setDescription('Description.')
# --- ppstodInput subtree: input configuration table (read-write columns) ----
ppstodInputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 2))
ppstodInputTable = MibTable((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 2, 1), )
if mibBuilder.loadTexts: ppstodInputTable.setStatus('current')
if mibBuilder.loadTexts: ppstodInputTable.setDescription('The pps-tod configuration table.')
ppstodInputEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "SYMMCOMMONPPSTOD", "ppstodInputIndex"))
if mibBuilder.loadTexts: ppstodInputEntry.setStatus('current')
if mibBuilder.loadTexts: ppstodInputEntry.setDescription('The entry of pps-tod table.')
ppstodInputIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1000)))
if mibBuilder.loadTexts: ppstodInputIndex.setStatus('current')
if mibBuilder.loadTexts: ppstodInputIndex.setDescription('Description.')
ppstodInputCableDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 2, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ppstodInputCableDelay.setStatus('current')
if mibBuilder.loadTexts: ppstodInputCableDelay.setDescription('The input cable delay value. The valid setting is 0-999999. ')
ppstodInputFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 2, 1, 1, 6), TODInputFrameType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ppstodInputFormat.setStatus('current')
if mibBuilder.loadTexts: ppstodInputFormat.setDescription('TOD input format can be following chinaMobile(1) NTP-4(2) NotAvailable(0)')
ppstodInputManualLeapSeconds = MibTableColumn((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(20, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ppstodInputManualLeapSeconds.setStatus('current')
if mibBuilder.loadTexts: ppstodInputManualLeapSeconds.setDescription('ManualLeap second')
# --- ppstodOutput subtree and conformance statements ------------------------
ppstodOutput = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 2))
ppstodOutputStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 2, 1))
ppstodOutputConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 2, 2))
ppstodConformance = ObjectIdentity((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 3))
if mibBuilder.loadTexts: ppstodConformance.setStatus('current')
if mibBuilder.loadTexts: ppstodConformance.setDescription('This subtree contains conformance statements for the Symmetricom PPS-TOD MIB. ')
ppstodCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 3, 1))
ppstodBasicCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 3, 1, 1)).setObjects(("SYMMCOMMONPPSTOD", "ppstodInputConfigGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ppstodBasicCompliance = ppstodBasicCompliance.setStatus('current')
if mibBuilder.loadTexts: ppstodBasicCompliance.setDescription('The compliance statement for SNMP entities which have PPS-TOD status and configuration of input/output.')
ppstodUocGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 3, 2))
ppstodInputConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9070, 1, 2, 5, 2, 3, 3, 2, 1)).setObjects(("SYMMCOMMONPPSTOD", "ppstodInputCableDelay"), ("SYMMCOMMONPPSTOD", "ppstodInputFormat"), ("SYMMCOMMONPPSTOD", "ppstodInputManualLeapSeconds"), ("SYMMCOMMONPPSTOD", "ppstodInputPortStatus"), ("SYMMCOMMONPPSTOD", "ppstodInputPPSStatus"), ("SYMMCOMMONPPSTOD", "ppstodInputClockSourceType"), ("SYMMCOMMONPPSTOD", "ppstodInputPhaseOffset"), ("SYMMCOMMONPPSTOD", "ppstodInputClockSourceStatus"), ("SYMMCOMMONPPSTOD", "ppstodInputAccuracy"), ("SYMMCOMMONPPSTOD", "ppstodInputAlarm"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ppstodInputConfigGroup = ppstodInputConfigGroup.setStatus('current')
if mibBuilder.loadTexts: ppstodInputConfigGroup.setDescription('A collection of objects providing information applicable to PPS-TOD configuration group.')
mibBuilder.exportSymbols("SYMMCOMMONPPSTOD", ppstodInputStatus=ppstodInputStatus, ppstodOutputConfig=ppstodOutputConfig, TPOutputGeneration=TPOutputGeneration, TAntHeight=TAntHeight, YesValue=YesValue, TODInputFrameType=TODInputFrameType, OkValue=OkValue, TPOutputType=TPOutputType, ppstodInputManualLeapSeconds=ppstodInputManualLeapSeconds, ActiveValue=ActiveValue, EnaValue=EnaValue, TLatAndLon=TLatAndLon, ppstodInputPPSStatus=ppstodInputPPSStatus, ppstodInputTable=ppstodInputTable, symmPPSTOD=symmPPSTOD, ppstodInputStatusEntry=ppstodInputStatusEntry, ppstodInputAlarm=ppstodInputAlarm, ppstodInputClockSourceType=ppstodInputClockSourceType, TPModuleID=TPModuleID, ppstodOutputStatus=ppstodOutputStatus, ppstodCompliances=ppstodCompliances, TLocalTimeOffset=TLocalTimeOffset, InputQualityLevel=InputQualityLevel, TSsm=TSsm, OpMode=OpMode, ppstodInputPhaseOffset=ppstodInputPhaseOffset, ppstodInputAccuracy=ppstodInputAccuracy, ppstodInputFormat=ppstodInputFormat, ppstodUocGroups=ppstodUocGroups, ppstodInputIndex=ppstodInputIndex, ppstodInputStatusIndex=ppstodInputStatusIndex, ppstodOutput=ppstodOutput, OnValue=OnValue, ppstodInputClockSourceStatus=ppstodInputClockSourceStatus, PYSNMP_MODULE_ID=symmPPSTOD, ppstodInputConfig=ppstodInputConfig, ppstodInputPortStatus=ppstodInputPortStatus, ppstodInputConfigGroup=ppstodInputConfigGroup, TODPortType=TODPortType, ppstodBasicCompliance=ppstodBasicCompliance, ppstodInput=ppstodInput, ppstodConformance=ppstodConformance, ActionApply=ActionApply, ppstodInputEntry=ppstodInputEntry, OutputFrameType=OutputFrameType, ppstodInputCableDelay=ppstodInputCableDelay, InputPriority=InputPriority, ValidValue=ValidValue, DateAndTime=DateAndTime, ppstodInputStatusTable=ppstodInputStatusTable)
|
"""ABOUT
"""
APP_NAME = "pyswd"
VERSION = "v1.0.0"
AUTHOR = "Pavel Revak"
AUTHOR_EMAIL = "pavel.revak@gmail.com"
DESCRIPTION = "SWD debugging tool"
URL = "https://github.com/pavelrevak/pyswd"
|
# sum of digits of number
# sum of digits of number
def sum_digit(n):
    """Return the sum of the decimal digits of the integer *n*.

    The sign is ignored. The original implementation looped forever for
    negative input because Python's floor division never reaches zero
    (e.g. -1 // 10 == -1), so we normalize with abs() first.
    """
    n = abs(n)
    s = 0
    while n != 0:
        s += n % 10   # take the lowest digit
        n //= 10      # drop the lowest digit
    return s


if __name__ == "__main__":
    print(sum_digit(1234))
|
import unittest
from mock import patch
import gevent
import gevent.queue
from steam.core.cm import CMClient
class CMClient_Scenarios(unittest.TestCase):
    """Scenario tests for steam.core.cm.CMClient.

    All crypto primitives, the TCP connection and the server list are
    patched out in setUp(), so the tests drive the client purely through
    the mocked connection queue (self.conn_in) and assert on the mocks.
    """
    # fake session key returned by the mocked key generator
    test_channel_key = b'SESSION KEY LOL'
    def setUp(self):
        # mock out crypto: encryption/decryption become identity functions
        # so raw protocol bytes pass through the channel unchanged
        patcher = patch('steam.core.crypto.generate_session_key')
        self.addCleanup(patcher.stop)
        self.gen_skey = patcher.start()
        self.gen_skey.return_value = (self.test_channel_key, b'PUBKEY ENCRYPTED SESSION KEY')
        patcher = patch('steam.core.crypto.symmetric_encrypt')
        self.addCleanup(patcher.stop)
        self.s_enc = patcher.start()
        self.s_enc.side_effect = lambda m, k: m
        patcher = patch('steam.core.crypto.symmetric_encrypt_HMAC')
        self.addCleanup(patcher.stop)
        self.s_enc_hmac = patcher.start()
        self.s_enc_hmac.side_effect = lambda m, k, mac: m
        patcher = patch('steam.core.crypto.symmetric_decrypt')
        self.addCleanup(patcher.stop)
        self.s_dec = patcher.start()
        self.s_dec.side_effect = lambda c, k: c
        patcher = patch('steam.core.crypto.symmetric_decrypt_HMAC')
        self.addCleanup(patcher.stop)
        self.s_dec_hmac = patcher.start()
        self.s_dec_hmac.side_effect = lambda c, k, mac: c
        # mock out TCPConnection; incoming packets are fed via self.conn_in
        patcher = patch('steam.core.cm.TCPConnection', autospec=True)
        self.addCleanup(patcher.stop)
        self.conn = patcher.start().return_value
        self.conn_in = gevent.queue.Queue()
        self.conn.__iter__.return_value = self.conn_in
        # mock out CMServerList with ten fake (address, port) entries
        patcher = patch('steam.core.cm.CMServerList', autospec=True)
        self.addCleanup(patcher.stop)
        self.server_list = patcher.start().return_value
        self.server_list.__iter__.return_value = [(127001, 20000+i) for i in range(10)]
        self.server_list.bootstrap_from_webapi.return_value = False
        self.server_list.bootstrap_from_dns.return_value = False
    @patch.object(CMClient, 'emit')
    @patch.object(CMClient, '_recv_messages')
    def test_connect(self, mock_recv, mock_emit):
        # connect() with a populated server list picks the first server,
        # emits 'connected' and starts the receive loop
        # setup
        self.conn.connect.return_value = True
        self.server_list.__len__.return_value = 10
        # run
        cm = CMClient()
        with gevent.Timeout(2, False):
            cm.connect(retry=1)
        gevent.idle()
        # verify
        self.conn.connect.assert_called_once_with((127001, 20000))
        mock_emit.assert_called_once_with('connected')
        mock_recv.assert_called_once_with()
    @patch.object(CMClient, 'emit')
    @patch.object(CMClient, '_recv_messages')
    def test_connect_auto_discovery_failing(self, mock_recv, mock_emit):
        # empty server list and failing bootstraps -> no connect attempt
        # setup
        self.conn.connect.return_value = True
        self.server_list.__len__.return_value = 0
        # run
        cm = CMClient()
        with gevent.Timeout(3, False):
            cm.connect(retry=1)
        gevent.idle()
        # verify
        self.server_list.bootstrap_from_webapi.assert_called_once_with()
        self.server_list.bootstrap_from_dns.assert_called_once_with()
        self.conn.connect.assert_not_called()
    @patch.object(CMClient, 'emit')
    @patch.object(CMClient, '_recv_messages')
    def test_connect_auto_discovery_success(self, mock_recv, mock_emit):
        # webapi bootstrap succeeds, so DNS fallback is skipped and the
        # client connects to the first discovered server
        # setup
        self.conn.connect.return_value = True
        self.server_list.__len__.return_value = 0
        def fake_servers(*args, **kwargs):
            # simulate the bootstrap populating the server list
            self.server_list.__len__.return_value = 10
            return True
        self.server_list.bootstrap_from_webapi.side_effect = fake_servers
        # run
        cm = CMClient()
        with gevent.Timeout(3, False):
            cm.connect(retry=1)
        gevent.idle()
        # verify
        self.server_list.bootstrap_from_webapi.assert_called_once_with()
        self.server_list.bootstrap_from_dns.assert_not_called()
        self.conn.connect.assert_called_once_with((127001, 20000))
        mock_emit.assert_called_once_with('connected')
        mock_recv.assert_called_once_with()
    def test_channel_encrypt_sequence(self):
        # full channel-encryption handshake driven by raw protocol bytes
        # setup
        self.conn.connect.return_value = True
        # run ------------
        cm = CMClient()
        cm.connected = True
        gevent.spawn(cm._recv_messages)
        # receive ChannelEncryptRequest
        self.conn_in.put(b'\x17\x05\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\x00\x01\x00\x00\x00')
        gevent.idle(); gevent.idle(); gevent.idle(); gevent.idle()
        self.conn.put_message.assert_called_once_with(b'\x18\x05\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\x00\x80\x00\x00\x00PUBKEY ENCRYPTED SESSION KEY\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h-\xc4@\x00\x00\x00\x00')
        # receive ChannelEncryptResult (OK)
        self.conn_in.put(b'\x19\x05\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x00\x00\x00')
        cm.wait_event('channel_secured', timeout=2, raises=True)
|
from abc import ABCMeta, abstractmethod
class Explainer(metaclass=ABCMeta):
    """Abstract base class defining the interface for model explainers.

    Subclasses must implement both ``__init__`` and ``explain``.
    """
    @abstractmethod
    def __init__(self, model, data, feature_names=None, target_names=None):
        # abstract: concrete explainers receive the model, background data,
        # and optional feature/target names here
        raise NotImplementedError
    @abstractmethod
    def explain(self, X, y):
        # abstract: produce an explanation for predictions on (X, y)
        raise NotImplementedError
import logging
import multiprocessing
import os
from bootleg.utils import train_utils
def get_log_name(args, mode):
    """Build the full log file path for the given run mode.

    The path is <save_folder>/log_<mode><file_suffix>_gpu<N>, where the
    folder and suffix come from train_utils and N from the run config.
    """
    base = os.path.join(train_utils.get_save_folder(args.run_config), f"log_{mode}")
    suffix = train_utils.get_file_suffix(args)
    gpu_tag = f'_gpu{args.run_config.gpu}'
    return base + suffix + gpu_tag
def create_logger(args, mode):
    """Create and configure the shared "bootleg" logger for a run.

    Sets the level from args.run_config.loglevel, attaches a file handler
    writing to the path from get_log_name() (truncating in 'train' mode,
    appending otherwise), and — on GPU 0 only — a stream handler so console
    output is not duplicated across processes.

    Raises ValueError for an unrecognized log level. Exits the process if
    the logger already has handlers (guards against double configuration).
    """
    # Both branches of the original if/else returned the same named logger,
    # so distributed and single-process runs share one logger object.
    logger = logging.getLogger("bootleg")
    # set logging level
    numeric_level = getattr(logging, args.run_config.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.run_config.loglevel.upper())
    logger.setLevel(numeric_level)
    # do not propagate messages to the root logger
    logger.propagate = False
    log_name = get_log_name(args, mode)
    # Ensure the log file exists without shelling out. The original used
    # os.system("touch " + log_name), which fails on paths containing
    # spaces and on platforms without a `touch` binary.
    if not os.path.exists(log_name):
        open(log_name, 'a').close()
    if not logger.hasHandlers():
        formatter = logging.Formatter('%(asctime)s %(message)s')
        fh = logging.FileHandler(log_name, mode='w' if mode == 'train' else 'a')
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        # only print the stream for the first GPU
        if args.run_config.gpu == 0:
            sh = logging.StreamHandler()
            sh.setFormatter(formatter)
            logger.addHandler(sh)
    else:
        print('Something went wrong in the logger')
        exit()
    return logger
def get_logger(args):
    """Return the shared "bootleg" logger.

    The original branched on args.run_config.distributed but both branches
    returned the identical logger, so the branch is collapsed. The *args*
    parameter is kept for interface compatibility with existing callers.
    """
    return logging.getLogger("bootleg")
from pycipher.caesar import Caesar
import unittest
class TestCaesar(unittest.TestCase):
    """Known-answer tests for pycipher's Caesar shift cipher.

    Each expected string is the full alphabet shifted by the matching key
    from the enumerate() tuple; comparisons are case-insensitive because
    pycipher normalizes case.
    """
    def test_decipher(self):
        ''' Caesar (test_decipher): test known ciphertext->plaintext pairs '''
        text = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        declist = ['xyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvw',
                   'vwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstu',
                   'stuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqr',
                   'pqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmno',
                   'lmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijk',
                   'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz',
                   'bcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyza']
        # keys are positionally paired with the expected plaintexts above
        for i,key in enumerate((3,5,8,11,15,0,25)):
            dec = Caesar(key).decipher(text)
            self.assertEqual(dec.upper(), declist[i].upper())
    def test_encipher(self):
        ''' Caesar (test_encipher): test known plaintext->ciphertext pairs '''
        text = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
        enclist = ['bcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyza',
                   'cdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzab',
                   'efghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcd',
                   'hijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefg',
                   'jklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghi',
                   'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz',
                   'zabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy']
        for i,key in enumerate((1,2,4,7,9,0,25)):
            enc = Caesar(key).encipher(text)
            self.assertEqual(enc.upper(), enclist[i].upper())
    def test_punctuation(self):
        ''' Caesar (test_punctuation): punctuation should remain unmodified '''
        e = Caesar(key=14)
        original = '!@$%%^&*()_-+={}[]|":;<>,./?'
        # keep_punct=True: non-letters pass through unchanged
        enciphered = e.encipher(original,keep_punct=True)
        self.assertEqual(original.upper(), enciphered.upper())
        e = Caesar(key=14)
        original = '!@$%%^&*()_-+={}[]|":;<>,./?'
        # keep_punct=False: non-letters are stripped entirely
        enciphered = e.encipher(original,keep_punct=False)
        self.assertEqual('', enciphered.upper())
import os
import webbrowser as wb
def workstation():
    """Open my standard working environment.

    Launches the configured text editor (Windows-only: os.startfile) and
    opens a fixed set of websites in Chrome. Paths below are local to this
    machine and must be adjusted per installation.
    """
    code_path = "C:\\Program Files\\Sublime Text 3\\sublime_text.exe"  # ADD THE PATH OF TEXT EDITOR OR IDE HERE
    os.startfile(code_path)  # Windows-only API
    chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'  # ADD THE PATH OF CHROME HERE
    urls = (
        "stackoverflow.com",
        "github.com/Arbazkhan4712",
        "gmail.com",
        "google.com",
        "youtube.com",
    )  # ADD THE WEBSITES YOU USE WHILE WORKING
    for url in urls:
        wb.get(chrome_path).open(url)


if __name__ == "__main__":
    # Guarded so importing this module no longer launches everything
    # as an import side effect; running it as a script behaves the same.
    workstation()
|
import unittest
import pysal
from pysal.core.IOHandlers.gwt import GwtIO
import tempfile
import os
import warnings
class test_GwtIO(unittest.TestCase):
    """Tests for the GWT spatial-weights file reader/writer.

    Uses assertRaises instead of the deprecated failUnlessRaises alias
    (removed in Python 3.12); the assertion behavior is identical.
    """
    def setUp(self):
        # open the bundled example GWT file for reading
        self.test_file = test_file = pysal.examples.get_path('juvenile.gwt')
        self.obj = GwtIO(test_file, 'r')
    def test_close(self):
        """Reading from a closed handler must raise ValueError."""
        f = self.obj
        f.close()
        self.failUnlessRaises = self.assertRaises  # keep old attribute callable for any external users
        self.assertRaises(ValueError, f.read)
    def test_read(self):
        """Known properties of the juvenile.gwt example weights."""
        w = self.obj.read()
        self.assertEqual(168, w.n)
        self.assertEqual(16.678571428571427, w.mean_neighbors)
        w.transform = 'B'
        self.assertEqual([1.0], w[1].values())
    def test_seek(self):
        """After exhausting the file, seek(0) allows re-reading."""
        self.test_read()
        self.assertRaises(StopIteration, self.obj.read)
        self.obj.seek(0)
        self.test_read()
    # Commented out by CRS, GWT 'w' mode removed until we can find a good solution for retaining distances.
    # see issue #153.
    # Added back by CRS,
    def test_write(self):
        """Round-trip: write the weights out and read them back."""
        w = self.obj.read()
        f = tempfile.NamedTemporaryFile(
            suffix='.gwt', dir=pysal.examples.get_path(''))
        fname = f.name
        f.close()
        o = pysal.open(fname, 'w')
        #copy the shapefile and ID variable names from the old gwt.
        # this is only available after the read() method has been called.
        #o.shpName = self.obj.shpName
        #o.varName = self.obj.varName
        o.write(w)
        o.close()
        wnew = pysal.open(fname, 'r').read()
        self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
        os.remove(fname)
if __name__ == '__main__':
    # run the GwtIO test suite when executed directly
    unittest.main()
|
from app.forms.serializers.forms import (
FieldInAnswerSerializer,
FormInSubmissionSerializer,
FormPolymorphicSerializer,
FormSerializer,
AnswerableFormSerializer,
EventFormSerializer,
OptionSerializer,
)
from app.forms.serializers.statistics import FormStatisticsSerializer
|
# @date 2022-03-13
# @author Frederic Scherma, All rights reserved without prejudices.
# @license Copyright (c) 2022 Dream Overflow
# Trader info command
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from trader.trader import Trader
from terminal.terminal import Terminal
import logging
logger = logging.getLogger('siis.trader')
error_logger = logging.getLogger('siis.error.trader')
def cmd_trader_froze_asset_quantity(trader: Trader, data: dict) -> dict:
    """
    Lock a quantity of an asset to be not available for trading.

    :param trader: target trader instance (unused until the @todo below).
    :param data: command payload; expects 'asset' (str) and 'quantity' (float >= 0).
    :return: results dict with 'messages' (list of str) and 'error' (bool).
    """
    results = {
        'messages': [],
        'error': False
    }
    asset_name = data.get('asset')
    quantity = data.get('quantity', -1.0)
    if not asset_name:
        # The original only printed the error and still returned
        # error=False; record the failure in the results and stop.
        msg = "Asset to froze quantity must be specified"
        Terminal.inst().error(msg)
        results['error'] = True
        results['messages'].append(msg)
        return results
    if quantity < 0.0:
        msg = "Asset quantity to froze must be specified and greater or equal to zero"
        Terminal.inst().error(msg)
        results['error'] = True
        results['messages'].append(msg)
        return results
    # @todo actually freeze the quantity on the trader
    return results
|
#!/usr/bin/env python
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import defaultdict
from StringIO import StringIO
from time import time as _orig_time
from random import randint
import json
from testlunr.unit import temp_disk_file
from lunr.storage.helper.utils import manifest
def mock_time(start=_orig_time()):
    """
    Create a callable that will return a float that resembles a unix timestamp.
    The timestamps returned will start at the given timestamp and subsequent
    calls to the returned callable object will increase exponentially
    (start, start+1, start+3, start+7, ... -- the gap doubles each call).

    Note: the default is evaluated once at import time, which is the
    intended behavior here (a fixed "now" baseline).

    :params start: a timestamp as a float
    :returns: a callable which can be used as a replacement for time.time()
    """
    def count(start):
        growth = 1
        while True:
            yield start
            start += growth
            growth *= 2
    # start counter
    counter = count(start)
    def mock():
        # each call yields one value off the generator; next() works on
        # both Python 2 and 3 (the original counter.next() is Py2-only)
        return next(counter)
    return mock
class ManifestTestCase(unittest.TestCase):
    """Base class that swaps manifest.time for a deterministic mock clock.

    Two independent mock clocks are created from the same starting instant:
    one patched into the manifest module, one kept as self.time for the
    tests to sample timestamps from.
    """
    # expected manifest schema version under test
    VERSION = '1.0'
    def setUp(self):
        start = _orig_time()
        manifest.time = mock_time(start)
        self.time = mock_time(start)
    def tearDown(self):
        # restore the real clock for other test cases
        manifest.time = _orig_time
class TestManifest(ManifestTestCase):
    """Behavioral tests for manifest.Manifest backup bookkeeping.

    Python 2-era suite (xrange, basestring, assertEquals/assert_).
    """
    def test_create_manifest(self):
        size = 10
        m = manifest.Manifest()
        # a Manifest is a dict subclass carrying version/salt attributes
        self.assert_(isinstance(m, dict))
        self.assert_(isinstance(m, manifest.Manifest))
        self.assertEquals(m.version, self.VERSION)
        self.assertEquals(m['version'], self.VERSION)
        self.assert_(isinstance(m.salt, basestring))
        self.assert_(len(m.salt) > 0)
        # can't calculate m.block_count until you set base
        self.assertFalse(hasattr(m, 'block_count'))
        expected = [manifest.EMPTY_BLOCK for b in xrange(size)]
        m.base = list(expected)
        self.assertEquals(m.block_count, size)
        self.assertEquals(m.replay(), expected)
    def test_create_populate_empty_base(self):
        size = 10
        m = manifest.Manifest()
        # can't populate m.base until you set block_count
        self.assertFalse(hasattr(m, 'base'))
        m.block_count = size
        expected = [manifest.EMPTY_BLOCK for b in xrange(size)]
        self.assertEquals(m.base, expected)
        self.assertEquals(m.replay(), expected)
    def test_create_blank_manifest(self):
        # blank() seeds a manifest with no backups and a fixed block count
        size = 100
        m = manifest.Manifest.blank(size)
        self.assertEquals(m.backups, {})
        self.assertEquals(m.block_count, size)
    def test_create_backup(self):
        # writing a full backup updates the base; a later backup with one
        # changed block must not alter the earlier backup's view
        size = 10
        m = manifest.Manifest.blank(size)
        backup = m.create_backup('id0')
        volume = ['0000'] * size
        for blockno, block in enumerate(volume):
            backup[blockno] = block
        self.assertEquals(m.base, volume)
        self.assertEquals(m.get_backup('id0'), volume)
        # mix it up a bit
        volume[randint(0, size-1)] = '1111'
        backup = m.create_backup('id1')
        for blockno, block in enumerate(volume):
            backup[blockno] = block
        self.assertEquals(m.get_backup('id1'), volume)
        self.assertEquals(m.get_backup('id0'), ['0000'] * size)
    def test_replay(self):
        # replay(ts) must reconstruct the volume as of the given mock
        # timestamp, accumulating diffs without going past ts
        size = 100
        m = manifest.Manifest.blank(size)
        # create initial backup
        base = ['0000'] * size
        backup = m.create_backup('base')
        for blockno, block in enumerate(base):
            backup[blockno] = block
        base_ts = self.time()
        # create backup1
        backup1 = {1: '1111', 2: '1111'}
        backup = m.create_backup('backup1')
        for blockno, block in backup1.items():
            backup[blockno] = block
        backup1_ts = self.time()
        backup2 = {1: '2222'}
        backup = m.create_backup('backup2')
        for blockno, block in backup2.items():
            backup[blockno] = block
        backup2_ts = self.time()
        backup3 = {3: '3333'}
        backup = m.create_backup('backup3')
        for blockno, block in backup3.items():
            backup[blockno] = block
        backup3_ts = self.time()
        expected = list(base)
        self.assertEquals(m.replay(0), expected)
        self.assertEquals(m.replay(base_ts), expected)
        # update expected with first backup
        for blockno, block in backup1.items():
            expected[blockno] = block
        # replay to first timestamp
        self.assertEquals(m.replay(backup1_ts), expected)
        # update expected with second backup
        for blockno, block in backup2.items():
            expected[blockno] = block
        # replay to second timestamp
        self.assertEquals(m.replay(backup2_ts), expected)
        # test middle timestamps land on closest diff without going over
        self.assertEquals(m.replay(backup3_ts - 1), expected)
        # update expected with third backup
        for blockno, block in backup3.items():
            expected[blockno] = block
        # replay to third timestamp
        self.assertEquals(m.replay(backup3_ts), expected)
        # test MOST_RECENT same as last timestamp
        self.assertEquals(m.replay(), expected)
    def test_delete_backup(self):
        # deleting a backup must not change what replay() reconstructs
        # for the remaining history
        size = 100
        m = manifest.Manifest.blank(size)
        # create initial backup
        base = ['0000'] * size
        backup = m.create_backup('base')
        for blockno, block in enumerate(base):
            backup[blockno] = block
        base_ts = self.time()
        # create backup1
        backup1 = {1: '1111', 2: '1111'}
        backup = m.create_backup('backup1')
        for blockno, block in backup1.items():
            backup[blockno] = block
        backup1_ts = self.time()
        backup2 = {1: '2222'}
        backup = m.create_backup('backup2')
        for blockno, block in backup2.items():
            backup[blockno] = block
        backup2_ts = self.time()
        backup3 = {3: '3333'}
        backup = m.create_backup('backup3')
        for blockno, block in backup3.items():
            backup[blockno] = block
        backup3_ts = self.time()
        # replay all backups
        expected = list(base)
        for backup in (backup1, backup2, backup3):
            for blockno, block in backup.items():
                expected[blockno] = block
        # delete base
        m.delete_backup('base')
        self.assert_('base' not in m.backups)
        self.assertEquals(m.replay(), expected)
        # replay all backups 1 and 2
        expected = list(base)
        for backup in (backup1, backup2):
            for blockno, block in backup.items():
                expected[blockno] = block
        # delete backup3
        m.delete_backup('backup3')
        self.assert_('backup3' not in m.backups)
        self.assertEquals(m.replay(), expected)
class MockConnection(object):
    """Minimal in-memory stand-in for a swift-style object store client.

    Objects are kept per container as id -> str(body); fetching a missing
    object raises Exception('404') to mimic an HTTP not-found response.
    """
    def __init__(self):
        # container name -> {object id -> stored body}
        self.data = defaultdict(dict)
    def get_object(self, container, object_id, **kwargs):
        container_data = self.data[container]
        if object_id not in container_data:
            raise Exception('404')
        # empty headers dict plus the stored body, like a real client
        return {}, container_data[object_id]
    def put_object(self, container, object_id, body):
        # bodies are always stored as their string representation
        self.data[container][object_id] = str(body)
class TestJSON(ManifestTestCase):
    """Round-trip and schema tests for manifest JSON (de)serialization
    through save_manifest/load_manifest against a MockConnection."""
    def test_basic_store(self):
        # save then load must preserve the manifest dict, its backups,
        # and its version attribute
        size = 2
        m = manifest.Manifest.blank(size)
        backup = m.create_backup('id0')
        for blockno in range(size):
            backup[blockno] = '0000'
        backup = m.create_backup('id1')
        for blockno in range(0, size, 10):
            backup[blockno] = '1111'
        c = MockConnection()
        with temp_disk_file() as lock_file:
            manifest.save_manifest(m, c, 'vol1', lock_file)
            expected = dict(m)
            vol1 = m.get_backup('id1')
            m = manifest.load_manifest(c, 'vol1', lock_file)
            self.assertEquals(m, expected)
            self.assertEquals(m.get_backup('id1'), vol1)
            self.assertEquals(m.version, self.VERSION)
    def test_version_mismatch(self):
        # a stored manifest with an unknown schema version must be rejected
        m = manifest.Manifest.blank(1)
        c = MockConnection()
        stuff = {
            0: [],
            'backups': {},
            'version': '2.0'
        }
        c.put_object('vol1', 'manifest', json.dumps(stuff))
        with temp_disk_file() as lock_file:
            self.assertRaises(manifest.ManifestVersionError,
                              manifest.load_manifest, c, 'vol1', lock_file)
    def test_load_salt(self):
        # a stored salt must survive loading
        m = manifest.Manifest.blank(1)
        c = MockConnection()
        salt = 'salty!'
        stuff = {
            0: [],
            'backups': {},
            'version': '1.0',
            'salt': salt
        }
        c.put_object('vol1', 'manifest', json.dumps(stuff))
        with temp_disk_file() as lock_file:
            m = manifest.load_manifest(c, 'vol1', lock_file)
            self.assertEquals(m.salt, salt)
    def test_load_default_salt(self):
        # manifests stored without a salt load with the empty-string default
        m = manifest.Manifest.blank(1)
        c = MockConnection()
        salt = 'salty!'
        stuff = {
            0: [],
            'backups': {},
            'version': '1.0'
        }
        c.put_object('vol1', 'manifest', json.dumps(stuff))
        with temp_disk_file() as lock_file:
            m = manifest.load_manifest(c, 'vol1', lock_file)
            self.assertEquals(m.salt, '')
    def test_integer_backup_id(self):
        # integer backup ids must be usable and equivalent to their string
        # form after a JSON round trip (JSON keys are always strings)
        size = 2
        m = manifest.Manifest.blank(size)
        backup = m.create_backup(0)
        for blockno in range(size):
            backup[blockno] = '0000'
        backup = m.create_backup(1)
        for blockno in range(0, size, 10):
            backup[blockno] = '1111'
        backup = m.create_backup('id2')
        for blockno in range(0, size, 10):
            backup[blockno] = '2222'
        c = MockConnection()
        with temp_disk_file() as lock_file:
            manifest.save_manifest(m, c, 'vol1', lock_file)
            expected = dict(m)
            vol1 = m.get_backup(1)
            vol1_str = m.get_backup('1')
            vol2 = m.get_backup('id2')
            m = manifest.load_manifest(c, 'vol1', lock_file)
            self.assertEquals(m, expected)
            self.assertEquals(m.get_backup(1), vol1)
            self.assertEquals(m.get_backup('1'), vol1_str)
            self.assertEquals(vol1, vol1_str)
            self.assertEquals(m.get_backup('id2'), vol2)
if __name__ == "__main__":
unittest.main()
|
# Demo of three equivalent ways to import and use colorama.
# 1) module import: call init() and reference Fore via the module
import colorama
colorama.init()
print(colorama.Fore.RED + 'This is red')
# 2) star import (not recommended in real code): brings init/Fore into scope
from colorama import *
init()
print(Fore.YELLOW + 'This is yellow')
# 3) explicit names: the preferred style
from colorama import init, Fore
print(Fore.GREEN + 'This is green')
|
# HACK: if the profile plugin is imported before the coverage plugin then all
# the top-level code in pytest_profiling will be omitted from
# coverage, so force it to be reloaded within this test unit under coverage
from six.moves import reload_module # @UnresolvedImport
import pytest_profiling
reload_module(pytest_profiling)
from pytest_profiling import Profiling, pytest_addoption, pytest_configure
try:
from unittest.mock import Mock, ANY, patch, sentinel
except ImportError:
# python 2
from mock import Mock, ANY, patch, sentinel
def test_creates_prof_dir():
    """Session start creates the 'prof' output directory; an OSError from
    makedirs (e.g. directory already exists) is tolerated."""
    with patch('os.makedirs', side_effect=OSError) as makedirs:
        Profiling(False).pytest_sessionstart(Mock())
    makedirs.assert_called_with('prof')
def test_combines_profs():
    """Collected per-test profiles are merged into one pstats.Stats
    (first prof seeds it, the rest are add()ed) and then dumped."""
    plugin = Profiling(False)
    plugin.profs = [sentinel.prof0, sentinel.prof1]
    with patch('pstats.Stats') as Stats:
        plugin.pytest_sessionfinish(Mock(), Mock())
    Stats.assert_called_once_with(sentinel.prof0)
    Stats.return_value.add.assert_called_once_with(sentinel.prof1)
    assert Stats.return_value.dump_stats.called
def test_generates_svg():
    """With svg=True, session finish builds a pipes.Template containing a
    gprof2dot step and copies it (producing the SVG output)."""
    plugin = Profiling(True)
    plugin.profs = [sentinel.prof]
    with patch('pstats.Stats'):
        with patch('pipes.Template') as Template:
            plugin.pytest_sessionfinish(Mock(), Mock())
    assert any('gprof2dot' in args[0][0] for args in Template.return_value.append.call_args_list)
    assert Template.return_value.copy.called
def test_writes_summary():
    """pytest_terminal_summary writes a 'Profiling' report and directs the
    stats output stream at the terminal reporter."""
    plugin = Profiling(False)
    plugin.profs = [sentinel.prof]
    terminalreporter, stats = Mock(), Mock()
    with patch('pstats.Stats', return_value=stats) as Stats:
        plugin.pytest_sessionfinish(Mock(), Mock())
        plugin.pytest_terminal_summary(terminalreporter)
    assert 'Profiling' in terminalreporter.write.call_args[0][0]
    # The original asserted `Stats.called_with(...)`, which is NOT a mock
    # assertion method: accessing it auto-creates a child mock that is
    # always truthy, so the check could never fail. Assert the intended
    # property instead: some Stats() call received the terminal reporter
    # as its output stream.
    assert any(kwargs.get('stream') is terminalreporter
               for args, kwargs in Stats.call_args_list)
def test_writes_summary_svg():
    """In svg mode the terminal summary mentions the generated SVG."""
    plugin = Profiling(True)
    plugin.profs = [sentinel.prof]
    terminalreporter = Mock()
    with patch('pstats.Stats'):
        with patch('pipes.Template'):
            plugin.pytest_sessionfinish(Mock(), Mock())
            plugin.pytest_terminal_summary(terminalreporter)
    assert 'SVG' in terminalreporter.write.call_args[0][0]
def test_adds_options():
    """--profile and --profile-svg flags are registered under the
    'Profiling' option group."""
    parser = Mock()
    pytest_addoption(parser)
    parser.getgroup.assert_called_with('Profiling')
    group = parser.getgroup.return_value
    group.addoption.assert_any_call('--profile', action='store_true', help=ANY)
    group.addoption.assert_any_call('--profile-svg', action='store_true', help=ANY)
def test_configures():
    """pytest_configure registers the plugin when --profile is set
    (getvalue returns True only for 'profile')."""
    config = Mock(getvalue=lambda x: x == 'profile')
    with patch('pytest_profiling.Profiling') as Profiling:
        pytest_configure(config)
    config.pluginmanager.register.assert_called_with(Profiling.return_value)
def test_clean_filename():
    """clean_filename replaces path-hostile characters (colon, slashes,
    non-ASCII bytes) with underscores."""
    assert pytest_profiling.clean_filename('a:b/c\256d') == 'a_b_c_d'
|
from predictor_caffe import PredictorCaffe
from predictor_mxnet import PredictorMxNet
import numpy as np
def compare_diff_sum(tensor1, tensor2):
    """Return the sum of absolute elementwise differences of two tensors.

    The original was an unimplemented stub (`pass`, returning None);
    this implements the metric its name describes. Inputs may be any
    array-likes of matching (broadcastable) shape.
    """
    a = np.asarray(tensor1, dtype=np.float64)
    b = np.asarray(tensor2, dtype=np.float64)
    return float(np.sum(np.abs(a - b)))
def compare_cosin_dist(tensor1, tensor2):
    """Return the cosine distance (1 - cosine similarity) of two tensors.

    The original was an unimplemented stub (`pass`, returning None).
    Tensors are flattened before comparison. If either vector has zero
    norm the similarity is undefined; we return 1.0 (maximally distant)
    in that case -- adjust if callers need different semantics.
    """
    a = np.asarray(tensor1, dtype=np.float64).ravel()
    b = np.asarray(tensor2, dtype=np.float64).ravel()
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    if denom == 0:
        return 1.0
    return 1.0 - float(np.dot(a, b) / denom)
def softmax(x):
    """Compute softmax values for each set of scores in x.

    The original used a global max and global sum, which is only correct
    for 1-D input despite the docstring's "each sets of scores" claim.
    Using axis=-1 with keepdims makes 2-D input normalize row-wise while
    remaining numerically identical for 1-D input. Subtracting the max
    keeps exp() from overflowing.
    """
    x = np.asarray(x, dtype=np.float64)
    e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e_x / e_x.sum(axis=-1, keepdims=True)
def compare_models(prefix_mxnet, prefix_caffe, size):
    """Debug utility: run the same all-ones input through an MXNet model
    and its Caffe conversion and print activations side by side.

    NOTE(review): Python 2 code (`print "done"` at the end); relies on the
    local PredictorMxNet/PredictorCaffe wrappers.
    """
    netmx = PredictorMxNet(prefix_mxnet, 0, size)
    model_file = prefix_caffe + ".prototxt"
    pretrained_file = prefix_caffe + ".caffemodel"
    netcaffe = PredictorCaffe(model_file, pretrained_file, size)
    tensor = np.ones(size, dtype=np.float32)
    out_mx = netmx.forward(tensor)
    out_mx = out_mx[0].asnumpy()
    #print out_mx
    # NOTE(review): only the Caffe input is normalized to (x-127.5)/128;
    # presumably the MXNet predictor normalizes internally -- confirm.
    img = (tensor-127.5) / 128
    netcaffe.forward(img)
    out_caffe = netcaffe.blob_by_name("fc1")
    #print out_caffe.data
    out_caffe_data = out_caffe.data.tolist()
    #print(len(out_caffe_data[0]), len(out_mx[0][0]), type(out_caffe_data), type(out_mx))
    # print the first ten final-layer values from both frameworks
    for i in range(0, 10):
        #print(out_mx[0][0][i], out_caffe_data[0][i])
        print(out_mx[0][i], out_caffe_data[0][i])
    print(type(netcaffe.net.blobs))
    #print(netcaffe.net.blobs.keys())
    # Successive reassignments select which intermediate blob to inspect;
    # only the last uncommented assignment ('stage1_unit1_bn1') takes effect.
    blob_name = 'stage1_unit1_bn3' # 3
    #blob_name = 'stage1_unit1_conv2'
    blob_name = 'stage1_unit1_sc'
    #blob_name = 'stage1_unit1_conv1sc'
    #blob_name = 'relu0'
    blob_name = 'stage1_unit2_relu1' #8
    blob_name = 'stage4_unit3_bn3' #103
    blob_name = 'bn1' # 105
    #blob_name = 'pre_fc1'
    blob_name = 'stage1_unit2_relu1'
    blob_name = '_plus0' #6
    blob_name = 'stage1_unit1_bn3' #3
    blob_name = 'stage1_unit1_relu1' # 2
    blob_name = 'stage1_unit1_bn1' # 1
    #blob_name = 'relu0' # 0
    out_caffe = netcaffe.blob_by_name(blob_name)
    print(type(out_caffe.data), type(out_caffe.data.tolist()), len(out_caffe.data.flat))
    count = 0
    index = 0
    # dump the first ten activations greater than 0.001 with their indices
    for element in out_caffe.data.flat:
        if count >= 10: break
        if element > 0.001:
            print(index, element)
            count += 1
        index += 1
    for i in range(0, 5):
        print(i, netcaffe.net.params[blob_name][0].data.flat[i], netcaffe.net.params[blob_name][1].data.flat[i],
              netcaffe.net.params[blob_name + '_scale'][0].data.flat[i], netcaffe.net.params[blob_name + '_scale'][1].data.flat[i])
    '''
    index = 0
    for _w in netcaffe.net.params['conv0'][0].data.flat:
    if index < 10: print(index, _w)
    else: break
    index += 1
    '''
    sum0 = 0
    sum1 = 0
    # squared L2 norms of both output vectors as a magnitude sanity check
    for i in range(0, len(out_mx[0])):
        #print(out_mx[0][0][i], out_caffe_data[0][i])
        sum0 += out_mx[0][i] * out_mx[0][i]
        sum1 += out_caffe_data[0][i] * out_caffe_data[0][i]
    #print softmax(out_caffe.data)
    #print softmax(out_caffe.data)
    print(sum0, sum1);
    print "done"
if __name__ == "__main__":
prefix_mxnet = "/media/luyao/video_send_back/arcface/model-r50-am-lfw/model"
#prefix_mxnet = "/var/darknet/insightface/models/model-r100-ii/model"
prefix_caffe = "/home/luyao/git/MXNet2Caffe/model_caffe/face/facega2"
size = (1, 3, 112, 112)
compare_models(prefix_mxnet, prefix_caffe, size)
|
import bmp280
from machine import Pin, I2C
from time import sleep
# MicroPython script: poll a BMP280 sensor over I2C every 5 seconds.
# Pin assignment: GPIO5 = SCL, GPIO4 = SDA.
scl=Pin(5)
sda=Pin(4)
i2c=I2C(scl=scl, sda=sda)
print('Found on I2C bus: ', i2c.scan())  # should include the BMP280 address
bmp=bmp280.BMP280(i2c)
while True:
    # pressure / 100.0 converts Pa to hPa (millibar)
    print('t=', bmp.temperature, ' p=', bmp.pressure / 100.0)
    sleep(5)
|
from typing import List, Literal
from pydantic import BaseModel, validator
from podping_hivewriter.models.medium import mediums
from podping_hivewriter.models.reason import reasons
class Podping(BaseModel):
    """Dataclass for on-chain podping schema.

    Validates that medium/reason are drawn from the known sets and that
    at least one IRI is supplied.
    """
    version: Literal["1.0"] = "1.0"
    medium: str
    reason: str
    iris: List[str]
    @validator("medium")
    def medium_exists(cls, v):
        """Make sure the given medium matches what's available"""
        if v not in mediums:
            # ', '.join already yields a str; the original wrapped it in a
            # redundant str() call
            raise ValueError(f"medium must be one of {', '.join(mediums)}")
        return v
    @validator("reason")
    def reason_exists(cls, v):
        """Make sure the given reason matches what's available"""
        if v not in reasons:
            raise ValueError(f"reason must be one of {', '.join(reasons)}")
        return v
    @validator("iris")
    def iris_at_least_one_element(cls, v):
        """Make sure the list contains at least one element"""
        if not v:
            raise ValueError("iris must contain at least one element")
        return v
|
# The Fibonacci numbers.
# Reformulate that as
# fold1 = 1
# fold2 = 1
# fnew = fold1 + fold2
# After that, discard fold2 , which is no longer needed, and set fold2 to fold1 and fold1 to
# fnew . Repeat an appropriate number of times.
# Implement a program that prompts the user for an integer n and prints the nth
# Fibonacci number, using the above algorithm.
inputN = int(input("Enter an integer: "))
fold1 = 0
fold2 = 0
fnew = 1
while fnew <= inputN:
print(fnew)
fold2 = fold1
fold1 = fnew
fnew = fold1 + fold2
|
# Generated by Django 3.2.4 on 2021-06-21 13:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a last_modified audit timestamp to every supply-chain model."""

    dependencies = [
        ("supply_chains", "0021_alter_supplychain_slug"),
    ]

    # Same AddField operation, applied uniformly to each model.
    operations = [
        migrations.AddField(
            model_name=target,
            name="last_modified",
            field=models.DateTimeField(auto_now=True),
        )
        for target in (
            "maturityselfassessment",
            "scenarioassessment",
            "strategicaction",
            "strategicactionupdate",
            "supplychain",
            "vulnerabilityassessment",
        )
    ]
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from oasislmf.model_testing.validation import csv_validity_test
from oasislmf.utils.exceptions import OasisException
class TestValidation(TestCase):
    def test_csv_validity_test___model_data_directory_is_empty(self):
        """
        csv_validity_test must raise OasisException when pointed at an
        empty model data directory.
        """
        with TemporaryDirectory() as model_data_dir:
            with self.assertRaises(OasisException):
                csv_validity_test(model_data_dir)
|
#START HERE
###############################
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
folder_name = "<FOLDER_CONTAINING_SAM_FILES>"
plt.switch_backend('agg')  # headless rendering; figures only go to PNG files


def _sam_paths(folder, suffix):
    """Return full paths of files in *folder* whose names end with *suffix*."""
    return [folder + "/" + name for name in os.listdir(folder)
            if name.endswith(suffix)]


def _split_by_insert_size(fname):
    """Split one SAM file into nucleosome-class SAM files.

    Writes five sibling files (_NO_NUC, _MONO_NUC, _DI_NUC, _TRI_NUC,
    _ALL_NUC) next to *fname* and returns the list of absolute insert
    sizes of all non-header records.
    """
    suffixes = ("_NO_NUC.sam", "_MONO_NUC.sam", "_DI_NUC.sam",
                "_TRI_NUC.sam", "_ALL_NUC.sam")
    outputs = [open(fname[:-4] + s, "w") for s in suffixes]
    no_nuc, mono_nuc, di_nuc, tri_nuc, all_nuc = outputs
    size_dist = []
    try:
        # Renamed from `input`, which shadowed the builtin.
        with open(fname) as sam_file:
            for line in sam_file:
                if line[0] == "@":  # header line: copy into every split file
                    for out in outputs:
                        out.write(line)
                    continue
                fields = line.split('\t')
                # SAM columns: 0 read name, 2 ref genome, 3 map position,
                # 8 insert size (TLEN), 9 sequence, 10 q-score.
                size = abs(int(fields[8]))
                size_dist.append(size)
                if size < 100:
                    no_nuc.write(line)
                elif 179 < size < 248:  # mono-nucleosome window
                    mono_nuc.write(line)
                    all_nuc.write(line)
                elif 314 < size < 474:  # di-nucleosome window
                    di_nuc.write(line)
                    all_nuc.write(line)
                elif 557 < size < 616:  # tri-nucleosome window
                    tri_nuc.write(line)
                    all_nuc.write(line)
    finally:
        # Close output files even if parsing raises (original leaked on error).
        for out in outputs:
            out.close()
    return size_dist


def _plot_size_histogram(sizes, save_name, log_scale=False):
    """Save a 2000-bin insert-size histogram; optionally log-scale the y-axis."""
    fig = plt.figure()
    plt.title("Insert size distribution")
    ax = fig.add_subplot(111)
    ax.hist(sizes, 2000, color='green')
    if log_scale:
        try:
            plt.yscale('log', nonpositive='clip')  # matplotlib >= 3.3
        except TypeError:
            plt.yscale('log', nonposy='clip')  # older matplotlib spelling
    plt.savefig(save_name)
    plt.close(fig)  # release the figure; original leaked one per plot


def _write_size_counts(fname, size_dist):
    """Write a tab-separated Size/Counts table for one SAM file's inserts."""
    counts = {}
    for size in size_dist:
        counts[size] = counts.get(size, 0) + 1
    # BUGFIX: was fname[:-6], which stripped ".sam" plus two extra characters
    # from the stem; every other derived name here uses fname[:-4].
    save_name = fname[:-4] + "_fragment_size_counts.txt"
    with open(save_name, "w") as out:
        out.write("Size\tCounts\n")
        for size in sorted(counts):
            out.write(str(size) + "\t" + str(counts[size]) + "\n")


# Split every SAM file, plot its size distribution, and dump size counts.
for fname in _sam_paths(folder_name, "sam"):
    size_dist = _split_by_insert_size(fname)
    _plot_size_histogram(size_dist, fname[:-4] + "_size_histogram.png")
    _plot_size_histogram(size_dist, fname[:-4] + "_size_histogram_log.png",
                         log_scale=True)
    _write_size_counts(fname, size_dist)

# Convert each split SAM to a sorted, indexed BAM with samtools.
# NOTE: paths are interpolated into shell commands; acceptable for trusted
# local filenames, unsafe for arbitrary input.
for sam_path in _sam_paths(folder_name, "NUC.sam"):
    bam_name = sam_path[:-3] + "bam"
    os.system("samtools view -Sb " + sam_path + " > " + bam_name)
    bam_sort_name = sam_path[:-4] + "_sorted.bam"
    os.system("samtools sort " + bam_name + " -T Temp -o " + bam_sort_name)
    os.system("samtools index " + bam_sort_name)
|
from django.db import models
from django.utils.text import slugify
from django.urls import reverse
class Category(models.Model):
    """A product category; the slug is regenerated from the name on save."""
    name = models.CharField(max_length=200, db_index=True)
    slug = models.SlugField(max_length=200, unique=True)
    class Meta:
        ordering = ('name',)
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'
    def save(self, *args, **kwargs):
        # Unconditionally regenerate the slug from the current name,
        # overwriting any manually set slug.
        # NOTE(review): slug is unique=True, so two categories that slugify
        # to the same value will raise an IntegrityError here — confirm
        # names are guaranteed distinct after slugification.
        self.slug = slugify(self.name)
        super(Category, self).save(*args, **kwargs)
    def __str__(self):
        return self.name
    @property
    def get_products(self):
        # Queryset of all products whose FK points at this category.
        return Product.objects.filter(category=self.id)
class Product(models.Model):
    """A sellable product belonging to one Category."""
    # NOTE(review): default=True on a ForeignKey is suspicious — an FK
    # default should be a primary-key value or a callable; True would
    # coerce to pk 1. Confirm the intended default.
    category = models.ForeignKey(Category, related_name='products', on_delete=models.CASCADE, default=True)
    name = models.CharField(max_length=200, db_index=True)
    # Not unique on its own; lookups pair it with id (see index_together).
    slug = models.SlugField(max_length=200, db_index=True)
    image = models.ImageField(upload_to='products/%Y/%m/%d/', blank=True)
    description = models.TextField(blank=True)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    favourite_products = models.BooleanField(default=False)
    available = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    class Meta:
        ordering = ['-name']
        # NOTE(review): index_together is deprecated in recent Django in
        # favour of Meta.indexes — consider migrating when upgrading.
        index_together = ['id', 'slug']
    def save(self, *args, **kwargs):
        # Slug is regenerated from the name on every save, mirroring
        # Category.save above.
        self.slug = slugify(self.name)
        super(Product, self).save(*args, **kwargs)
    def __str__(self):
        return self.name
    def get_absolute_url(self):
        # URL pattern "product-detail" takes both id and slug.
        return reverse("product-detail", kwargs={"id": self.id, "slug": self.slug})
class Contact(models.Model):
    """A message submitted through the site's contact form."""
    name = models.CharField(max_length=200, db_index=True)
    email = models.EmailField(max_length=200, db_index=True)
    message = models.TextField(blank=True)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.name
|
from solution import Foo, Uniquish
def test_foo():
    """Three equal-valued Foo instances collapse to one set entry and share a hash."""
    instances = [Foo(10) for _ in range(3)]
    assert len(set(instances)) == 1
    assert len({hash(obj) for obj in instances}) == 1
def test_subclass_non_uniquish():
    """A plain class keeps default identity hashing, so no deduplication happens."""
    class Bar:
        def __init__(self, x):
            self.x = x

    bars = [Bar(10) for _ in range(3)]
    assert len(set(bars)) == 3
    first, second, third = bars
    assert hash(first) != hash(second)
    assert hash(second) != hash(third)
def test_subclass_uniquish():
    """Subclassing Uniquish makes equal-valued instances hash/compare as one."""
    class Bar(Uniquish):
        def __init__(self, x):
            self.x = x

    bars = [Bar(10) for _ in range(3)]
    assert len(set(bars)) == 1
    assert len({hash(obj) for obj in bars}) == 1
def test_uniquish_unhashable():
    """Uniquish instances carrying unhashable attributes still dedupe by value."""
    class Pair(Uniquish):
        def __init__(self, x, y):
            self.x, self.y = x, y

    members = {Pair(10, [1]), Pair(10, [1]), Pair(10, [2])}
    assert len(members) == 2
# Run every check when the module is executed directly.
for check in (
    test_foo,
    test_subclass_non_uniquish,
    test_subclass_uniquish,
    test_uniquish_unhashable,
):
    check()
|
Subsets and Splits
No community queries yet — the top public SQL queries from the community will appear here once they become available.