content stringlengths 5 1.05M |
|---|
import matplotlib.pyplot as plt
import datetime
import solarsystem
def create_positions():
    """Compute the current heliocentric rectangular positions of the planets.

    Returns
    -------
    dict
        Mapping of planet name -> rectangular heliocentric coordinates,
        as produced by solarsystem.Heliocentric(...).planets().
    """
    # Bug fix: the original assigned `now = datetime.datetime.utcnow()` and
    # immediately overwrote it; utcnow() is deprecated, so keep only the
    # timezone-aware form.
    now = datetime.datetime.now(datetime.timezone.utc)
    # UT offset and DST are zero because `now` is already UTC.
    heliocentric = solarsystem.Heliocentric(
        year=now.year, month=now.month, day=now.day,
        hour=now.hour, minute=now.minute,
        UT=0, dst=0, view='rectangular')
    return heliocentric.planets()
def create_plot(planetspositionsHrect):
    """Plot the Sun, the first eight planets, and circular orbit guides.

    Parameters
    ----------
    planetspositionsHrect : dict
        Mapping of planet name -> rectangular heliocentric coordinates
        (x, y, ...), as returned by create_positions().

    Returns
    -------
    module
        The matplotlib.pyplot module, so the caller can save or show it.
    """
    names = list(planetspositionsHrect.keys())
    coords = list(planetspositionsHrect.values())
    plt.figure(figsize=(15, 15), frameon=False)
    ax = plt.gca()
    ax.cla()
    ax.set_xlim((-33, 33))
    ax.set_ylim((-33, 33))
    ax.plot(0, 0, '.', label='Sun')
    for i in range(8):
        x, y = coords[i][0], coords[i][1]
        ax.plot(x, y, '.', markersize=10, label=names[i])
        # Fixes: logical `and`/simple comparison instead of bitwise `&`, and
        # the always-true `i < 10` is dropped (i only ranges over 0..7).
        # abs() before squaring was redundant.
        if i > 1:
            # Orbit guide circle with radius equal to the planet's distance.
            ax.add_artist(plt.Circle((0, 0), (x**2 + y**2)**0.5,
                                     color='r', fill=False))
    # NOTE(review): assumes dict ordering puts Mars at index 3 — verify
    # against the ordering solarsystem returns.
    ax.annotate('Mars', (coords[3][0], coords[3][1]))
    ax.legend()
    return plt
def image_out(file_name):
    """Render the current planetary positions and save the figure to file_name."""
    figure = create_plot(create_positions())
    figure.savefig(file_name)
|
import warnings
import numpy as np
from numpy.linalg import multi_dot, inv
import scipy.sparse as sparse
from sklearn.base import BaseEstimator, ClassifierMixin
from cvxopt import matrix, solvers
import osqp
class SSLFramework(BaseEstimator, ClassifierMixin):
    """Semi-supervised Learning Framework

    Collection of classmethod solvers shared by semi-supervised kernel
    classifiers: a dual QP formulation (binary and column-wise multiclass)
    and a regularized least-squares formulation.
    """
    @classmethod
    def _solve_semi_dual(cls, K, y, Q_, C, solver='osqp'):
        """Solve the semi-supervised dual problem, handling multiclass labels.

        Parameters
        ----------
        K : ndarray of shape (n, n)
            Kernel (Gram) matrix over all labelled + unlabelled samples.
        y : ndarray of shape (nl,) or (nl, n_classes)
            Labels of the labelled samples; a 2-D array is solved one
            column at a time (one-vs-rest style).
        Q_ : ndarray of shape (n, n)
            Regularization matrix passed to the binary solver (inverted there).
        C : float
            Upper bound of the box constraint on the dual variables.
        solver : str, optional
            Backend QP solver, 'osqp' (default) or 'cvxopt'.

        Returns
        -------
        coef_ : ndarray
            Expansion coefficients; one column per class in the 2-D case.
        support_ : list
            Support-vector index arrays, one entry per label column.
        """
        if len(y.shape) == 1:
            coef_, support_ = cls._semi_binary_dual(K, y, Q_, C, solver)
            support_ = [support_]
        else:
            coef_ = []
            support_ = []
            for i in range(y.shape[1]):
                coef_i, support_i = cls._semi_binary_dual(K, y[:, i], Q_, C, solver)
                coef_.append(coef_i.reshape(-1, 1))
                support_.append(support_i)
            coef_ = np.concatenate(coef_, axis=1)
        return coef_, support_
    @classmethod
    def _semi_binary_dual(cls, K, y_, Q_, C, solver='osqp'):
        """Solve the binary dual problem min_x x^TPx + q^Tx, s.t. Gx<=h, Ax=b.

        Parameters
        ----------
        K : ndarray of shape (n, n)
            Kernel matrix over all samples.
        y_ : ndarray of shape (nl,)
            Binary labels of the labelled samples.
        Q_ : ndarray of shape (n, n)
            Regularization matrix; its inverse appears in the dual objective.
        C : float
            Box-constraint upper bound.
        solver : str, optional
            'osqp' (default) or 'cvxopt'.

        Returns
        -------
        coef_ : ndarray
            Expansion coefficients Q_^{-1} J^T Y alpha.
        support_ : tuple of ndarray
            Indices where 0 < alpha < C (unbounded support vectors).
        """
        nl = y_.shape[0]
        n = K.shape[0]
        # J selects the labelled block out of the full kernel matrix.
        J = np.zeros((nl, n))
        J[:nl, :nl] = np.eye(nl)
        Q_inv = inv(Q_)
        Y = np.diag(y_.reshape(-1))
        # Dual objective matrix: Y J K Q_^{-1} J^T Y.
        Q = multi_dot([Y, J, K, Q_inv, J.T, Y])
        Q = Q.astype('float32')
        alpha = cls._quadprog(Q, y_, C, solver)
        coef_ = multi_dot([Q_inv, J.T, Y, alpha])
        support_ = np.where((alpha > 0) & (alpha < C))
        return coef_, support_
    @classmethod
    def _quadprog(cls, Q, y, C, solver='osqp'):
        """Solve min_x x^TPx + q^Tx, s.t. Gx<=h, Ax=b (SVM-style dual QP).

        Parameters
        ----------
        Q : ndarray of shape (nl, nl)
            Quadratic objective matrix.
        y : ndarray of shape (nl,)
            Labels, used in the equality constraint y^T alpha = 0.
        C : float
            Box-constraint upper bound.
        solver : str, optional
            'osqp' (default) or 'cvxopt'.

        Returns
        -------
        alpha : ndarray of shape (nl,)
            Solution of the dual problem.
        """
        # dual
        nl = y.shape[0]
        q = -1 * np.ones((nl, 1))
        if solver == 'cvxopt':
            # Stack -I and I so that G alpha <= h encodes 0 <= alpha <= C/nl.
            # NOTE(review): the cvxopt path bounds alpha by C/nl while the
            # osqp path uses C — confirm which normalization is intended.
            G = np.zeros((2 * nl, nl))
            G[:nl, :] = -1 * np.eye(nl)
            G[nl:, :] = np.eye(nl)
            h = np.zeros((2 * nl, 1))
            h[nl:, :] = C / nl
            # convert numpy matrix to cvxopt matrix
            P = matrix(Q)
            q = matrix(q)
            G = matrix(G)
            h = matrix(h)
            A = matrix(y.reshape(1, -1).astype('float64'))
            b = matrix(np.zeros(1).astype('float64'))
            solvers.options['show_progress'] = False
            sol = solvers.qp(P, q, G, h, A, b)
            alpha = np.array(sol['x']).reshape(nl)
        elif solver == 'osqp':
            warnings.simplefilter('ignore', sparse.SparseEfficiencyWarning)
            P = sparse.csc_matrix((nl, nl))
            P[:nl, :nl] = Q[:nl, :nl]
            # OSQP form l <= G alpha <= u; the last row enforces y^T alpha = 0.
            G = sparse.vstack([sparse.eye(nl), y.reshape(1, -1)]).tocsc()
            l_ = np.zeros((nl + 1, 1))
            u = np.zeros(l_.shape)
            u[:nl, 0] = C
            prob = osqp.OSQP()
            prob.setup(P, q, G, l_, u, verbose=False)
            res = prob.solve()
            alpha = res.x
        else:
            raise ValueError('Invalid QP solver')
        return alpha
    @classmethod
    def _solve_semi_ls(cls, Q, y):
        """Solve the regularized least-squares problem coef = Q^{-1} y_.

        Parameters
        ----------
        Q : ndarray of shape (n, n)
            System matrix.
        y : ndarray of shape (nl,) or (nl, n_targets)
            Labels of the labelled samples; zero-padded up to length n for
            the unlabelled samples before solving.

        Returns
        -------
        ndarray
            Q^{-1} y_, of shape (n,) or (n, n_targets).
        """
        n = Q.shape[0]
        nl = y.shape[0]
        Q_inv = inv(Q)
        if len(y.shape) == 1:
            y_ = np.zeros(n)
            y_[:nl] = y[:]
        else:
            y_ = np.zeros((n, y.shape[1]))
            y_[:nl, :] = y[:, :]
        return np.dot(Q_inv, y_)
|
"""
Interfaz para las Unidades.
"""
from typing import Optional
from discord import Interaction, SelectOption
from discord.ui import Select, select
from ..archivos import (DiccionarioGuia, actualizar_guia, cargar_guia,
lista_carpetas, lista_unidades)
from ..constantes import DEFAULT_VERSION, GUIA_PATH
from .ui_ejercicios import SelectorEjercicios
from .ui_general import VistaGeneral
class SelectorGuia(VistaGeneral):
    """
    Custom UI class for switching between guide versions.
    """
    def __init__(self, version_actual: Optional[str]=None) -> None:
        """
        Initializes a 'SelectorGuia' instance.
        """
        super().__init__()
        # Currently selected guide version (may be None until one is chosen).
        self.version_actual = version_actual
    @select(placeholder="Seleccione una versión de la guía",
            custom_id="selector_de_guia",
            options=[SelectOption(label=ver) for ver in lista_carpetas(GUIA_PATH)],
            max_values=1)
    async def seleccionar_guia(self, interaccion: Interaction, seleccion: Select) -> None:
        """
        Shows and selects a specific guide version.
        """
        version_vieja = self.version_actual
        nueva_version = seleccion.values[0] # Should contain exactly one element (max_values=1)
        self.version_actual = nueva_version
        # Persist the chosen version for this guild.
        actualizar_guia(nueva_version, str(interaccion.message.guild.id))
        formato_log = {"guild": interaccion.guild.name,
                       "old_ver": version_vieja,
                       "new_ver": nueva_version}
        self.log.info("En '%(guild)s', la versión de la guía fue cambiada " % formato_log +
                      "de %(old_ver)s a %(new_ver)s exitosamente" % formato_log)
        # Replace the selector with a confirmation message (view=None removes it).
        await interaccion.response.edit_message(content="**[AVISO]** La versión de la guía " +
                                                f"fue cambiada{f' de `{version_vieja}`' if version_vieja else ''} a " +
                                                f"`{nueva_version}` exitosamente.",
                                                view=None)
class MenuSelectorUnidad(Select):
    """
    Menu selector for guide units (the menu itself, not the whole UI view).
    """
    def __init__(
        self,
        *,
        custom_id: str="menu_selector_unidad",
        placeholder: Optional[str]="Seleccione una Unidad",
        min_values: int=1,
        max_values: int=1,
        disabled: bool=False,
        row: Optional[int]=None,
        guia: Optional[DiccionarioGuia]=None
    ) -> None:
        """
        Initializes a 'MenuSelectorUnidad' instance.

        `guia` defaults to the guide for DEFAULT_VERSION, loaded lazily.
        """
        # Bug fix: the default used to be `cargar_guia(DEFAULT_VERSION)` in
        # the signature, which ran at import time and shared one dict across
        # every instance. Load it per-instance instead.
        self.guia = cargar_guia(DEFAULT_VERSION) if guia is None else guia
        # One option per unit, labelled with its number and titled with the
        # unit's "titulo" entry.
        opciones = [SelectOption(label=f"Unidad {unidad}",
                                 description=self.guia[unidad]["titulo"],
                                 value=unidad)
                    for unidad in lista_unidades(self.guia)]
        super().__init__(custom_id=custom_id,
                         placeholder=placeholder,
                         min_values=min_values,
                         max_values=max_values,
                         options=opciones,
                         disabled=disabled,
                         row=row)
    async def callback(self, interaction: Interaction) -> None:
        """
        Handles the unit chosen by the user from the selector menu.
        """
        unidad_elegida = self.values[0]
        vista = SelectorEjercicios(guia=self.guia, unidad=unidad_elegida)
        # The edited message object is not needed afterwards (the original
        # bound it to an unused local).
        await interaction.response.edit_message(content="Elija el ejercicio",
                                                view=vista)
class SelectorUnidad(VistaGeneral):
    """
    Custom UI class for picking an exercise unit.
    """
    def __init__(self, guia: DiccionarioGuia) -> None:
        """
        Initializes a 'SelectorUnidad' instance.
        """
        super().__init__()
        menu_unidades = MenuSelectorUnidad(guia=guia)
        self.add_item(menu_unidades)
|
#
# Modules Import
#
import boto3
import json
import os
#
# Variables Definition
#
# Name of the target ECS cluster (required environment variable).
ecs_cluster = os.environ['ECS_CLUSTER']
# Space-separated list of service names to leave untouched (required).
excluded_services = os.environ['EXCLUDED_SERVICES'].split(' ')
# NOTE(review): module-level mutable state — in AWS Lambda this persists
# across warm invocations of the same execution environment.
result = { 'stoppedServices': [ ], 'excludedServices': [ ] }
#
# Function to print the boto3 responses in JSON format
#
def json_response(response):
    """Serialize a boto3 response as pretty-printed, key-sorted JSON text.

    Non-JSON-native values (e.g. datetimes) are stringified via default=str.
    """
    options = {
        'default': str,
        'sort_keys': True,
        'indent': 4,
        'separators': (',', ': '),
    }
    return json.dumps(response, **options)
#
# Main
#
def lambda_handler(event, context):
    """Scale every non-excluded ECS service in the cluster down to 0 tasks.

    Parameters are the standard Lambda event/context (unused here).
    Returns a dict with statusCode 200 and a JSON body listing the stopped
    and excluded service ARNs.
    """
    print("\n##### Starting execution #####\n")
    # Bug fix: build the result locally. The module-level `result` dict
    # persists across warm Lambda invocations, so reusing it would keep
    # accumulating services from previous runs.
    result = {'stoppedServices': [], 'excludedServices': []}
    ecs = boto3.client('ecs')
    # Get all services belonging to the ECS cluster
    paginator = ecs.get_paginator('list_services')
    ecs_services = []
    for page in paginator.paginate(cluster=ecs_cluster):
        ecs_services.extend(page['serviceArns'])
    for service in ecs_services:
        # The service ARN ends with the service name, so basename() extracts it.
        if os.path.basename(service) in excluded_services:
            print("\n'" + service + "' ECS service is excluded")
            result['excludedServices'].append(service)
        else:
            print("\nScaling in '" + service + "' ECS service tasks to 0...")
            ecs.update_service(
                cluster=ecs_cluster,
                service=service,
                desiredCount=0
            )
            result['stoppedServices'].append(service)
    print("\nFinal result:\n" + json_response(result) + "\n")
    print("\n##### Execution finished #####\n")
    return {
        'statusCode': 200,
        'body': json_response(result)
    }
|
"""
This is a useful table when we want to test all possible column types.
"""
from piccolo.columns.column_types import (
JSON,
JSONB,
UUID,
BigInt,
Boolean,
Bytea,
Date,
DoublePrecision,
ForeignKey,
Integer,
Interval,
Numeric,
Real,
SmallInt,
Text,
Timestamp,
Timestamptz,
Varchar,
)
from piccolo.table import Table
class SmallTable(Table):
    """A minimal one-column table; also the target of MegaTable.foreignkey_col."""
    varchar_col = Varchar()
class MegaTable(Table):
    """
    A table containing all of the column types, and different column kwargs.
    """
    # One column per supported column type:
    bigint_col = BigInt()
    boolean_col = Boolean()
    bytea_col = Bytea()
    date_col = Date()
    foreignkey_col = ForeignKey(SmallTable)
    integer_col = Integer()
    interval_col = Interval()
    json_col = JSON()
    jsonb_col = JSONB()
    numeric_col = Numeric(digits=(5, 2))
    real_col = Real()
    double_precision_col = DoublePrecision()
    smallint_col = SmallInt()
    text_col = Text()
    timestamp_col = Timestamp()
    timestamptz_col = Timestamptz()
    uuid_col = UUID()
    varchar_col = Varchar()
    # Column-kwarg variations on Varchar:
    unique_col = Varchar(unique=True)
    null_col = Varchar(null=True)
    not_null_col = Varchar(null=False)
|
import os, itertools
from datetime import datetime, timedelta
import numpy as np
import xarray as xr
import pandas as pd
import logging
# Module-level logger; the NullHandler keeps this library silent unless the
# host application configures logging.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
"""
SCALING of fields
---Flash extent density---
For a 330 ms time separation criterion, the max flash rate per minute in any pixel
is 181.8/min, or a max hourly value of 10860.
10860/65535 = 1/6
Another way to go: for an 8x8 km GLM pixel oversampled to 2x2 km,
the fractional area coverage is 1/16. This works out to 4095 flashes per minute
max, which seems plenty safe and covers the absolute max accumulation in 20 min.
2 bytes unsigned
scale=1/16=0.0625
offset=0
---Group extent density---
Have a max in relampago case of 1000/min (max is 30000/min for a 500 fps CCD),
so over 20 min this is 20000 events.
That is 5x the max we assume above in FED, which is equivalent to 5 groups/flash
assuming a full flash rate. And that full flash rate is unlikely to be reached!
4*1/16 = 1/4 would preserve a bit of antialiasing and multiplies nicely.
Set scale to 1/4 = 0.25
Offset = 0
---Flash and group centroid density---
GCD will be larger than FCD, so size with GCD. The max rate for CGD will likely
be the same as the GED - a maximum at the center of the cell. We can get away
with two bytes, scale and offset of 1 because there is no antialiasing.
scale=1
offset=0
---Average flash area, group area---
The WMO record flash distance is 320 km, which squared is ~100,000 km^2.
Minimum observable flash area is 64 km^2, which divided by 16 (for smallest
fractional pixel coverage at 2 km) is 4 km^2.
A 2 km^2 scale factor gets us 131070 max in two bytes.
However, since 10000 km^2 is the max flash area in the L2 data, it will never
be exceeded, and so we set scale to 1 and offset to 0, for a max 65535.
2 bytes, unsigned
scale_factor = 1 km^2
offset = 0
---Total optical energy---
2 bytes, linear
scale_factor=1.52597e-15 J, add_offset=0 (range 0 to 1.0e-10 J)
1.0e-10 is also the max event_energy value in the L2 files, so we can’t have
more than one event that hits that level. However, I think this should be safe.
The fact that the flash_energy has the same scale factor as event_energy in L2
suggests that there is margin in the ceiling of 1e-10 J. And as Scott's stats
showed, pixels with total energy in excess of even 10e-12 J are quite rare.
"""
# Per-field netCDF encodings (dtype / scale_factor / add_offset) for the
# gridded GLM products; the rationale for these choices is in the module
# note above.
# NOTE(review): 'total_energy' uses 1.52597e-6 while the note above derives
# 1.52597e-15 J, and the flash-area fields use 10.0 vs. the note's 1 km^2 —
# confirm the intended units before relying on these values.
glm_scaling = {
    'flash_extent_density':{'dtype':'uint16',
        'scale_factor':0.0625, 'add_offset':0.0},
    'flash_centroid_density':{'dtype':'uint16',
        'scale_factor':1.0, 'add_offset':0.0},
    'average_flash_area':{'dtype':'uint16',
        'scale_factor':10.0, 'add_offset':0.0},
    'minimum_flash_area':{'dtype':'uint16',
        'scale_factor':10.0, 'add_offset':0.0},
    'event_density':{'dtype':'uint16',
        'scale_factor':0.25, 'add_offset':0.0},
    # 'standard_deviation_flash_area',
    'group_extent_density':{'dtype':'uint16',
        'scale_factor':0.25, 'add_offset':0.0},
    'group_centroid_density':{'dtype':'uint16',
        'scale_factor':1.0, 'add_offset':0.0},
    'average_group_area':{'dtype':'uint16',
        'scale_factor':1.0, 'add_offset':0.0},
    'total_energy':{'dtype':'uint16',
        'scale_factor':1.52597e-6, 'add_offset':0.0,},
}
def get_goes_imager_subpoint_vars(nadir_lon):
    """ Returns two xarray DataArrays containing the nominal satellite subpoint
    latitude and longitude, as netCDF float variables.
    returns subpoint_lon, subpoint_lat
    """
    def _subpoint_var(value, name, long_name, standard_name, units):
        # Build one scalar subpoint variable with CF attrs and a fill value.
        var = xr.DataArray(value, name=name,
                           attrs={'long_name': long_name,
                                  'standard_name': standard_name,
                                  'units': units})
        var.encoding = {'_FillValue': -999.0}
        return var
    # The subpoint latitude of a geostationary platform is 0.0 by definition.
    sublat = _subpoint_var(
        0.0, 'nominal_satellite_subpoint_lat',
        "nominal satellite subpoint latitude (platform latitude)",
        'latitude', 'degrees_north')
    sublon = _subpoint_var(
        nadir_lon, 'nominal_satellite_subpoint_lon',
        "nominal satellite subpoint longitude (platform longitude)",
        'longitude', 'degrees_east')
    return sublon, sublat
def get_goes_imager_proj(nadir_lon):
    """ Returns an xarray DataArray containing the GOES-R series
    goes_imager_projection data and metadata
    """
    # CF grid-mapping attributes for the GOES-R geostationary projection.
    meta = {
        'long_name': "GOES-R ABI fixed grid projection",
        'grid_mapping_name': "geostationary",
        'perspective_point_height': 35786023.,
        'semi_major_axis': 6378137.,
        'semi_minor_axis': 6356752.31414,
        'inverse_flattening': 298.2572221,
        'latitude_of_projection_origin': 0.0,
        'longitude_of_projection_origin': nadir_lon,
        'sweep_angle_axis': "x",
    }
    # The data value is a placeholder int; only the attributes carry meaning.
    var = xr.DataArray(-2147483647, attrs=meta, name='goes_imager_projection')
    var.encoding = {'dtype': 'i4'}
    return var
def get_goes_imager_all_valid_dqf(dims, n):
    """ dims is a tuple of dimension names in the same order as n, the number
    of elements along each dimension
    Returns dqf, an xarray.DataArray of the GLM data quality
    field, in the style of the GOES-R series DQF field """
    meta = {
        'grid_mapping': "goes_imager_projection",
        'number_of_qf_values': np.asarray(6, dtype='i4'),
        'units': "1",
        'standard_name': "status_flag",
        'long_name': "GLM data quality flags",
        'flag_values': np.asarray((0, 1), dtype='i4'),
        'flag_meanings': "valid, invalid",
    }
    enc = {
        '_FillValue': np.asarray(255, dtype='u1'),
        '_Unsigned': "true",
        'dtype': 'i1',
        'zlib': True,  # compress the field
    }
    # All-zero flag array: every pixel is marked valid.
    all_valid = np.zeros(n, dtype='u1')
    dqf_var = xr.DataArray(all_valid, dims=dims, attrs=meta, name="DQF")
    dqf_var.encoding = enc
    return dqf_var
def get_goes_imager_fixedgrid_coords(x, y, resolution='2km at nadir',
                                     scene_id='FULL', fill=-999.0):
    """ Create variables with metadata for fixed grid coordinates as defined
    for the GOES-R series of spacecraft.
    Assumes that imagery are at 2 km resolution (no other options are
    implemented), and applies the scale and offset values indicated in the
    GOES-R PUG for the full disk scene, guaranteeing that we cover all fixed
    grid coordinates.
    Arguments:
    x, y: 1-dimensional arrays of coordinate values
    resolution: like "2km at nadir"
    scene_id: 'FULL' is the only allowed argument; other values will be ignored
    Returns:
    x_var, y_var: xarray.DataArray objects, with type inferred from x and y.
    """
    # Force the full-disk encoding regardless of the caller's scene_id
    # (see the docstring: only 'FULL' is implemented).
    scene_id='FULL'
    # Values from the GOES-R PUG. These are signed shorts (int16).
    two_km_enc = {
        'FULL':{'dtype':'int16', 'x':{'scale_factor': 0.000056,
                                      'add_offset':-0.151844,
                                      '_FillValue':-999.0},
                'y':{'scale_factor':-0.000056,
                     'add_offset':0.151844,
                     '_FillValue':-999.0},
               },
        # The PUG has specific values for the CONUS sector, and
        # given the discretization of the coords to 2 km resolution, is it necessary
        # to special-case each scene so the span of the image? Right now ONLY
        # GOES-EAST CONUS IS IMPLEMENTED as a special case, with scene_id='CONUS'.
        # 'CONUS':{'dtype':'int16', 'x':{'scale_factor': 0.000056,
        #                                'add_offset':-0.101332},
        #          'y':{'scale_factor':-0.000056,
        #               'add_offset':0.128212},
        #         }
        # 'MESO1', 'MESO2', 'OTHER'
    }
    # two_km_enc['OTHER'] = two_km_enc['MESO1']
    x_meta, y_meta = {}, {}
    # NOTE(review): x_enc/y_enc alias the nested dicts inside two_km_enc, so
    # the dtype assignments mutate them — harmless because two_km_enc is
    # rebuilt on every call.
    x_enc = two_km_enc['FULL']['x']
    x_enc['dtype'] = two_km_enc[scene_id]['dtype']
    y_enc = two_km_enc['FULL']['y']
    y_enc['dtype'] = two_km_enc[scene_id]['dtype']
    x_meta['axis'] = "X"
    x_meta['long_name'] = "GOES fixed grid projection x-coordinate"
    x_meta['standard_name'] = 'projection_x_coordinate'
    x_meta['units'] = "rad"
    y_meta['axis'] = "Y"
    y_meta['long_name'] = "GOES fixed grid projection y-coordinate"
    y_meta['standard_name'] = 'projection_y_coordinate'
    y_meta['units'] = "rad"
    x_coord = xr.DataArray(x, name='x', dims=('x',),
                           attrs=x_meta)
    x_coord.encoding = x_enc
    y_coord = xr.DataArray(y, name='y', dims=('y',),
                           attrs=y_meta)
    y_coord.encoding = y_enc
    return x_coord, y_coord
def get_glm_global_attrs(start, end, platform, slot, instrument, scene_id,
                         resolution, timeline, prod_env, prod_src, prod_site, ):
    """
    Create the global metadata attribute dictionary for GOES-R series GLM
    Imagery products.
    Arguments:
    start, end: datetime of the start and end times of image coverage
    platform: one of G16, G17 or a follow-on platform
    slot: the orbital slot ("GOES-East", "GOES-West", etc.)
    instrument: one of "GLM-1", "GLM-2", or a follow on instrument.
    scene_id: one of 'FULL', 'CONUS', 'MESO1', or 'MESO2' if compatible with
    the ABI definitions or 'OTHER'.
    resolution: like "2km at nadir"
    timeline: an ABI mode string, e.g. "ABI Mode 3"
    prod_env: "OE", "DE", etc.
    prod_src: "Realtime" or "Postprocessed"
    prod_site: "NAPO", "TTU", etc.
    The date_created is set to the time at which this function is run.
    Returns: meta, a dictionary of metadata attributes.
    """
    from datetime import timezone
    # Bug fix: datetime.now() returned *local* time, but the creation
    # timestamp is written with a 'Z' (UTC) suffix below. Take the current
    # UTC time and drop the tzinfo so isoformat() stays suffix-free.
    created = datetime.now(timezone.utc).replace(tzinfo=None)
    modes = {'ABI Mode 3':'M3'}
    # For use in the dataset name / filename
    scenes = {'FULL':'F',
              'CONUS':'C',
              'MESO1':'M1',
              'MESO2':'M2',
              'OTHER':'M1'}
    scene_names = {"FULL":"Full Disk",
                   "CONUS":"CONUS",
                   "MESO1":"Mesoscale",
                   "MESO2":"Mesoscale",
                   "OTHER":"Custom"}
    # "OR_GLM-L2-GLMC-M3_G16_s20181011100000_e20181011101000_c20181011124580.nc
    dataset_name = "OR_GLM-L2-GLM{5}-{0}_{1}_s{2}_e{3}_c{4}.nc".format(
        modes[timeline], platform, start.strftime('%Y%j%H%M%S0'),
        end.strftime('%Y%j%H%M%S0'), created.strftime('%Y%j%H%M%S0'),
        scenes[scene_id]
    )
    meta = {}
    # Properties that don't change
    meta['cdm_data_type'] = "Image"
    meta['Conventions'] = "CF-1.7"
    meta['id'] = "93cb84a3-31ef-4823-89f5-c09d88fc89e8"
    meta['institution'] = "DOC/NOAA/NESDIS > U.S. Department of Commerce, National Oceanic and Atmospheric Administration, National Environmental Satellite, Data, and Information Services"
    meta['instrument_type'] = "GOES R Series Geostationary Lightning Mapper"
    meta['iso_series_metadata_id'] = "f5816f53-fd6d-11e3-a3ac-0800200c9a66"
    meta['keywords'] = "ATMOSPHERE > ATMOSPHERIC ELECTRICITY > LIGHTNING, ATMOSPHERE > ATMOSPHERIC PHENOMENA > LIGHTNING"
    meta['keywords_vocabulary'] = "NASA Global Change Master Directory (GCMD) Earth Science Keywords, Version 7.0.0.0.0"
    meta['license'] = "Unclassified data. Access is restricted to approved users only."
    meta['Metadata_Conventions'] = "Unidata Dataset Discovery v1.0"
    meta['naming_authority'] = "gov.nesdis.noaa"
    meta['processing_level'] = "National Aeronautics and Space Administration (NASA) L2"
    meta['project'] = "GOES"
    meta['standard_name_vocabulary'] = "CF Standard Name Table (v25, 05 July 2013)"
    meta['summary'] = "The Lightning Detection Gridded product generates fields starting from the GLM Lightning Detection Events, Groups, Flashes product. It consists of flash extent density, event density, average flash area, average group area, total energy, flash centroid density, and group centroid density."
    meta['title'] = "GLM L2 Lightning Detection Gridded Product"
    # Properties that change
    meta['dataset_name'] = dataset_name
    meta['date_created'] = created.isoformat()+'Z'
    meta['instrument_ID'] = instrument
    meta['orbital_slot'] = slot
    meta['platform_ID'] = platform
    meta['production_data_source'] = prod_src
    meta['production_environment'] = prod_env
    meta['production_site'] = prod_site
    meta['scene_id'] = scene_names[scene_id]
    meta['spatial_resolution'] = resolution
    meta['time_coverage_end'] = end.isoformat()+'Z'
    meta['time_coverage_start'] = start.isoformat()+'Z'
    meta['timeline_id'] = timeline
    return meta
def glm_image_to_var(data, name, long_name, units, dims, fill=0.0,
                     scale_factor=None, add_offset=None, dtype=None):
    """
    data: array of data
    name: the standard name, CF-compliant if possible
    long_name: a more descriptive name
    units: udunits string for the units of data
    dims: tuple of coordinate names
    dtype: numpy dtype of variable to be written after applying scale and offset
    If dtype is not None, then the following are also checked
    scale_factor, add_offset: floating point discretization and offset, as
    commonly used in NetCDF datasets.
    decoded = scale_factor * encoded + add_offset
    Returns: data_var, xarray.DataArray objects, with type inferred from data

    NOTE(review): when scale_factor is given, `data` is modified in place
    (tiny positive values are clamped up to the smallest encodable value).
    """
    enc = {}
    meta = {}
    enc['_FillValue'] = fill
    enc['zlib'] = True # Compress the data
    if dtype is not None:
        orig_dtype = dtype
        # A leading 'u' means unsigned; netCDF stores the signed counterpart
        # plus the _Unsigned attribute.
        if orig_dtype[0] == 'u':
            enc['_Unsigned'] = 'true'
            dtype= dtype[1:]
        enc['dtype'] = dtype
        if scale_factor is not None:
            enc['scale_factor'] = scale_factor
            # Smallest decoded value that a nonzero encoded integer can carry.
            min_allowed = scale_factor
            if add_offset is not None:
                min_allowed += add_offset
            # Clamp small positive values up so they don't round down to the
            # zero/fill value when encoded.
            tiny = (data > 0.0) & (data <= min_allowed)
            data[tiny] = min_allowed
        if add_offset is not None:
            enc['add_offset'] = add_offset
    meta['standard_name'] = name
    meta['long_name'] = long_name
    meta['units'] = units
    meta['grid_mapping'] = "goes_imager_projection"
    d = xr.DataArray(data, attrs=meta, dims=dims, name=name)
    d.encoding = enc
    return d
def new_goes_imagery_dataset(x, y, nadir_lon):
    """ Create a new xarray.Dataset with the basic coordinate data, metadata,
    and global attributes that matches the GOES-R series fixed grid imagery
    format.
    Arguments
    x, y: 1-D arrays of coordinate positions
    nadir_lon: longitude (deg) of the sub-satellite point

    Returns: (dataset, scene_id, nominal_resolution), where scene_id and
    nominal_resolution are inferred from the grid spans.
    """
    # Dimensions
    dims = ('y', 'x')
    scene_id, nominal_resolution = infer_scene_from_dataset(x, y)
    log.debug("Span of grid implies scene is {0}".format(scene_id))
    # Coordinate data: x, y
    xc, yc = get_goes_imager_fixedgrid_coords(x, y, scene_id=scene_id)
    # Coordinate reference system
    goes_imager_proj = get_goes_imager_proj(nadir_lon)
    subpoint_lon, subpoint_lat = get_goes_imager_subpoint_vars(nadir_lon)
    # Data quality flags: all pixels valid
    dqf = get_goes_imager_all_valid_dqf(dims, y.shape+x.shape)
    v = {goes_imager_proj.name:goes_imager_proj,
         dqf.name:dqf,
         subpoint_lat.name:subpoint_lat,
         subpoint_lon.name:subpoint_lon,
        }
    c = {xc.name:xc, yc.name:yc}
    d = xr.Dataset(data_vars=v, coords=c)#, dims=dims)
    # Attributes aren't carried over for xc and yc like they are for dqf, etc.
    # so copy them over manually
    d.x.attrs.update(xc.attrs)
    d.y.attrs.update(yc.attrs)
    return d, scene_id, nominal_resolution
def xy_to_2D_lonlat(gridder, x_coord, y_coord):
    """Convert 1-D fixed-grid x, y coordinates to 2-D longitude/latitude grids.

    gridder: object providing mapProj (fixed grid) and geoProj (geographic)
        projection objects with toECEF/fromECEF methods.
    x_coord, y_coord: 1-D coordinate arrays.

    Returns (lons, lats), each of shape (len(x_coord), len(y_coord)).
    """
    mapProj = gridder.mapProj
    geoProj = gridder.geoProj
    # meshgrid(...).T yields arrays indexed as (nx, ny).
    x_all, y_all = (a.T for a in np.meshgrid(x_coord, y_coord))
    assert x_all.shape == y_all.shape
    # Bug fix: nx and ny were previously undefined (NameError); derive them
    # from the input coordinate lengths.
    nx, ny = len(x_coord), len(y_coord)
    assert x_all.shape[0] == nx
    assert x_all.shape[1] == ny
    z_all = np.zeros_like(x_all)
    lons, lats, alts = geoProj.fromECEF(
        *mapProj.toECEF(x_all, y_all, z_all))
    lons.shape = x_all.shape
    lats.shape = y_all.shape
    # Bug fix: the computed grids were previously discarded (no return).
    return lons, lats
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    # Duplicate the iterator and pair each item with its successor.
    left, right = itertools.tee(iterable)
    return zip(left, itertools.islice(right, 1, None))
def infer_scene_from_dataset(x, y):
    "Infer whether the scene matches one of the GOES-R fixed grid domains."
    from lmatools.grid.fixed import goesr_conus, goesr_meso, goesr_full, goesr_resolutions
    # Relative tolerance for all spacing/span comparisons below.
    rtol = 1.0e-2
    # Try to match up the actual spacing in microradians with a known resolutions
    dx = np.abs(x[1]-x[0])
    # Fallback label used when no known resolution matches.
    resolution = '{:d}microradian at nadir'.format(int(np.round(dx*1e6)))
    for km, microrad in goesr_resolutions.items():
        # NOTE(review): no break on match — if several entries matched, the
        # last one iterated would win.
        if np.allclose(microrad, dx, rtol=rtol):
            resolution = km.replace('.0', '') + ' at nadir'
    # Classify the scene by comparing the grid spans to the known domains.
    spanEW = x.max() - x.min()
    spanNS = y.max() - y.min()
    log.debug("Inferring scene from spans x={0}, y={1}".format(spanEW, spanNS))
    if (np.allclose(spanEW, goesr_full['spanEW'], rtol=rtol) &
        np.allclose(spanNS, goesr_full['spanNS'], rtol=rtol) ):
        scene_id = "FULL"
    elif (np.allclose(spanEW, goesr_conus['spanEW'], rtol=rtol) &
          np.allclose(spanNS, goesr_conus['spanNS'], rtol=rtol) ):
        scene_id = "CONUS"
    elif (np.allclose(spanEW, goesr_meso['spanEW'], rtol=rtol) &
          np.allclose(spanNS, goesr_meso['spanNS'], rtol=rtol) ):
        scene_id = "MESO1"
    elif (np.allclose(spanEW, 0.172732, rtol=rtol)):
        # This is the "expanded CONUS" domain for GOES-West;
        # see lmatools.grid.fixed for further details.
        scene_id = "CONUS"
    else:
        scene_id = "OTHER"
    return scene_id, resolution
def write_goes_imagery(gridder, outpath='./{dataset_name}', pad=None, scale_and_offset=True):
    """ pad is a tuple of x_slice, y_slice: slice objects used to index the
    zeroth and first dimensions, respectively, of the grids in gridder.
    scale_and_offset controls whether to write variables as scaled ints.
    if False, floating point grids will be written.
    outpath can be a template string; defaults to {'./{dataset_name}'}
    Available named arguments in the template are:
    dataset_name: standard GOES imagery format, includes '.nc'. Looks like
    OR_GLM-L2-GLMM1-M3_G16_s20181830432000_e20181830433000_c20200461148520.nc
    start_time, end_time: datetimes that can be used with strftime syntax, e.g.
    './{start_time:%y/%b/%d}/GLM_{start_time:%Y%m%d_%H%M%S}.nc'
    Intermediate directories will be created to match outpath.

    Returns: list of paths of all files written (one per time step).
    """
    self = gridder
    if pad is not None:
        x_slice, y_slice = pad
    else:
        x_slice, y_slice = (slice(None, None), slice(None, None))
    fixProj = self.mapProj
    geoProj = self.geoProj
    # Center of the grid is the nadir longitude
    subsat = (0.0, 0.0, 0.0)
    nadir_lon, nadir_lat, nadir_alt = geoProj.fromECEF(*fixProj.toECEF(*subsat))
    # Get 1D x and y coordinates, and corresponding lons and lats
    spatial_scale_factor = self.spatial_scale_factor
    xedge = self.xedge
    yedge = self.yedge
    # Cell centers are the midpoints of the edge arrays.
    x_coord = ((xedge[:-1] + xedge[1:])/2.0)[x_slice]
    y_coord = ((yedge[:-1] + yedge[1:])/2.0)[y_slice]
    file_iter = list(zip(self.outgrids, self.field_names,
                         self.field_descriptions, self.field_units, self.outformats))
    # Write a separate file at each time.
    all_outfiles = []
    for ti, (t0, t1) in enumerate(pairwise(self.t_edges_seconds)):
        start = self.t_ref + timedelta(0, t0)
        end = self.t_ref + timedelta(0, t1)
        log.info("Assembling NetCDF dataset for {0} - {1}".format(start, end))
        # Need to flip the y coordinate to reverse order since y is defined as
        # upper left in the GOES-R series L1b PUG (section 5.1.2.6 Product Data
        # Structures). Later, to follow the image array convention will
        # transpose the grids and then flipud.
        dataset, scene_id, nominal_resolution = new_goes_imagery_dataset(x_coord,
            np.flipud(y_coord), nadir_lon)
        # Global metadata
        l2lcfa_attrs = gridder.first_file_attrs
        global_attrs = get_glm_global_attrs(start, end,
            l2lcfa_attrs['platform_ID'], l2lcfa_attrs['orbital_slot'],
            l2lcfa_attrs['instrument_ID'], scene_id,
            nominal_resolution, "ABI Mode 3", "DE", "Postprocessed", "TTU"
            )
        dataset = dataset.assign_attrs(**global_attrs)
        # log.debug("*** Checking x coordinate attrs initial")
        # log.debug(dataset.x.attrs)
        outfile = outpath.format(start_time=start, end_time=end,
                                 dataset_name=dataset.attrs['dataset_name'])
        enclosing_dir = os.path.dirname(outfile)
        if os.path.exists(enclosing_dir) == False:
            os.makedirs(enclosing_dir)
        # Adding a new variable to the dataset below clears the coord attrs
        # so hold on to them for now.
        xattrs, yattrs = dataset.x.attrs, dataset.y.attrs
        xenc, yenc = dataset.x.encoding, dataset.y.encoding
        for i, (grid_allt, field_name, description, units, outformat) in enumerate(file_iter):
            grid = grid_allt[x_slice,y_slice,ti]
            if i in self.divide_grids:
                # This field is a ratio of two accumulated grids; divide
                # where the denominator is nonzero.
                denom = self.outgrids[self.divide_grids[i]][x_slice,y_slice,ti]
                zeros = (denom == 0) | (grid == 0)
                nonzeros = ~zeros
                grid[nonzeros] = grid[nonzeros]/denom[nonzeros]
                grid[zeros] = 0 # avoid nans
            image_at_time = np.flipud(grid.T)
            scale_kwargs = {}
            if (field_name in glm_scaling) and scale_and_offset:
                scale_kwargs.update(glm_scaling[field_name])
            img_var = glm_image_to_var(image_at_time,
                                       field_name, description, units,
                                       ('y', 'x'), **scale_kwargs)
            # Why does this line clear the attrs on the coords?
            # log.debug("*** Checking x coordinate attrs {0}a".format(i))
            # log.debug(dataset.x.attrs)
            dataset[img_var.name] = img_var
            # log.debug("*** Checking x coordinate attrs {0}b".format(i))
            # log.debug(dataset.x.attrs)
        # Restore the cleared coord attrs
        dataset.x.attrs.update(xattrs)
        dataset.y.attrs.update(yattrs)
        dataset.x.encoding.update(xenc)
        dataset.y.encoding.update(yenc)
        # log.debug("*** Checking x coordinate attrs final")
        # log.debug(dataset.x.attrs)
        log.info("Preparing to write NetCDF {0}".format(outfile))
        dataset.to_netcdf(outfile)
        log.info("Wrote NetCDF {0}".format(outfile))
        all_outfiles.append(outfile)
    return all_outfiles
def aggregate(glm, minutes, start_end=None):
    """ Given a multi-minute glm imagery dataset (such as that returned by
    glmtools.io.imagery.open_glm_time_series) and an integer number of minutes,
    recalculate average and minimum flash area for that interval and sum all other
    fields.
    start_end: datetime objects giving the start and end edges of the interval to
    be aggregated. This allows for a day-long dataset to be aggregated over an hour
    of interest, for example. If not provided, the start of the glm dataset plus
    *minutes* after the end of the glm dataset will be used.
    This function expects the GLM data to have missing values (NaN) where
    there is no lightning and relies on the skipna functionality of
    DataArray.min() and .sum() to find the aggregation across frames where
    each pixel has a mix of lightning and no-lightning times. If these
    frames instead had zero for minimum flash area the minimum value would
    be zero, leading to data loss and a clear difference between FED and
    MFA.
    To restore the original time coordinate name, choose the left, mid, or right
    endpoint of the time_bins coordinate produced by the aggregation step.
    >>> agglm = aggregate(glm, 5)
    >>> agglm['time_bins'] = [v.left for v in agglm.time_bins.values]
    >>> glm_agg = agglm.rename({'time_bins':'time'})
    """
    dt_1min = timedelta(seconds=60)
    dt = dt_1min*minutes
    if start_end is not None:
        start = start_end[0]
        end = start_end[1]
    else:
        start = pd.Timestamp(glm['time'].min().data).to_pydatetime()
        end = pd.Timestamp(glm['time'].max().data).to_pydatetime() + dt
    # The lines below might be necessary replacements for a future version
    # start = pd.to_datetime(glm['time'].min().data).to_pydatetime()
    # end = pd.to_datetime(glm['time'].max().data).to_pydatetime() + dt
    duration = end - start
    # Fields that aggregate by summation over the interval.
    sum_vars = ['flash_extent_density', 'flash_centroid_density',
                'total_energy',
                'group_extent_density', 'group_centroid_density', ]
    sum_vars = [sv for sv in sum_vars if sv in glm]
    sum_data = glm[sum_vars]
    # goes_imager_projection is a dummy int variable, and all we care about
    # is the attributes.
    min_vars = ['minimum_flash_area', 'goes_imager_projection',
                'nominal_satellite_subpoint_lat', 'nominal_satellite_subpoint_lon']
    min_vars = [mv for mv in min_vars if mv in glm]
    min_data = glm[min_vars]
    # Convert the per-frame averages to summable totals.
    sum_data['total_flash_area'] = glm.average_flash_area*glm.flash_extent_density
    # Bug fix: total group area must be derived from the *group* fields; it
    # previously duplicated the flash calculation, making the recomputed
    # average_group_area below incorrect.
    sum_data['total_group_area'] = glm.average_group_area*glm.group_extent_density
    t_bins = [start + dt*i for i in range(int(duration/dt)+1)]
    t_groups_sum = sum_data.groupby_bins('time', bins=t_bins)
    t_groups_min = min_data.groupby_bins('time', bins=t_bins)
    aggregated_min = t_groups_min.min(dim='time', keep_attrs=True, skipna=True)
    # Naively sum all variables … so average areas are now ill defined. Recalculate
    aggregated = t_groups_sum.sum(dim='time', keep_attrs=True, skipna=True)
    aggregated['average_flash_area'] = (aggregated.total_flash_area
                                        / aggregated.flash_extent_density)
    aggregated['average_group_area'] = (aggregated.total_group_area
                                        / aggregated.group_extent_density)
    for var in min_vars:
        aggregated[var] = aggregated_min[var]
    # We skip 'DQF' since it's empty and not clear what summing it would mean.
    # Someone can make that decision later when DQF is populated.
    # time_bins is made up of Interval objects with left and right edges
    aggregated.attrs['time_coverage_start'] = min(
        [v.left for v in aggregated.time_bins.values]).isoformat()
    aggregated.attrs['time_coverage_end'] = max(
        [v.right for v in aggregated.time_bins.values]).isoformat()
    return aggregated
def gen_file_times(filenames, time_attr='time_coverage_start'):
    """Yield one timestamp per file, read from the named global attribute.

    Timezone information is stripped so that xarray auto-converts the
    values to datetime64 instead of pandas.Timestamp objects.
    """
    for filename in filenames:
        with xr.open_dataset(filename) as dataset:
            stamp = pd.Timestamp(dataset.attrs[time_attr])
            yield stamp.tz_localize(None)
def open_glm_time_series(filenames, chunks=None):
    """Combine individual 1-min GLM gridded imagery files into one Dataset.

    Creates an index on the time dimension; the time dimension keeps the
    order in which the files are listed (combine='nested' behavior of
    open_mfdataset). Adjusts the time_coverage_start/_end metadata.
    """
    # Collect per-file coverage stamps so the concatenated dataset's
    # metadata can be fixed up afterwards.
    file_starts = list(gen_file_times(filenames))
    file_ends = list(gen_file_times(filenames, time_attr='time_coverage_end'))
    combined = xr.open_mfdataset(filenames, concat_dim='time',
                                 chunks=chunks, combine='nested')
    combined['time'] = file_starts
    combined = combined.set_index({'time': 'time'}).set_coords('time')
    combined.attrs['time_coverage_start'] = pd.Timestamp(min(file_starts)).isoformat()
    combined.attrs['time_coverage_end'] = pd.Timestamp(max(file_ends)).isoformat()
    return combined
# coding: utf-8
from py2neo import Graph
from py2neo import Node, Relationship
import json
import pandas as pd
import sys
import os
# Connecting to neo4j graph database
# NOTE(review): URL and password are hard-coded; consider reading them from
# the environment or a config file.
graph = Graph('http://localhost:7474', password='1234')
# These are the entities on which data is extracted
# (Pakistani politicians and political parties, matched case-insensitively
# against story summaries below).
entities = ['imran khan', 'nawaz sharif', 'bilawal bhutto', 'asif zardari',
            'arif alvi', 'maryam nawaz', 'asif khosa', 'fawad chaudhry', 'fazal ur rehman',
            'shehbaz sharif', 'qamar bajwa', 'altaf hussain', 'pervez musharraf',
            'mustafa kamal', 'siraj ul haq', 'sheikh rasheed', 'pervez khattak', 'asad umar',
            'murad ali shah', 'aitzaz ahsan', 'asif ghafoor', 'PTI', 'PMLN', 'PPP', 'JUI', 'MQM']
if __name__ == "__main__":
    # Build a knowledge graph in neo4j from pre-extracted summary, tweet and
    # YouTube-video JSON files for the keyword given on the command line.
    keyword = ""
    flag = False
    # Extracting keyword from command line argument
    # (joins every argument after the script name with spaces)
    for word in sys.argv:
        if not flag:
            flag = True
            continue
        keyword += word + " "
    keyword = keyword.rstrip()
    ''' Extract summary, tweet and video files '''
    files = os.listdir('model/')
    file = ''
    twitter_file = ''
    youtube_file = ''
    for f in files:
        if 'tweets' in f and keyword in f:
            twitter_file = 'model/' + f
        elif 'videos' in f and keyword in f:
            youtube_file = 'model/' + f
        elif 'summary' in f and keyword in f:
            file = 'model/' + f
    # Twitter data
    with open(twitter_file, encoding='utf-8') as json_file:
        tweet_data = json.load(json_file)
    # Youtube data
    with open(youtube_file, encoding='utf-8') as json_file:
        yt_data = json.load(json_file)
    # Summary data
    with open(file, encoding='utf-8') as json_file:
        data = json.load(json_file)
    # Adding summaries to graph
    for i in range(0, len(data)):
        # Add a node of each "Story" of the summary
        summary = Node("Story", Topic=data[i]['topic']
                       , Summary=data[i]['Summary']
                       , Sentiment=data[i]['Sentiment']
                       , Date=data[i]['Date'])
        graph.create(summary)
        '''
        Adding the Knowledge Graph layer, this inclues
        - identifying entities among stories
        - identifying sub stories among stories
        - and their relationships
        '''
        flag = False
        # Match full keyword
        for entity in entities:
            if entity in data[i]['Summary'].lower():
                # NOTE(review): entity text is interpolated into Cypher
                # directly; safe here only because entities is a fixed list.
                # Parameterised queries would be more robust.
                node = graph.evaluate('MATCH(e:Entity {name: "' + entity + '"}) RETURN e')
                if not node:
                    node = graph.evaluate('CREATE(e:Entity {name: "' + entity + '"}) RETURN e')
                # Create relationship of story with an entity
                r = Relationship(node, "HAS_STORY", summary)
                graph.create(r)
                flag = True
        # No entity related to story, link to another story
        if not flag:
            # Delete the previous Story node
            graph.delete(summary)
            # Create a "Substory" node of the summary instead
            summary = Node("Substory", Topic=data[i]['topic']
                           , Summary=data[i]['Summary']
                           , Sentiment=data[i]['Sentiment']
                           , Date=data[i]['Date'])
            graph.create(summary)
            # Extract related stories
            query = "MATCH(n:Story) WHERE "
            for word in keyword.split():
                query += "toLower(n.Summary) CONTAINS '" + word + "'" + " AND "
            # NOTE(review): rstrip strips a *character set* {' ','A','N','D'},
            # not the literal suffix " AND "; it happens to work here because
            # the character before the suffix is always a quote.
            query = query.rstrip(' AND ')
            query += " RETURN n"
            ss = graph.run(query).to_table()
            # Create substory relationship
            if ss:
                for story_node in ss:
                    r = Relationship(story_node[0], "HAS_SUBSTORY", summary)
                    graph.create(r)
        # Adding tweets belonging to that summary
        for j in range(0, len(tweet_data)):
            if tweet_data[j]['Topic'] == i:
                # Create node based on type of tweet
                tweet_type = tweet_data[j]['Type']
                tweet = Node(tweet_type, Created_time = tweet_data[j]['Created_time']
                             , URL = tweet_data[j]['URL']
                             , User_name = tweet_data[j]['User_name']
                             , Twitter_handle = tweet_data[j]['Twitter_handle']
                             , Description = tweet_data[j]['Description']
                             , Retweet_count = tweet_data[j]['Retweet_count']
                             , Favorite_count = tweet_data[j]['Favorite_count']
                             , Sentiment = tweet_data[j]['Sentiment']
                             , Type = tweet_data[j]['Type']
                             , Topic = tweet_data[j]['Topic'])
                graph.create(tweet)
                r = Relationship(summary, "HAS_TWEET", tweet)
                graph.create(r)
        # Adding videos belonging to that summary
        for j in range(0, len(yt_data)):
            if yt_data[j]['Topic'] == i:
                video = Node("Video", Published_date = yt_data[j]['Published_date']
                             , Title = yt_data[j]['Title']
                             , URL = yt_data[j]['URL']
                             , Channel_id = yt_data[j]['Channel_id']
                             , Topic = yt_data[j]['Topic'])
                graph.create(video)
                r = Relationship(summary, "HAS_VIDEO", video)
                graph.create(r)
    print("Done with", keyword)
import unittest
import json
from src.Validator.TimeValidator import TimeValidator, check_if_format_matches
from src.Validator.Parser import decode_record
def get_setup_object():
    """Build a TimeValidator from the sample.json records that have a location.

    Returns a TimeValidator over the decoded records whose 'miejsce'
    (location) field is non-empty.
    """
    # Use a context manager so the file handle is always closed (the
    # original opened the file and never closed it), and pin the encoding
    # since the data contains non-ASCII (Polish) characters.
    with open('sample.json', 'r', encoding='utf-8') as f:
        file_content = json.load(f)['content']
    return TimeValidator([decode_record(x) for x in file_content if x['miejsce'] != ''])
class CheckingValues(unittest.TestCase):
    """Tests for TimeValidator: time-format checking and overlap detection."""

    def test_check_if_format_matches(self):
        # H:MM and HH:MM with digits only are valid; letters anywhere fail.
        self.assertEqual(True, check_if_format_matches({"godz": '10:00'}))
        self.assertEqual(True, check_if_format_matches({"godz": '7:00'}))
        self.assertEqual(False, check_if_format_matches({"godz": '1a:00'}))
        self.assertEqual(False, check_if_format_matches({"godz": '10:b0'}))

    def test_validate_format_without_error(self):
        # The pristine sample data should produce no format errors.
        validator = get_setup_object()
        self.assertEqual([], validator.validate_time_format())

    def test_validate_format_with_error(self):
        # Injecting a record with a malformed start time ("7:4b") should make
        # validate_time_format report exactly that record.
        validator = get_setup_object()
        error_causing_object = {"godz": "7:4b", "wym": 28, "miejsce": "D17 1.38", "pora": "Z", "przedmiot": "Algebra",
                                "tyg": "",
                                "obier": "", "dzien": "Wd", "prow": "wms", "osoba": "Przybyło Jakub", "grupa": "",
                                "studia": "s1",
                                "koniec": "9:10", "typ": "W", "sem": 1}
        validator.file.append(error_causing_object)
        self.assertEqual([error_causing_object], validator.validate_time_format())

    def test_validate_without_error(self):
        # The pristine sample data should contain no overlapping classes.
        validator = get_setup_object()
        self.assertEqual([], validator.check_if_classes_overlap())

    def test_validate_with_error(self):
        # A duplicated Tuesday 7:40-9:10 class overlaps with itself, so the
        # validator should report the colliding pair.
        validator = get_setup_object()
        self.maxDiff = None
        error_causing_object = {"godz": "7:40", "wym": 28, "miejsce": "D17 1.38", "pora": "Z", "przedmiot": "Algebra",
                                "tyg": "",
                                "obier": "", "dzien": "Wt", "prow": "wms", "osoba": 'Przybyło Jakub', "grupa": "",
                                "studia": "s1",
                                "koniec": "9:10", "typ": "W", "sem": 1}
        validator.file.append(error_causing_object)
        self.assertEqual([(error_causing_object, error_causing_object)], validator.check_if_classes_overlap())
# Run the suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, serge van Ginderachter <serge@vanginderachter.be>
# based on Matt Hite's bigip_pool module
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_monitor_http
short_description: "Manages F5 BIG-IP LTM http monitors"
description:
- "Manages F5 BIG-IP LTM monitors via iControl SOAP API"
version_added: "1.4"
author: Serge van Ginderachter
notes:
- "Requires BIG-IP software version >= 11"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx"
requirements:
- bigsuds
options:
server:
description:
- BIG-IP host
required: true
default: null
user:
description:
- BIG-IP username
required: true
default: null
password:
description:
- BIG-IP password
required: true
default: null
state:
description:
- Monitor state
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Monitor name
required: true
default: null
aliases: ['monitor']
partition:
description:
- Partition for the monitor
required: false
default: 'Common'
parent:
description:
- The parent template of this monitor template
required: false
default: 'http'
parent_partition:
description:
- Partition for the parent monitor
required: false
default: 'Common'
send:
description:
- The send string for the monitor call
required: true
default: none
receive:
description:
- The receive string for the monitor call
required: true
default: none
receive_disable:
description:
- The receive disable string for the monitor call
required: true
default: none
ip:
description:
- IP address part of the ipport definition. The default API setting
is "0.0.0.0".
required: false
default: none
port:
description:
      - port address part of the ipport definition. The default API
setting is 0.
required: false
default: none
interval:
description:
- The interval specifying how frequently the monitor instance
of this template will run. By default, this interval is used for up and
down states. The default API setting is 5.
required: false
default: none
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. The default API setting
is 16.
required: false
default: none
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. The default API setting is 0.
required: false
default: none
'''
EXAMPLES = '''
- name: BIGIP F5 | Create HTTP Monitor
local_action:
module: bigip_monitor_http
state: present
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ item.monitorname }}"
send: "{{ item.send }}"
receive: "{{ item.receive }}"
with_items: f5monitors
- name: BIGIP F5 | Remove HTTP Monitor
local_action:
module: bigip_monitor_http
state: absent
server: "{{ f5server }}"
user: "{{ f5user }}"
password: "{{ f5password }}"
name: "{{ monitorname }}"
'''
# bigsuds is an optional third-party dependency; record availability so that
# main() can fail with a clear message instead of an unhandled ImportError.
try:
    import bigsuds
except ImportError:
    bigsuds_found = False
else:
    bigsuds_found = True

# iControl template type and default parent template for HTTP monitors.
TEMPLATE_TYPE = 'TTYPE_HTTP'
DEFAULT_PARENT_TYPE = 'http'
# ===========================================
# bigip_monitor module generic methods.
# these should be re-useable for other monitor types
#
def bigip_api(bigip, user, password):
    """Return a bigsuds iControl API handle for the given BIG-IP host."""
    return bigsuds.BIGIP(hostname=bigip, username=user, password=password)
def check_monitor_exists(module, api, monitor, parent):
    """Return True if the monitor template exists with the expected type/parent.

    Fails the module when a monitor of the same name exists but with a
    different template type or parent template. Returns False when the
    monitor is not found at all.
    """
    # hack to determine if monitor exists
    result = False
    try:
        ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0]
        parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0]
        if ttype == TEMPLATE_TYPE and parent == parent2:
            result = True
        else:
            module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent))
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax.
    except bigsuds.OperationFailed as e:
        if "was not found" in str(e):
            result = False
        else:
            # genuine exception
            raise
    return result
def create_monitor(api, monitor, template_attributes):
    """Create the HTTP monitor template; return False if it already existed.

    Tolerates the race where another task created the monitor between our
    existence check and this call.
    """
    try:
        api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes])
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax.
    except bigsuds.OperationFailed as e:
        if "already exists" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True
def delete_monitor(api, monitor):
    """Delete the monitor template; return False if it was already gone."""
    try:
        api.LocalLB.Monitor.delete_template(template_names=[monitor])
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax.
    except bigsuds.OperationFailed as e:
        # maybe it was deleted since we checked
        if "was not found" in str(e):
            return False
        else:
            # genuine exception
            raise
    return True
def check_string_property(api, monitor, str_property):
    """Return True when the monitor's string property already matches.

    Compares the whole {'type', 'value'} struct against what the device
    reports. In check mode a not-yet-created monitor reports "was not
    found", which is treated as "nothing to change" (True).
    """
    try:
        return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0]
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax.
    except bigsuds.OperationFailed as e:
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
def set_string_property(api, monitor, str_property):
    """Write one string template property ({'type','value'}) to the monitor."""
    api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property])
def check_integer_property(api, monitor, int_property):
    """Return True when the monitor's integer property already matches.

    In check mode a not-yet-created monitor reports "was not found",
    which is treated as "nothing to change" (True).
    """
    try:
        return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0]
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax.
    except bigsuds.OperationFailed as e:
        # happens in check mode if not created yet
        if "was not found" in str(e):
            return True
        else:
            # genuine exception
            raise
def set_integer_property(api, monitor, int_property):
    """Write one integer template property (interval/timeout/...) to the monitor.

    Bug fix: the iControl LocalLB.Monitor API method is
    set_template_integer_property (matching get_template_integer_property
    used above); the original called a non-existent
    set_template_int_property, so every integer-property update failed.
    """
    api.LocalLB.Monitor.set_template_integer_property(template_names=[monitor],
                                                      values=[int_property])
def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties):
    """Reconcile desired string/integer template properties on the monitor.

    Properties whose desired value is None are left untouched. Returns True
    when any property differed (writes are skipped in check mode, but the
    changed flag is still reported).
    """
    changed = False
    for prop in template_string_properties:
        if prop['value'] is None:
            continue
        if check_string_property(api, monitor, prop):
            continue
        if not module.check_mode:
            set_string_property(api, monitor, prop)
        changed = True
    for prop in template_integer_properties:
        if prop['value'] is None:
            continue
        if check_integer_property(api, monitor, prop):
            continue
        if not module.check_mode:
            set_integer_property(api, monitor, prop)
        changed = True
    return changed
def get_ipport(api, monitor):
    """Return the monitor's destination struct ({'address_type', 'ipport'})."""
    return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0]
def set_ipport(api, monitor, ipport):
    """Set the monitor's destination ip:port.

    Returns (True, "") on success, or (False, reason) when the address type
    cannot be changed because the monitor is already assigned to a pool.
    """
    try:
        api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport])
        return True, ""
    # 'except X as e' replaces the Python-2-only 'except X, e' syntax.
    except bigsuds.OperationFailed as e:
        if "Cannot modify the address type of monitor" in str(e):
            return False, "Cannot modify the address type of monitor if already assigned to a pool."
        else:
            # genuine exception
            raise
# ===========================================
# main loop
#
# writing a module for other monitor types should
# only need an updated main() (and monitor specific functions)
def main():
    """Create, update or delete an F5 BIG-IP LTM HTTP monitor template.

    Reads connection and monitor parameters from the Ansible module
    arguments, then reconciles the monitor on the device via iControl.
    Supports check mode throughout.
    """
    # begin monitor specific stuff
    module = AnsibleModule(
        argument_spec = dict(
            server = dict(required=True),
            user = dict(required=True),
            password = dict(required=True),
            partition = dict(default='Common'),
            state = dict(default='present', choices=['present', 'absent']),
            name = dict(required=True),
            parent = dict(default=DEFAULT_PARENT_TYPE),
            parent_partition = dict(default='Common'),
            send = dict(required=False),
            receive = dict(required=False),
            receive_disable = dict(required=False),
            ip = dict(required=False),
            port = dict(required=False, type='int'),
            interval = dict(required=False, type='int'),
            timeout = dict(required=False, type='int'),
            time_until_up = dict(required=False, type='int', default=0)
        ),
        supports_check_mode=True
    )
    server = module.params['server']
    user = module.params['user']
    password = module.params['password']
    partition = module.params['partition']
    parent_partition = module.params['parent_partition']
    state = module.params['state']
    name = module.params['name']
    # Fully-qualified names include the partition, e.g. /Common/http.
    parent = "/%s/%s" % (parent_partition, module.params['parent'])
    monitor = "/%s/%s" % (partition, name)
    send = module.params['send']
    receive = module.params['receive']
    receive_disable = module.params['receive_disable']
    ip = module.params['ip']
    port = module.params['port']
    interval = module.params['interval']
    timeout = module.params['timeout']
    time_until_up = module.params['time_until_up']
    # end monitor specific stuff
    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")
    api = bigip_api(server, user, password)
    monitor_exists = check_monitor_exists(module, api, monitor, parent)
    # ipport is a special setting
    if monitor_exists:  # make sure to not update current settings if not asked
        cur_ipport = get_ipport(api, monitor)
        if ip is None:
            ip = cur_ipport['ipport']['address']
        if port is None:
            port = cur_ipport['ipport']['port']
    else:  # use API defaults if not defined to create it
        if interval is None:
            interval = 5
        if timeout is None:
            timeout = 16
        if ip is None:
            ip = '0.0.0.0'
        if port is None:
            port = 0
        if send is None:
            send = ''
        if receive is None:
            receive = ''
        if receive_disable is None:
            receive_disable = ''
    # define and set address type
    # (wildcard 0.0.0.0 / port 0 map onto the iControl "star" address types)
    if ip == '0.0.0.0' and port == 0:
        address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT'
    elif ip == '0.0.0.0' and port != 0:
        address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT'
    elif ip != '0.0.0.0' and port != 0:
        address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT'
    else:
        address_type = 'ATYPE_UNSET'
    ipport = {'address_type': address_type,
              'ipport': {'address': ip,
                         'port': port}}
    template_attributes = {'parent_template': parent,
                           'interval': interval,
                           'timeout': timeout,
                           'dest_ipport': ipport,
                           'is_read_only': False,
                           'is_directly_usable': True}
    # monitor specific stuff
    template_string_properties = [{'type': 'STYPE_SEND',
                                   'value': send},
                                  {'type': 'STYPE_RECEIVE',
                                   'value': receive},
                                  {'type': 'STYPE_RECEIVE_DRAIN',
                                   'value': receive_disable}]
    template_integer_properties = [{'type': 'ITYPE_INTERVAL',
                                    'value': interval},
                                   {'type': 'ITYPE_TIMEOUT',
                                    'value': timeout},
                                   {'type': 'ITYPE_TIME_UNTIL_UP',
                                    'value': time_until_up}]
    # main logic, monitor generic
    try:
        result = {'changed': False}  # default
        if state == 'absent':
            if monitor_exists:
                if not module.check_mode:
                    # possible race condition if same task
                    # on other node deleted it first
                    result['changed'] |= delete_monitor(api, monitor)
                else:
                    result['changed'] |= True
        else:  # state present
            ## check for monitor itself
            if not monitor_exists:  # create it
                if not module.check_mode:
                    # again, check changed status here b/c race conditions
                    # if other task already created it
                    result['changed'] |= create_monitor(api, monitor, template_attributes)
                else:
                    result['changed'] |= True
            ## check for monitor parameters
            # whether it already existed, or was just created, now update
            # the update functions need to check for check mode but
            # cannot update settings if it doesn't exist which happens in check mode
            result['changed'] |= update_monitor_properties(api, module, monitor,
                                                           template_string_properties,
                                                           template_integer_properties)
            # we just have to update the ipport if monitor already exists and it's different
            if monitor_exists and cur_ipport != ipport:
                set_ipport(api, monitor, ipport)
                result['changed'] |= True
            #else: monitor doesn't exist (check mode) or ipport is already ok
    except Exception, e:
        module.fail_json(msg="received exception: %s" % e)
    module.exit_json(**result)
# import module snippets
# (legacy Ansible module convention: AnsibleModule is injected via this
# wildcard import at the bottom of the file, then the module runs)
from ansible.module_utils.basic import *
main()
|
from ws.RLUtils.common.app_info_lib import DotDict
# RL training hyper-parameter defaults, exposed with attribute-style access.
app_info = DotDict(
    {
        "NUM_EPISODES": 500,      # training episodes per run
        "BATCH_SIZE": 64,         # samples per update
        "GAMMA": 0.99,            # reward discount used by the learner
        "LEARNING_RATE": 0.01,
        "EPSILON": 0.1,           # exploration rate
        "RHO": 0.99,
        "DISCOUNT_FACTOR": 0.9,
    }
)
def fn_add_configs(api_info):
    """Copy every default from app_info into the given mapping (overwriting)."""
    for key, value in app_info.items():
        api_info[key] = value
|
import numpy as np
from opytimizer.optimizers.swarm import ffoa
from opytimizer.spaces import search
def test_ffoa_compile():
    """FFOA.compile should create list-valued x_axis/y_axis attributes whose
    setters reject non-list values."""
    search_space = search.SearchSpace(n_agents=10, n_variables=2,
                                      lower_bound=[0, 0], upper_bound=[10, 10])
    new_ffoa = ffoa.FFOA()
    new_ffoa.compile(search_space)
    # The property setter is expected to raise on a non-list value.
    # Catch Exception instead of a bare 'except' so KeyboardInterrupt /
    # SystemExit are not swallowed.
    try:
        new_ffoa.x_axis = 1
    except Exception:
        new_ffoa.x_axis = []
    assert new_ffoa.x_axis == []
    try:
        new_ffoa.y_axis = 1
    except Exception:
        new_ffoa.y_axis = []
    assert new_ffoa.y_axis == []
def test_ffoa_update():
    """FFOA.update should complete one optimisation step on a sphere function."""
    def sphere(position):
        return np.sum(position ** 2)

    space = search.SearchSpace(n_agents=10, n_variables=2,
                               lower_bound=[0, 0], upper_bound=[10, 10])
    optimizer = ffoa.FFOA()
    optimizer.compile(space)
    optimizer.update(space, sphere)
|
import pytest
import os
from SCNIC.general import simulate_correls
from SCNIC.within_correls import within_correls
from biom.util import biom_open
@pytest.fixture()
def args1():
    """Argument namespace mimicking the within_correls CLI (BH p-adjustment)."""
    class Arguments(object):
        def __init__(self):
            settings = {
                'input': "table1.biom",
                'output': "out_dir",
                'correl_method': "spearman",
                'p_adjust': "bh",
                'outlier_removal': False,
                'verbose': False,
                'force': False,
                'min_sample': 2,
                'sparcc_filter': False,
                'procs': 1,
                'sparcc_p': None,
            }
            for attr, value in settings.items():
                setattr(self, attr, value)
    return Arguments()
@pytest.fixture()
def args2():
    """Argument namespace mimicking the within_correls CLI (sparcc filter)."""
    class Arguments(object):
        def __init__(self):
            settings = {
                'input': "table1.biom",
                'output': "out_dir",
                'correl_method': "spearman",
                'p_adjust': None,
                'outlier_removal': False,
                'verbose': True,
                'force': False,
                'min_sample': None,
                'sparcc_filter': True,
                'procs': 1,
                'sparcc_p': None,
            }
            for attr, value in settings.items():
                setattr(self, attr, value)
    return Arguments()
# integration test
def test_within_correls_classic_correlation_min_r_min_sample(args1, tmpdir):
    """End-to-end run of within_correls (BH adjustment, min_sample=2).

    Writes a simulated correlated biom table to a temp dir, runs the
    pipeline, and checks the correlations output file was produced.
    """
    table = simulate_correls()
    loc = tmpdir.mkdir("with_correls_test")
    with biom_open(str(loc.join("table1.biom")), 'w') as f:
        table.to_hdf5(f, 'madebyme')
    # within_correls resolves paths relative to the working directory.
    os.chdir(str(loc))
    within_correls(args1)
    files = os.listdir(str(loc)+'/out_dir')
    assert "correls.txt" in files
# integration test
def test_within_correls_classic_correlation_min_r_sparcc_filter(args2, tmpdir):
    """End-to-end run of within_correls with the sparcc filter enabled.

    Same flow as the min_sample variant: simulate a table, run the
    pipeline in a temp dir, and assert correls.txt is written.
    """
    table = simulate_correls()
    loc = tmpdir.mkdir("with_correls_test")
    with biom_open(str(loc.join("table1.biom")), 'w') as f:
        table.to_hdf5(f, 'madebyme')
    # within_correls resolves paths relative to the working directory.
    os.chdir(str(loc))
    within_correls(args2)
    files = os.listdir(str(loc)+'/out_dir')
    assert "correls.txt" in files
|
# 09/04/2017
from time import time


def is_palindrome(word):
    """Return True if *word* reads the same forwards and backwards."""
    # def instead of lambda-assignment (PEP 8 E731); behavior unchanged.
    return word == word[::-1]


def find_two_word_palindromes(words):
    """Return 'a b' strings for ordered pairs of distinct words in *words*
    whose concatenation is a palindrome.

    For each word only candidates whose reversal is a prefix or a suffix of
    the word can possibly form a palindrome with it, which keeps the search
    proportional to total word length rather than quadratic in dictionary
    size.
    """
    out = []
    for w in words:
        prefixes = [w[:i][::-1] for i in range(len(w))]
        suffixes = [w[i:][::-1] for i in range(len(w))]
        for x, y in zip(prefixes, suffixes):
            if x in words and x != w and is_palindrome(w + x):
                out.append(w + " " + x)
            if y in words and y != w and is_palindrome(y + w):
                out.append(y + " " + w)
    return out


def main():
    """Time the palindrome search over each dictionary file."""
    for dic in ["enable1.txt", "natura.txt"]:
        with open("../../other/" + dic, encoding="utf-8") as f:
            words = {s.strip() for s in f}
        start = time()
        out = find_two_word_palindromes(words)
        elapsed_time = time() - start
        print('{}: Found {} two-word palindromes in {} s.'.format(dic, len(out), elapsed_time))


# Guard the file I/O so the module is importable (and testable) without
# running the search; executed as a script it behaves as before.
if __name__ == "__main__":
    main()
"""
HexitecFEM for Hexitec ODIN control.
Christian Angelsen, STFC Detector Systems Software Group, 2019.
"""
from __future__ import division
import numpy as np
# Required to convert str to bool:
import distutils.util
import time
from datetime import datetime
import logging
import configparser
import os
from hexitec.RdmaUDP import RdmaUDP
from concurrent import futures
from socket import error as socket_error
from odin.adapters.parameter_tree import ParameterTree, ParameterTreeError
from tornado.ioloop import IOLoop
class HexitecFem():
    """
    Hexitec Fem class. Represents a single FEM-II module.

    Controls and configures each FEM-II module ready for a DAQ via UDP,
    using an RDMA-over-UDP control channel to the FPGA.
    """
OPTIONS = [
"Sensor_1_1",
"Sensor_1_2",
"Sensor_2_1",
"Sensor_2_2"
]
IMAGE = [
"LOG HDF5 FILE",
"LOG BIN FILE",
"STREAM DATA",
"NETWORK PACKETS ONLY"
]
READOUTMODE = [
"SINGLE",
"2x2"
]
VSR_ADDRESS = [
0x90,
0x91
]
SENSORS_READOUT_OK = 7
HEX_ASCII_CODE = [0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46]
DAC_SCALE_FACTOR = 0.732
SEND_REG_VALUE = 0x40
READ_REG_VALUE = 0x41
SET_REG_BIT = 0x42
CLR_REG_BIT = 0x43
SEND_REG_BURST = 0x44
SEND_REG_STREAM = 0x46 # Currently unused
READ_PWR_VOLT = 0x50
WRITE_REG_VAL = 0x53
WRITE_DAC_VAL = 0x54
CTRL_ADC_DAC = 0x55
# Define timestamp format
DATE_FORMAT = '%Y%m%d_%H%M%S.%f'
def __init__(self, parent, fem_id=1,
             server_ctrl_ip_addr='10.0.2.2', camera_ctrl_ip_addr='10.0.2.1',
             server_data_ip_addr='10.0.4.2', camera_data_ip_addr='10.0.4.1'):
    """
    Initialize the HexitecFem object.

    :param parent: Reference to adapter object
    :param fem_id: HexitecFem object identifier
    :param server_ctrl_ip_addr: PC interface for control path
    :param camera_ctrl_ip_addr: FEM interface for control path
    :param server_data_ip_addr: PC interface for data path
    :param camera_data_ip_addr: FEM interface for data path

    NOTE(review): the two data-path addresses are accepted but never stored
    by this constructor -- confirm whether they are still required.
    """
    # Give access to parent class (Hexitec) - for potential future use
    self.parent = parent
    self.id = int(fem_id)
    self.x10g_rdma = None
    # 10G RDMA IP addresses
    self.server_ctrl_ip_addr = server_ctrl_ip_addr
    self.camera_ctrl_ip_addr = camera_ctrl_ip_addr
    # FPGA base addresses
    self.rdma_addr = {
        "receiver": 0xC0000000,
        "frm_gate": 0xD0000000,
        "reset_monitor": 0x90000000
    }
    # Default image geometry / packing (overridden via set_image_size).
    self.image_size_x = 0x100
    self.image_size_y = 0x100
    self.image_size_p = 0x8
    self.image_size_f = 0x8
    self.strm_mtu = 8000
    self.vsr_addr = HexitecFem.VSR_ADDRESS[0]
    self.number_frames = 10
    self.number_frames_backed_up = 0
    self.first_initialisation = True
    self.hardware_connected = False
    self.hardware_busy = False
    self.ignore_busy = False
    self.health = True
    # Construct path to hexitec source code
    cwd = os.getcwd()
    index = cwd.rfind("control")
    self.base_path = cwd[:index]
    # Variables supporting frames to duration conversion
    self.row_s1 = 135
    self.s1_sph = 1
    self.sph_s2 = 5
    self.frame_rate = 0
    self.duration = 1
    self.duration_enabled = False
    # Bias refresh cycle settings (seconds).
    self.bias_refresh_interval = 60.0
    self.bias_voltage_refresh = False
    self.time_refresh_voltage_held = 3.0
    self.bias_voltage_settle_time = 2.0
    # Acquisition completed, note completion timestamp
    self.acquisition_completed = False
    self.debug = False
    # Diagnostics:
    self.exception_triggered = False
    self.successful_reads = 0
    self.acquisition_duration = ""
    self.status_message = ""
    self.status_error = ""
    self.stop_acquisition = False
    self.initialise_progress = 0
    self.operation_percentage_complete = 0
    self.operation_percentage_steps = 108
    self.selected_sensor = HexitecFem.OPTIONS[2]  # "Sensor_2_1"
    self.sensors_layout = HexitecFem.READOUTMODE[1]  # "2x2"
    # Environmental readings for both VSR boards (filled in by read_sensors;
    # sync values use -1 for "unknown").
    self.vsr1_ambient = 0
    self.vsr1_humidity = 0
    self.vsr1_asic1 = 0
    self.vsr1_asic2 = 0
    self.vsr1_adc = 0
    self.vsr1_hv = 0
    self.vsr1_sync = -1
    self.vsr2_ambient = 0
    self.vsr2_humidity = 0
    self.vsr2_asic1 = 0
    self.vsr2_asic2 = 0
    self.vsr2_adc = 0
    self.vsr2_hv = 0
    self.vsr2_sync = -1
    self.read_firmware_version = True
    self.firmware_date = "N/A"
    self.firmware_time = "N/A"
    # Variables supporting handling of ini-style hexitec config file
    self.hexitec_config = "(Blank)"
    self.hexitec_parameters = {}
    self.acquire_start_time = ""
    self.acquire_stop_time = ""
    self.acquire_time = 0.0
    self.acquire_timestamp = 0
    # Accessors exposed over the adapter's parameter tree; (getter, setter)
    # pairs with None marking read-only / write-only entries.
    param_tree_dict = {
        "diagnostics": {
            "successful_reads": (lambda: self.successful_reads, None),
            "acquire_start_time": (lambda: self.acquire_start_time, None),
            "acquire_stop_time": (lambda: self.acquire_stop_time, None),
            "acquire_time": (lambda: self.acquire_time, None),
        },
        "id": (lambda: self.id, None),
        "debug": (self.get_debug, self.set_debug),
        "frame_rate": (lambda: self.frame_rate, None),
        "health": (lambda: self.health, None),
        "status_message": (self._get_status_message, None),
        "status_error": (self._get_status_error, None),
        "initialise_progress": (self._get_initialise_progress, None),
        "operation_percentage_complete": (self._get_operation_percentage_complete, None),
        "number_frames": (self.get_number_frames, self.set_number_frames),
        "duration": (self.get_duration, self.set_duration),
        "hexitec_config": (lambda: self.hexitec_config, self._set_hexitec_config),
        "read_sensors": (None, self.read_sensors),
        "hardware_connected": (lambda: self.hardware_connected, None),
        "hardware_busy": (lambda: self.hardware_busy, None),
        "firmware_date": (lambda: self.firmware_date, None),
        "firmware_time": (lambda: self.firmware_time, None),
        "vsr1_sync": (lambda: self.vsr1_sync, None),
        "vsr2_sync": (lambda: self.vsr2_sync, None),
        "vsr1_sensors": {
            "ambient": (lambda: self.vsr1_ambient, None),
            "humidity": (lambda: self.vsr1_humidity, None),
            "asic1": (lambda: self.vsr1_asic1, None),
            "asic2": (lambda: self.vsr1_asic2, None),
            "adc": (lambda: self.vsr1_adc, None),
            "hv": (lambda: self.vsr1_hv, None),
        },
        "vsr2_sensors": {
            "ambient": (lambda: self.vsr2_ambient, None),
            "humidity": (lambda: self.vsr2_humidity, None),
            "asic1": (lambda: self.vsr2_asic1, None),
            "asic2": (lambda: self.vsr2_asic2, None),
            "adc": (lambda: self.vsr2_adc, None),
            "hv": (lambda: self.vsr2_hv, None),
        }
    }
    self.waited = 0.0
    self.param_tree = ParameterTree(param_tree_dict)
def __del__(self):
    """Ensure rdma connection closed."""
    # connect() may never have run, so guard against a None handle.
    if self.x10g_rdma is not None:
        self.x10g_rdma.close()
def connect(self, bDebug=False):
    """Set up hardware connection.

    Creates the control-path RdmaUDP object over the configured control
    IP addresses; re-raises socket.error with context on failure.
    NOTE(review): the bDebug parameter is unused here -- debug output is
    explicitly disabled via setDebug(False).
    """
    try:
        self.x10g_rdma = RdmaUDP(self.server_ctrl_ip_addr, 61650,
                                 self.server_ctrl_ip_addr, 61651,
                                 self.camera_ctrl_ip_addr, 61650,
                                 self.camera_ctrl_ip_addr, 61651,
                                 2000000, 9000, 20)
        self.x10g_rdma.setDebug(False)
        self.x10g_rdma.ack = True
    except socket_error as e:
        raise socket_error("Failed to setup Control connection: %s" % e)
    return
def read_sensors(self, msg=None):
    """Read environmental sensors and update the parameter tree with results.

    On the first call also reads the firmware build date/time registers.
    Errors are reported through the status-error parameter rather than
    raised, so the adapter keeps running.

    :param msg: unused; present for parameter-tree setter compatibility
    """
    try:
        # Note once, when firmware was built.
        # Locals renamed from date/time to fw_date/fw_time so the imported
        # 'time' module is no longer shadowed inside this method.
        if self.read_firmware_version:
            fw_date = self.x10g_rdma.read(0x60000015, 'FIRMWARE DATE')
            fw_time = self.x10g_rdma.read(0x60000016, 'FIRMWARE TIME')
            # Registers hold BCD-style hex digits; render as dd/mm/yyyy, hh:mm.
            fw_date = format(fw_date, '#010x')
            fw_time = format(fw_time, '#06x')
            self.firmware_date = "{0:.2}/{1:.2}/{2:.4}".format(fw_date[2:4], fw_date[4:6], fw_date[6:10])
            self.firmware_time = "{0:.2}:{1:.2}".format(fw_time[2:4], fw_time[4:6])
            self.read_firmware_version = False
        # Read both VSR boards, restoring the selected VSR address afterwards.
        vsr = self.vsr_addr
        self.vsr_addr = HexitecFem.VSR_ADDRESS[0]
        self.read_temperatures_humidity_values()
        self.read_pwr_voltages()  # pragma: no cover
        self.vsr_addr = HexitecFem.VSR_ADDRESS[1]  # pragma: no cover
        self.read_temperatures_humidity_values()  # pragma: no cover
        self.read_pwr_voltages()  # pragma: no cover
        self.vsr_addr = vsr  # pragma: no cover
    except (HexitecFemError, ParameterTreeError) as e:
        self._set_status_error("Failed to read sensors: %s" % str(e))
        logging.error("%s" % str(e))
    except Exception as e:
        self._set_status_error("Uncaught Exception; Reading sensors failed: %s" % str(e))
        logging.error("%s" % str(e))
def disconnect(self):
    """Disconnect hardware connection."""
    self.x10g_rdma.close()
def cleanup(self):
    """Cleanup connection (delegates to disconnect)."""
    self.disconnect()
def set_image_size(self, x_size, y_size, p_size, f_size):
    """Set image size, function inherited from JE/RH.

    :param x_size: image width in pixels
    :param y_size: image height in pixels
    :param p_size: pixel size -- appears to be bits per pixel (8 or 11-14);
        TODO confirm
    :param f_size: frame word size -- appears to be bits (8 or 16); TODO confirm

    Validates the geometry against ethernet-packet and local-link frame
    alignment constraints and, if valid, programs the receiver registers.
    """
    # set image size globals
    self.image_size_x = x_size
    self.image_size_y = y_size
    self.image_size_p = p_size
    self.image_size_f = f_size
    # check parameters against ethernet packet and local link frame size compatibility
    pixel_count_max = x_size * y_size
    number_bytes = pixel_count_max * 2
    number_bytes_r4 = pixel_count_max % 4
    number_bytes_r8 = number_bytes % 8
    first_packets = number_bytes // self.strm_mtu
    last_packet_size = number_bytes % self.strm_mtu
    lp_number_bytes_r8 = last_packet_size % 8
    lp_number_bytes_r32 = last_packet_size % 32
    # any non-zero remainder marks an incompatible geometry
    size_status = number_bytes_r4 + number_bytes_r8 + lp_number_bytes_r8 + lp_number_bytes_r32
    # calculate pixel packing settings
    if p_size >= 11 and p_size <= 14 and f_size == 16:
        pixel_count_max = pixel_count_max // 2
    elif p_size == 8 and f_size == 8:
        pixel_count_max = pixel_count_max // 4  # pragma: no cover
    else:
        # unsupported p/f combination counts as a size error
        size_status = size_status + 1
    # Set up registers if no size errors
    if size_status != 0:
        logging.error("%-32s %8i %8i %8i %8i %8i %8i" %
                      ('Size error', number_bytes, number_bytes_r4, number_bytes_r8,
                       first_packets, lp_number_bytes_r8, lp_number_bytes_r32))
    else:
        address = self.rdma_addr["receiver"] | 0x01
        data = (pixel_count_max & 0x1FFFF) - 1
        self.x10g_rdma.write(address, data, 'pixel count max')
        self.x10g_rdma.write(self.rdma_addr["receiver"] + 4, 0x3, 'pixel bit size => 16 bit')
    return
def frame_gate_trigger(self):
"""Reset monitors, pulse frame gate."""
# the reset of monitors suggested by Rob:
self.x10g_rdma.write(self.rdma_addr["reset_monitor"] + 0, 0x0, 'reset monitor off')
self.x10g_rdma.write(self.rdma_addr["reset_monitor"] + 0, 0x1, 'reset monitor on')
self.x10g_rdma.write(self.rdma_addr["reset_monitor"] + 0, 0x0, 'reset monitor off')
self.x10g_rdma.write(self.rdma_addr["frm_gate"] + 0, 0x0, 'frame gate trigger off')
self.x10g_rdma.write(self.rdma_addr["frm_gate"] + 0, 0x1, 'frame gate trigger on')
self.x10g_rdma.write(self.rdma_addr["frm_gate"] + 0, 0x0, 'frame gate trigger off')
def frame_gate_settings(self, frame_number, frame_gap):
"""Set frame gate settings."""
self.x10g_rdma.write(self.rdma_addr["frm_gate"] + 1, frame_number,
'frame gate frame number')
self.x10g_rdma.write(self.rdma_addr["frm_gate"] + 2, frame_gap, 'frame gate frame gap')
    def data_stream(self, num_images):
        """Trigger FEM to output data.

        Programs the frame gate with num_images - 1 (the firmware register
        appears to hold a zero-based count — see acquire_data) and pulses
        the frame gate trigger.

        :param num_images: number of frames to stream out
        """
        self.frame_gate_settings(num_images - 1, 0)
        self.frame_gate_trigger()
    def _get_operation_percentage_complete(self):
        """Return progress of the current operation as a percentage."""
        return self.operation_percentage_complete
    def _get_initialise_progress(self):
        """Return the initialisation progress counter."""
        return self.initialise_progress
    def _get_status_message(self):
        """Return the current status message."""
        return self.status_message
    def _set_status_message(self, message):
        """Set the status message shown to the user."""
        self.status_message = message
    def _get_status_error(self):
        """Return the current error string ("" when healthy)."""
        return self.status_error
def _set_status_error(self, error):
self.health = True if error == "" else False
self.status_error = str(error)
    def set_duration_enable(self, duration_enabled):
        """Select acquisition by duration (enable) or by number of frames (disable)."""
        self.duration_enabled = duration_enabled
    def get_number_frames(self):
        """Get number of frames to acquire."""
        return self.number_frames
def set_number_frames(self, frames):
"""Set number of frames, initialise frame_rate if not set."""
self.frame_rate = 1 if (self.frame_rate == 0) else self.frame_rate
if self.number_frames != frames:
self.number_frames = frames
self.duration = self.number_frames / self.frame_rate
    def get_duration(self):
        """Get acquisition duration."""
        return self.duration
def set_duration(self, duration):
"""Set duration, calculate frames to acquire using frame rate."""
self.duration = duration
frames = self.duration * self.frame_rate
self.number_frames = int(round(frames))
    def get_health(self):
        """Get FEM health status (True when no error is recorded)."""
        return self.health
    def get_id(self):
        """Get FEM id."""
        return self.id
    def _start_polling(self):  # pragma: no cover
        """Kick off the sensor polling loop on the IOLoop."""
        IOLoop.instance().add_callback(self.poll_sensors)
    def poll_sensors(self):
        """Poll hardware while connected but not busy initialising, collecting offsets, etc."""
        if self.hardware_connected and (self.hardware_busy is False):
            self.read_sensors()
        # Re-schedule this method on the IOLoop once a second
        IOLoop.instance().call_later(1.0, self.poll_sensors)
    def connect_hardware(self, msg=None):
        """Connect with hardware, wait 10 seconds for the VSRs' FPGAs to initialise.

        :param msg: ignored; reassigned locally (presumably present to match
            the parameter tree callback signature — TODO confirm)
        """
        try:
            if self.hardware_connected:
                raise ParameterTreeError("Connection already established")
            else:
                self._set_status_error("")
                self.operation_percentage_complete = 0
                self._set_status_message("Connecting to camera..")
                self.cam_connect()
                msg = "Camera connected. Waiting for VSRs' FPGAs to initialise.."
                self._set_status_message(msg)
                # Sets hardware_busy; cleared later by initialisation_check_loop
                self._wait_while_fpgas_initialise()
            self.initialise_progress = 0
        except ParameterTreeError as e:
            self._set_status_error("%s" % str(e))
        except HexitecFemError as e:
            self._set_status_error("Failed to connect with camera: %s" % str(e))
            self._set_status_message("Is the camera powered?")
            logging.error("%s" % str(e))
        except Exception as e:
            error = "Uncaught Exception; Camera connection: %s" % str(e)
            self._set_status_error(error)
            logging.error("Camera connection: %s" % str(e))
        # Cannot raise error beyond current thread
        # print("\n\nReinstate polling before merging with master !\n\n")
        # Start polling thread (connect successfully set up)
        if len(self.status_error) == 0:
            self._start_polling()
    def initialise_hardware(self, msg=None):
        """Initialise sensors, load enables, etc to initialise both VSR boards.

        :param msg: unused; presumably matches the parameter tree callback
            signature — TODO confirm
        """
        try:
            if self.hardware_connected is not True:
                raise ParameterTreeError("No connection established")
            if self.hardware_busy:
                raise HexitecFemError("Hardware sensors busy initialising")
            else:
                self._set_status_error("")
                self.hardware_busy = True
                self.operation_percentage_complete = 0
                # Progress is advanced one step per send_cmd() call
                self.operation_percentage_steps = 108
                self.initialise_system()
            if self.first_initialisation:
                # On cold start: Fudge initialisation to include silently capturing data without
                # writing to disk, giving the user option to collect images with offsets
                # without requiring a dummy data collection
                # "Tell" collect_data function hardware isn't busy, or it'll throw error
                self.ignore_busy = True
                if self.parent.daq.in_progress:
                    logging.warning("Cannot Start Acquistion: Already in progress")
                else:
                    # Start daq, expecting to collect 2 token frames
                    # Token gesture as file writing disabled
                    self.parent.daq.start_acquisition(2)
                    for fem in self.parent.fems:
                        fem.collect_data()
            else:
                # Not cold initialisation, clear hardware_busy here
                self.hardware_busy = False
            self.initialise_progress = 0
        except (HexitecFemError, ParameterTreeError) as e:
            self._set_status_error("Failed to initialise camera: %s" % str(e))
            logging.error("%s" % str(e))
        except Exception as e:
            self._set_status_error("Uncaught Exception; Camera initialisation failed: %s" % str(e))
            logging.error("%s" % str(e))
    def collect_data(self, msg=None):
        """Acquire data from camera.

        :param msg: unused; presumably matches the parameter tree callback
            signature — TODO confirm
        """
        try:
            if self.hardware_connected is not True:
                raise ParameterTreeError("No connection established")
            if self.hardware_busy and (self.ignore_busy is False):
                raise HexitecFemError("Hardware sensors busy initialising")
            else:
                self._set_status_error("")
                # Clear ignore_busy if set (one-shot override used by cold start)
                if self.ignore_busy:
                    self.ignore_busy = False
                self.hardware_busy = True
                self.operation_percentage_complete = 0
                self.operation_percentage_steps = 100
                self._set_status_message("Acquiring data..")
                self.acquire_data()
        except (HexitecFemError, ParameterTreeError) as e:
            self._set_status_error("Failed to collect data: %s" % str(e))
            logging.error("%s" % str(e))
        except Exception as e:
            self._set_status_error("Uncaught Exception; Data collection failed: %s" % str(e))
            logging.error("%s" % str(e))
    def disconnect_hardware(self, msg=None):
        """Disconnect camera.

        :param msg: unused; presumably matches the parameter tree callback
            signature — TODO confirm
        """
        try:
            if self.hardware_connected is False:
                raise ParameterTreeError("No connection to disconnect")
            else:
                self._set_status_error("")
                # Stop acquisition if it's hung
                if self.operation_percentage_complete < 100:
                    self.stop_acquisition = True
                self.hardware_connected = False
                self.operation_percentage_complete = 0
                self._set_status_message("Disconnecting camera..")
                self.cam_disconnect()
                self._set_status_message("Camera disconnected")
                self.operation_percentage_complete = 100
                self.initialise_progress = 0
        except (HexitecFemError, ParameterTreeError) as e:
            self._set_status_error("Failed to disconnect: %s" % str(e))
            logging.error("%s" % str(e))
        except Exception as e:
            self._set_status_error("Uncaught Exception; Disconnection failed: %s" % str(e))
            logging.error("%s" % str(e))
    def set_debug(self, debug):
        """Set debug messages on or off."""
        self.debug = debug
    def get_debug(self):
        """Get debug messages status."""
        return self.debug
    def _wait_while_fpgas_initialise(self):
        """Set up to wait 10 seconds to allow VSRs' FPGAs to initialise.

        Marks the hardware busy and schedules initialisation_check_loop()
        on the IOLoop; that loop clears hardware_busy once the wait is over.
        """
        self.hardware_busy = True
        self.start = time.time()
        self.delay = 10
        IOLoop.instance().call_later(1.0, self.initialisation_check_loop)
    def initialisation_check_loop(self):
        """Check for error and call itself each second until 10 second delay fulfilled."""
        # Bail out early if an error was flagged while waiting
        if len(self.status_error) > 0:
            self.operation_percentage_complete = 0
            self.hardware_busy = False
            return
        self.delay = time.time() - self.start
        # Ten ~1 s iterations: +10% progress per tick
        self.operation_percentage_complete += 10
        if (self.delay < 10):
            IOLoop.instance().call_later(1.0, self.initialisation_check_loop)
        else:
            # Waited long enough; the FPGAs should now be up
            self.hardware_busy = False
            self._set_status_message("Camera connected. FPGAs initialised.")
def send_cmd(self, cmd, track_progress=True):
"""Send a command string to the microcontroller."""
if track_progress:
self.initialise_progress += 1
self.operation_percentage_complete = (self.initialise_progress * 100) \
// self.operation_percentage_steps
while len(cmd) % 4 != 0:
cmd.append(13)
if self.debug:
logging.debug("Length of command - %s %s" % (len(cmd), len(cmd) % 4))
for i in range(0, len(cmd) // 4):
reg_value = 256 * 256 * 256 * cmd[(i * 4)] + 256 * 256 * cmd[(i * 4) + 1] \
+ 256 * cmd[(i * 4) + 2] + cmd[(i * 4) + 3]
self.x10g_rdma.write(0xE0000100, reg_value, 'Write 4 Bytes')
    def read_response(self):
        """Read a VSR's microcontroller response, passed on by the FEM.

        Polls the FEM's UART FIFO and reads the response four bytes (one
        32-bit word) at a time until a carriage return is seen. NOTE(review):
        the terminator is only detected in the last byte of a word — commands
        and replies appear to be padded to 4-byte words, confirm.

        :return: the response as a string, with the leading start byte omitted
        :raises HexitecFemError: if the FIFO stays empty for too many polls
        """
        data_counter = 0
        f = []
        ABORT_VALUE = 10000
        RETURN_START_CHR = 42
        CARRIAGE_RTN = 13
        FIFO_EMPTY_FLAG = 1
        empty_count = 0
        daty = RETURN_START_CHR
        # Example: daty will contain:
        # 0x23, self.vsr_addr, HexitecFem.SEND_REG_VALUE, 0x30, 0x41, 0x30, 0x30, 0x0D
        # JE modifications
        daty1, daty2 = RETURN_START_CHR, RETURN_START_CHR
        daty3, daty4 = RETURN_START_CHR, RETURN_START_CHR
        while daty != CARRIAGE_RTN:
            # Spin until the FIFO has data, giving up after ABORT_VALUE polls
            fifo_empty = FIFO_EMPTY_FLAG
            while fifo_empty == FIFO_EMPTY_FLAG and empty_count < ABORT_VALUE:
                fifo_empty = self.x10g_rdma.read(0xE0000011, 'FIFO EMPTY FLAG')
                empty_count = empty_count + 1
            # Unpack the 32-bit word into four response bytes, MSB first
            dat = self.x10g_rdma.read(0xE0000200, 'Data')
            daty = (dat >> 24) & 0xFF
            f.append(daty)
            daty1 = daty
            daty = (dat >> 16) & 0xFF
            f.append(daty)
            daty2 = daty
            daty = (dat >> 8) & 0xFF
            f.append(daty)
            daty3 = daty
            daty = dat & 0xFF
            f.append(daty)
            daty4 = daty
            if self.debug:
                logging.debug('{0:0{1}x} {2:0{3}x} {4:0{5}x} {6:0{7}x}'.format(daty1, 2, daty2, 2,
                                                                               daty3, 2, daty4, 2))
            data_counter = data_counter + 1
            if empty_count == ABORT_VALUE:
                logging.error("Error: read_response from FEM aborted")
                self.exception_triggered = True
                raise HexitecFemError("read_response aborted")
            empty_count = 0
        # Diagnostics: Count number of successful reads before 1st Exception thrown
        if self.exception_triggered is False:
            self.successful_reads += 1
        if self.debug:
            logging.debug("Counter is :- %s Length is:- %s" % (data_counter, len(f)))
        fifo_empty = self.x10g_rdma.read(0xE0000011, 'Data')
        if self.debug:
            logging.debug("FIFO should be empty: %s" % fifo_empty)
        # Assemble the reply string, skipping the leading start character f[0]
        s = ''
        for i in range(1, data_counter * 4):
            s = s + chr(f[i])
        if self.debug:
            logging.debug("String :- %s" % s)
            logging.debug(f[0])
            logging.debug(f[1])
            logging.debug(f[2])
            logging.debug(f[3])
        return s
    def cam_connect(self):
        """Send commands to connect camera.

        Opens the connection, then sends command 0xE3 to both VSR boards
        (the "Modules Enabled" log suggests 0xE3 enables a module).

        :raises HexitecFemError: wrapping any socket error
        """
        self.hardware_connected = True
        logging.debug("Connecting camera")
        try:
            self.connect()
            logging.debug("Camera connected")
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], 0xE3, 0x0D])
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], 0xE3, 0x0D])
            logging.debug("Modules Enabled")
        except socket_error as e:
            self.hardware_connected = False
            raise HexitecFemError(e)
    def cam_disconnect(self):
        """Send commands to disconnect camera.

        Sends command 0xE2 to both VSRs ("Modules Disabled"), then closes
        the connection.

        :raises HexitecFemError: on socket errors, or if there was no
            active connection to operate on
        """
        self.hardware_connected = False
        try:
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], 0xE2, 0x0D])
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], 0xE2, 0x0D])
            logging.debug("Modules Disabled")
            self.disconnect()
            logging.debug("Camera is Disconnected")
        except socket_error as e:
            logging.error("Unable to disconnect camera: %s" % str(e))
            raise HexitecFemError(e)
        except AttributeError as e:
            # AttributeError here presumably means connect() was never called,
            # so there is no connection object to call through — TODO confirm
            logging.error("Unable to disconnect camera: %s" % "No active connection")
            raise HexitecFemError("%s; %s" % (e, "No active connection"))
    def initialise_sensor(self):
        """Initialise sensors attached to selected VSR.

        Selects the VSR via the FEMII multiplexer register, configures the
        synchronisation state machine for the current readout mode, enables
        the LVDS outputs on both VSRs and sets up the LVDS test pattern.
        """
        self.x10g_rdma.write(0x60000002, 0, 'Disable State Machine Trigger')
        logging.debug("Disable State Machine Enabling signal")
        if self.selected_sensor == HexitecFem.OPTIONS[0]:
            self.x10g_rdma.write(0x60000004, 0, 'Set bit 0 to 1 to generate test pattern in FEMII, \
                bits [2:1] select which of the 4 sensors is read - data 1_1')
            logging.debug("Initialising sensors on board VSR_1")
            self.vsr_addr = HexitecFem.VSR_ADDRESS[0]
        if self.selected_sensor == HexitecFem.OPTIONS[2]:
            self.x10g_rdma.write(0x60000004, 4, 'Set bit 0 to 1 to generate test pattern in FEMII, \
                bits [2:1] select which of the 4 sensors is read - data 2_1')
            logging.debug("Initialising sensors on board VSR 2")
            self.vsr_addr = HexitecFem.VSR_ADDRESS[1]
        if self.sensors_layout == HexitecFem.READOUTMODE[0]:
            logging.debug("Disable synchronisation SM start")
            # Register "0A" := "00" (ASCII payload) — single sensor readout
            self.send_cmd([0x23, self.vsr_addr, HexitecFem.SEND_REG_VALUE, 0x30, 0x41,
                           0x30, 0x30, 0x0D])
            self.read_response()
            logging.debug("Reading out single sensor")
        elif self.sensors_layout == HexitecFem.READOUTMODE[1]:
            logging.debug("Enable synchronisation SM start via trigger 1")
            # Register "0A", set bit mask "01" — 2x2 readout
            self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT, 0x30, 0x41,
                           0x30, 0x31, 0x0D])
            self.read_response()
            logging.debug("Reading out 2x2 sensors")
        logging.debug("Communicating with - %s" % self.vsr_addr)
        # Set Frame Gen Mux Frame Gate
        self.x10g_rdma.write(0x60000001, 2, 'Set Frame Gen Mux Frame Gate - works set to 2')
        logging.debug("Enable Test Pattern in my VSR design")
        # Use Sync clock from DAQ board
        logging.debug("Use Sync clock from DAQ board")
        # Register "01", set bit mask "10" (bit 4)
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT, 0x30, 0x31, 0x31, 0x30, 0x0D])
        self.read_response()
        logging.debug("Enable LVDS outputs")
        # Register "01", set bit mask "20" (bit 5) — on both VSRs
        set_register_vsr1_command = [0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.SET_REG_BIT,
                                     0x30, 0x31, 0x32, 0x30, 0x0D]
        set_register_vsr2_command = [0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.SET_REG_BIT,
                                     0x30, 0x31, 0x32, 0x30, 0x0D]
        self.send_cmd(set_register_vsr1_command)
        self.read_response()
        self.send_cmd(set_register_vsr2_command)
        self.read_response()
        logging.debug("LVDS outputs enabled")
        # TODO: Did James mean "0x45" rather than "0x4E"..?
        # The register is called "Serial Training Pattern" in documentation
        logging.debug("Read LO IDLE")
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.SEND_REG_VALUE,
                       0x46, 0x45, 0x41, 0x41, 0x0D])
        self.read_response()
        logging.debug("Read HI IDLE")
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.SEND_REG_VALUE,
                       0x46, 0x46, 0x4E, 0x41, 0x0D])
        self.read_response()
        # This sets up test pattern on LVDS outputs
        logging.debug("Set up LVDS test pattern")
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.CLR_REG_BIT, 0x30, 0x31, 0x43, 0x30, 0x0D])
        self.read_response()
        # Use default test pattern of 1000000000000000
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT, 0x30, 0x31, 0x38, 0x30, 0x0D])
        self.read_response()
        full_empty = self.x10g_rdma.read(0x60000011, 'Check EMPTY Signals')
        logging.debug("Check EMPTY Signals: %s" % full_empty)
        full_empty = self.x10g_rdma.read(0x60000012, 'Check FULL Signals')
        logging.debug("Check FULL Signals: %s" % full_empty)
def debug_reg24(self): # pragma: no cover
"""Debug function: Display contents of register 24."""
self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.READ_REG_VALUE,
0x32, 0x34, 0x0D])
vsr2 = self.read_response().strip("\r")
self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.READ_REG_VALUE,
0x32, 0x34, 0x0D])
vsr1 = self.read_response().strip("\r")
return (vsr2, vsr1)
def debug_reg89(self): # pragma: no cover
"""Debug function: Display contents of register 89."""
self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.READ_REG_VALUE,
0x38, 0x39, 0x0D])
vsr2 = self.read_response().strip("\r")
self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.READ_REG_VALUE,
0x38, 0x39, 0x0D])
vsr1 = self.read_response().strip("\r")
return (vsr2, vsr1)
    def calibrate_sensor(self):  # noqa: C901
        """Calibrate sensors attached to targeted VSR.

        Sets the image size for the current readout mode, captures an
        average picture with VCAL enabled, triggers the FPGA calibration
        process, records the LVDS sync state, then re-enables VCAL and
        dark correction and waits for the PLL lock bit in register 0x89.

        :return: the sync register value read during calibration
        :raises HexitecFemError: if the PLL does not lock within ~3 seconds
        """
        if self.sensors_layout == HexitecFem.READOUTMODE[0]:
            logging.debug("Reading out single sensor")
            self.set_image_size(80, 80, 14, 16)
        elif self.sensors_layout == HexitecFem.READOUTMODE[1]:
            self.set_image_size(160, 160, 14, 16)
            logging.debug("Reading out 2x2 sensors")
        logging.debug("Clear bit 5")
        # Clear bit; Register 0x24, bit5: disable VCAL (i.e. VCAL is here ENABLED)
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.CLR_REG_BIT, 0x32, 0x34, 0x32, 0x30, 0x0D])
        self.read_response()
        logging.debug("Set bit 6")
        # Clear bit; Register 0x24, bit6: test mode
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.CLR_REG_BIT, 0x32, 0x34, 0x34, 0x30, 0x0D])
        self.read_response()
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.READ_REG_VALUE, 0x30, 0x31, 0x0D])
        self.read_response()
        # Set bit; Register 0x24, bit5 (disable VCAL), bit1 (capture average picture)
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT, 0x32, 0x34, 0x32, 0x32, 0x0D])
        self.read_response()
        if self.selected_sensor == HexitecFem.OPTIONS[0]:
            self.x10g_rdma.write(0x60000002, 1, 'Trigger Cal process : Bit1 - VSR2, Bit 0 - VSR1 ')
            logging.debug("CALIBRATING VSR_1")
        if self.selected_sensor == HexitecFem.OPTIONS[2]:
            self.x10g_rdma.write(0x60000002, 2, 'Trigger Cal process : Bit1 - VSR2, Bit 0 - VSR1 ')
            logging.debug("CALIBRATING VSR_2")
        # Send command on CMD channel to FEMII
        self.x10g_rdma.write(0x60000002, 0, 'Un-Trigger Cal process')
        # Reading back Sync register
        synced = self.x10g_rdma.read(0x60000010, 'Check LVDS has synced')
        logging.debug("Sync Register value")
        full_empty = self.x10g_rdma.read(0x60000011, 'Check FULL EMPTY Signals')
        logging.debug("Check EMPTY Signals: %s" % full_empty)
        full_empty = self.x10g_rdma.read(0x60000012, 'Check FULL FULL Signals')
        logging.debug("Check FULL Signals: %s" % full_empty)
        # Check whether the currently selected VSR has synchronised or not
        if synced == 15:  # pragma: no cover
            logging.debug("All Links on VSR's 1 and 2 synchronised")
            logging.debug("Starting State Machine in VSR's")
        elif synced == 12:  # pragma: no cover
            logging.debug("Both Links on VSR 2 synchronised")
        elif synced == 3:  # pragma: no cover
            logging.debug("Both Links on VSR 1 synchronised")
        else:
            logging.debug(synced)
        # Record the sync state against the VSR it belongs to
        if (self.vsr_addr == HexitecFem.VSR_ADDRESS[0]):
            self.vsr1_sync = synced
        elif (self.vsr_addr == HexitecFem.VSR_ADDRESS[1]):
            self.vsr2_sync = synced
        # Clear training enable
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.CLR_REG_BIT, 0x30, 0x31, 0x43, 0x30, 0x0D])
        self.read_response()
        logging.debug("Clear bit 5 - VCAL ENABLED")
        # Clear bit; Register 0x24, bit5: disable VCAL (i.e. VCAL is here ENABLED)
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.CLR_REG_BIT, 0x32, 0x34,
                       0x32, 0x30, 0x0D])
        self.read_response()
        logging.debug("DARK CORRECTION ON")
        # Set bit; Register 0x24, bit3: enable DC spectroscopic mode
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT, 0x32, 0x34,
                       0x30, 0x38, 0x0D])
        self.read_response()
        # Read Reg24
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.READ_REG_VALUE, 0x32, 0x34, 0x0D])
        if self.debug:
            logging.debug("Reading Register 0x24")
            logging.debug(self.read_response())
        else:
            self.read_response()
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.READ_REG_VALUE, 0x38, 0x39, 0x0D])
        self.read_response()
        if self.debug:
            logging.debug("Poll register 0x89")
        # Poll register 0x89 until its PLL lock bit (bit 1) goes high
        bPolling = True
        time_taken = 0
        while bPolling:
            self.send_cmd([0x23, self.vsr_addr, HexitecFem.READ_REG_VALUE, 0x38, 0x39, 0x0D])
            reply = self.read_response()
            reply = reply.strip()
            # The final hex digit of the reply carries the status bits
            Bit1 = int(reply[-1], 16)
            if self.debug:
                logging.debug("Register 0x89, Bit1: %s" % Bit1)
            # Is PLL locked? (Bit1 high)
            if Bit1 & 2:
                bPolling = False
            else:
                time.sleep(0.1)
                time_taken += 0.1
            if time_taken > 3.0:
                raise HexitecFemError("Timed out polling register 0x89; PLL remains disabled")
        if self.debug:
            logging.debug("Bit 1 should be 1")
            logging.debug(reply)
            logging.debug("Read reg 1")
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.READ_REG_VALUE, 0x30, 0x31, 0x0D])
        self.read_response()
        full_empty = self.x10g_rdma.read(0x60000011, 'Check FULL EMPTY Signals')
        logging.debug("Check EMPTY Signals: %s" % full_empty)
        full_empty = self.x10g_rdma.read(0x60000012, 'Check FULL FULL Signals')
        logging.debug("Check FULL Signals: %s" % full_empty)
        return synced
    def print_firmware_info(self):  # pragma: no cover
        """Print info on loaded firmware.

        0x80: F/W customer ID
        0x81: F/W Project ID
        0x82: F/W Version ID.
        """
        print("__________F/W Customer, Project, and Version IDs__________")
        for index in range(3):
            # Registers 0x80-0x82, addressed as ASCII digits ('8', '0'+index)
            (vsr2, vsr1) = self.debug_register(0x38, 0x30+index)
            print(" Register 0x8{}, VSR2: {} VSR1: {}".format(index, vsr2, vsr1))
        print("__________________________________________________________")
    def acquire_data(self):  # noqa: C901
        """Acquire data, polls fem for completion and reads out fem monitors.

        Programs the frame gate, selects the sensor via the multiplexer,
        flushes the input FIFOs, starts the data stream and schedules
        check_acquire_finished() on the IOLoop to poll for completion.
        """
        # If called as part of cold initialisation, only need one frame so
        # temporarily overwrite UI's number of frames for this call only
        self.number_frames_backed_up = self.number_frames
        if self.first_initialisation:
            # Don't set to 1, as rdma write subtracts 1 (and 0 = continuous readout!)
            self.number_frames = 2
        self.x10g_rdma.write(0xD0000001, self.number_frames - 1, 'Frame Gate set to \
            self.number_frames')
        full_empty = self.x10g_rdma.read(0x60000011, 'Check FULL EMPTY Signals')
        logging.debug("Check EMPTY Signals: %s" % full_empty)
        full_empty = self.x10g_rdma.read(0x60000012, 'Check FULL FULL Signals')
        logging.debug("Check FULL Signals: %s" % full_empty)
        if self.sensors_layout == HexitecFem.READOUTMODE[0]:
            logging.debug("Reading out single sensor")
            mux_mode = 0
        elif self.sensors_layout == HexitecFem.READOUTMODE[1]:
            mux_mode = 8
            logging.debug("Reading out 2x2 sensors")
        if self.selected_sensor == HexitecFem.OPTIONS[0]:
            self.x10g_rdma.write(0x60000004, 0 + mux_mode, 'Sensor 1 1')
            logging.debug("Sensor 1 1")
        if self.selected_sensor == HexitecFem.OPTIONS[2]:
            self.x10g_rdma.write(0x60000004, 4 + mux_mode, 'Sensor 2 1')
            logging.debug("Sensor 2 1")
        # Flush the input FIFO buffers
        self.x10g_rdma.write(0x60000002, 32, 'Clear Input Buffers')
        self.x10g_rdma.write(0x60000002, 0, 'Clear Input Buffers')
        full_empty = self.x10g_rdma.read(0x60000011, 'Check EMPTY Signals')
        logging.debug("Check EMPTY Signals: %s" % full_empty)
        full_empty = self.x10g_rdma.read(0x60000012, 'Check FULL Signals')
        logging.debug("Check FULL Signals: %s" % full_empty)
        if self.sensors_layout == HexitecFem.READOUTMODE[1]:
            self.x10g_rdma.write(0x60000002, 4, 'Enable State Machine')
        if self.debug:
            logging.debug("number of Frames := %s" % self.number_frames)
        logging.debug("Initiate Data Capture")
        self.data_stream(self.number_frames)
        self.acquire_start_time = '%s' % (datetime.now().strftime(HexitecFem.DATE_FORMAT))
        # How to convert datetime object to float?
        self.acquire_timestamp = time.time()
        # Poll for completion on the IOLoop rather than blocking here
        self.waited = 0.1
        IOLoop.instance().call_later(0.1, self.check_acquire_finished)
    def check_acquire_finished(self):
        """Check whether all data transferred, until completed or cancelled by user.

        Re-schedules itself on the IOLoop every `delay` seconds until the
        firmware reports the transfer done, or the user cancels.
        """
        try:
            delay = 0.10
            reply = 0
            # Stop if user clicked on Cancel button
            if (self.stop_acquisition):
                logging.debug(" -=-=-=- HexitecFem told to cancel acquisition -=-=-=-")
                self.acquire_data_completed()
                return
            else:
                reply = self.x10g_rdma.read(0x60000014, 'Check data transfer completed?')
                if reply > 0:
                    self.acquire_data_completed()
                    return
                else:
                    self.waited += delay
                    IOLoop.instance().call_later(delay, self.check_acquire_finished)
                    return
        except (HexitecFemError, ParameterTreeError) as e:
            self._set_status_error("Failed to collect data: %s" % str(e))
            logging.error("%s" % str(e))
        except Exception as e:
            self._set_status_error("Uncaught Exception; Data collection failed: %s" % str(e))
            logging.error("%s" % str(e))
        # Acquisition interrupted (only reached via an exception above)
        self.acquisition_completed = True
        if self.first_initialisation:
            self.first_initialisation_done_update_gui()
def acquire_data_completed(self):
"""Reset variables and read out Firmware monitors post data transfer."""
self.acquire_stop_time = '%s' % (datetime.now().strftime(HexitecFem.DATE_FORMAT))
# Stop the state machine
self.x10g_rdma.write(0x60000002, 0, 'Dis-Enable State Machine')
# Clear enable signal
self.x10g_rdma.write(0xD0000000, 2, 'Clear enable signal')
self.x10g_rdma.write(0xD0000000, 0, 'Clear enable signal')
if self.stop_acquisition:
logging.error("Acquisition stopped prematurely")
# Reset variables
self.stop_acquisition = False
self.operation_percentage_complete = 100
self.initialise_progress = 0
self.hardware_busy = False
self.acquisition_completed = True
if self.first_initialisation:
self.first_initialisation_done_update_gui()
raise HexitecFemError("User cancelled Acquire")
else:
waited = str(self.waited)
logging.debug("Capturing {} frames took {} s".format(str(self.number_frames), waited))
duration = "Requested {} frame(s), took {} seconds".format(self.number_frames, waited)
self._set_status_message(duration)
# Save duration to separate parameter tree entry:
self.acquisition_duration = duration
logging.debug("Acquisition Completed, enable signal cleared")
# Clear the Mux Mode bit
if self.selected_sensor == HexitecFem.OPTIONS[0]:
self.x10g_rdma.write(0x60000004, 0, 'Sensor 1 1')
logging.debug("Sensor 1 1")
if self.selected_sensor == HexitecFem.OPTIONS[2]:
self.x10g_rdma.write(0x60000004, 4, 'Sensor 2 1')
logging.debug("Sensor 2 1")
full_empty = self.x10g_rdma.read(0x60000011, 'Check EMPTY Signals')
logging.debug("Check EMPTY Signals: %s" % full_empty)
full_empty = self.x10g_rdma.read(0x60000012, 'Check FULL Signals')
logging.debug("Check FULL Signals: %s" % full_empty)
no_frames = self.x10g_rdma.read(0xD0000001, 'Check Number of Frames setting') + 1
logging.debug("Number of Frames: %s" % no_frames)
logging.debug("Output from Sensor")
m0 = self.x10g_rdma.read(0x70000010, 'frame last length')
logging.debug("frame last length: %s" % m0)
m0 = self.x10g_rdma.read(0x70000011, 'frame max length')
logging.debug("frame max length: %s" % m0)
m0 = self.x10g_rdma.read(0x70000012, 'frame min length')
logging.debug("frame min length: %s" % m0)
m0 = self.x10g_rdma.read(0x70000013, 'frame number')
logging.debug("frame number: %s" % m0)
m0 = self.x10g_rdma.read(0x70000014, 'frame last clock cycles')
logging.debug("frame last clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x70000015, 'frame max clock cycles')
logging.debug("frame max clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x70000016, 'frame min clock cycles')
logging.debug("frame min clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x70000017, 'frame data total')
logging.debug("frame data total: %s" % m0)
m0 = self.x10g_rdma.read(0x70000018, 'frame data total clock cycles')
logging.debug("frame data total clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x70000019, 'frame trigger count')
logging.debug("frame trigger count: %s" % m0)
m0 = self.x10g_rdma.read(0x7000001A, 'frame in progress flag')
logging.debug("frame in progress flag: %s" % m0)
logging.debug("Output from Frame Gate")
m0 = self.x10g_rdma.read(0x80000010, 'frame last length')
logging.debug("frame last length: %s" % m0)
m0 = self.x10g_rdma.read(0x80000011, 'frame max length')
logging.debug("frame max length: %s" % m0)
m0 = self.x10g_rdma.read(0x80000012, 'frame min length')
logging.debug("frame min length: %s" % m0)
m0 = self.x10g_rdma.read(0x80000013, 'frame number')
logging.debug("frame number: %s" % m0)
m0 = self.x10g_rdma.read(0x80000014, 'frame last clock cycles')
logging.debug("frame last clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x80000015, 'frame max clock cycles')
logging.debug("frame max clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x80000016, 'frame min clock cycles')
logging.debug("frame min clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x80000017, 'frame data total')
logging.debug("frame data total: %s" % m0)
m0 = self.x10g_rdma.read(0x80000018, 'frame data total clock cycles')
logging.debug("frame data total clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x80000019, 'frame trigger count')
logging.debug("frame trigger count: %s" % m0)
m0 = self.x10g_rdma.read(0x8000001A, 'frame in progress flag')
logging.debug("frame in progress flag: %s" % m0)
logging.debug("Input to XAUI") # Conn'd to 10G core going out
m0 = self.x10g_rdma.read(0x90000010, 'frame last length')
logging.debug("frame last length: %s" % m0)
m0 = self.x10g_rdma.read(0x90000011, 'frame max length')
logging.debug("frame max length: %s" % m0)
m0 = self.x10g_rdma.read(0x90000012, 'frame min length')
logging.debug("frame min length: %s" % m0)
m0 = self.x10g_rdma.read(0x90000013, 'frame number')
logging.debug("frame number: %s" % m0)
m0 = self.x10g_rdma.read(0x90000014, 'frame last clock cycles')
logging.debug("frame last clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x90000015, 'frame max clock cycles')
logging.debug("frame max clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x90000016, 'frame min clock cycles')
logging.debug("frame min clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x90000017, 'frame data total')
logging.debug("frame data total: %s" % m0)
m0 = self.x10g_rdma.read(0x90000018, 'frame data total clock cycles')
logging.debug("frame data total clock cycles: %s" % m0)
m0 = self.x10g_rdma.read(0x90000019, 'frame trigger count')
logging.debug("frame trigger count: %s" % m0)
m0 = self.x10g_rdma.read(0x9000001A, 'frame in progress flag')
logging.debug("frame in progress flag: %s" % m0)
# Fem finished sending data/monitoring info, clear hardware busy
self.hardware_busy = False
# Workout exact duration of fem data transmission:
self.acquire_time = float(self.acquire_stop_time.split("_")[1]) \
- float(self.acquire_start_time.split("_")[1])
start_ = datetime.strptime(self.acquire_start_time, HexitecFem.DATE_FORMAT)
stop_ = datetime.strptime(self.acquire_stop_time, HexitecFem.DATE_FORMAT)
self.acquire_time = (stop_ - start_).total_seconds()
# Wrap up by updating GUI
self.operation_percentage_complete = 100
self.initialise_progress = 0
# Acquisition completed, note completion
self.acquisition_completed = True
if self.first_initialisation:
self.first_initialisation_done_update_gui()
self._set_status_message("Initialisation from cold completed")
    def first_initialisation_done_update_gui(self):
        """Reset related variables.

        Clears the cold-start flag and restores the frame count that
        acquire_data() temporarily overwrote.
        """
        self.first_initialisation = False
        self.number_frames = self.number_frames_backed_up
def set_up_state_machine(self):
    """Set up state machine, optionally with values from hexitec ini file.

    Writes the timing/control registers (RowS1, S1->Sph, SphS2, gain, PLL
    enables, ADC delay, sync-signal delay, wait clock row, VCAL2->VCAL1) to
    the currently selected VSR, then recalculates the frame rate in case the
    ini file changed the clock-dependent values.
    """
    logging.debug("Setting up state machine")
    # Establish register values, default values
    # Each register/value pair is two ASCII-coded hex digits (aspect format),
    # e.g. (0x30, 0x32) is the two characters "02".
    register_002 = 0x30, 0x32
    register_003 = 0x30, 0x33
    register_004 = 0x30, 0x34
    register_005 = 0x30, 0x35
    register_006 = 0x30, 0x36
    register_007 = 0x30, 0x37
    register_009 = 0x30, 0x39
    register_00E = 0x30, 0x45
    register_018 = 0x31, 0x38
    register_019 = 0x31, 0x39
    register_01B = 0x31, 0x42
    register_014 = 0x31, 0x34
    value_002 = 0x30, 0x31  # RowS1 Low Byte value: 1 = maximum frame rate
    value_003 = 0x30, 0x30  # RowS1 High Byte value : 0 = ditto
    value_004 = 0x30, 0x31  # S1 -> Sph, 6 bits : 1 = ... Yes, what?
    value_005 = 0x30, 0x36  # SphS2, 6 bits : 6 = ... Yes, what?
    value_006 = 0x30, 0x31  # Gain, 1 bit : 0 = High Gain; 1 = Low Gain
    # TODO: What's register_007 called?
    value_007 = 0x30, 0x33  # UNNAMED, 2 bits : 1 = Enable PLL; 2 = Enable ADC PLL (3 = both)
    value_009 = 0x30, 0x32  # ADC1 Delay, 5 bits : 2 = 2 clock cycles
    value_00E = 0x30, 0x41  # delay sync signals, 8 bits; default 0x0A
    value_018 = 0x30, 0x31  # VCAL2 -> VCAL1 Low Byte, 8 bits: 1 = 1 clock cycle
    value_019 = 0x30, 0x30  # VCAL2 -> VCAL1 High Byte, 7 bits
    value_01B = 0x30, 0x38  # Wait Clock Row, 8 bits
    value_014 = 0x30, 0x31  # Start SM on '1' falling edge ('0' = rising edge) of ADC-CLK
    # # TODO: Find/Determine settings name in hexitec file?
    # noname = self._extract_integer(self.hexitec_parameters, 'Control-Settings/??',
    #                                bit_range=2)
    # if noname > -1:
    #     value_007 = self.convert_to_aspect_format(noname)
    # Send noname (Enable PPL, ADC PPL) to Register 0x07 (Accepts 2 bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_007[0], register_007[1], value_007[0], value_007[1], 0x0D])
    self.read_response()
    # self.row_s1 et al are parsed from the ini file elsewhere; -1 means "not set"
    if self.row_s1 > -1:
        # Valid value, within range: split 14-bit RowS1 into low/high bytes
        self.row_s1_low = self.row_s1 & 0xFF
        self.row_s1_high = self.row_s1 >> 8
        value_002 = self.convert_to_aspect_format(self.row_s1_low)
        value_003 = self.convert_to_aspect_format(self.row_s1_high)
    # Send RowS1 low byte to Register 0x02 (Accepts 8 bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_002[0], register_002[1], value_002[0], value_002[1], 0x0D])
    self.read_response()
    # Send RowS1 high byte to Register 0x03 (Accepts 6 bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_003[0], register_003[1], value_003[0], value_003[1], 0x0D])
    self.read_response()
    if self.s1_sph > -1:
        value_004 = self.convert_to_aspect_format(self.s1_sph)
    # Send S1SPH to Register 0x04 (Accepts 6 bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_004[0], register_004[1], value_004[0], value_004[1], 0x0D])
    self.read_response()
    if self.sph_s2 > -1:
        value_005 = self.convert_to_aspect_format(self.sph_s2)
    # Send SphS2 to Register 0x05 (Accepts 6 Bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_005[0], register_005[1], value_005[0], value_005[1], 0x0D])
    self.read_response()
    # # Debuggery
    # print("\n")
    # print(" row_S1, value_002: 0x%x, 0x%x value_003: 0x%x, 0x%x" %
    #       (value_002[0], value_002[1], value_003[0], value_003[1]))
    # print(" S1_SpH, value_004: 0x%x, 0x%x" % (value_004[0], value_004[1]))
    # print(" Sph_s2, value_005: 0x%x, 0x%x\n" % (value_005[0], value_005[1]))
    # sm_timing2 = [0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
    #               register_002[0], register_002[1], value_002[0], value_002[1], 0x0D]
    # S1_SPH = self.make_list_hexadecimal([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
    #                                      register_004[0], register_004[1],
    #                                      value_004[0], value_004[1], 0x0D])
    # SPH_S2 = self.make_list_hexadecimal([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
    #                                      register_005[0], register_005[1],
    #                                      value_005[0], value_005[1], 0x0D])
    # print(" sm_timing2, ", self.make_list_hexadecimal(sm_timing2))
    # print(" S1_SPH, ", S1_SPH)
    # print(" SPH_S2, ", SPH_S2)
    # print("\n")
    # TODO: What should default value be? (not set by JE previously!)
    gain = self._extract_integer(self.hexitec_parameters, 'Control-Settings/Gain', bit_range=1)
    if gain > -1:
        value_006 = self.convert_to_aspect_format(gain)
    # Send Gain to Register 0x06 (Accepts 1 Bit)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_006[0], register_006[1], value_006[0], value_006[1], 0x0D])
    self.read_response()
    adc1_delay = self._extract_integer(self.hexitec_parameters, 'Control-Settings/ADC1 Delay',
                                       bit_range=2)
    if adc1_delay > -1:
        value_009 = self.convert_to_aspect_format(adc1_delay)
    # Send ADC1 Delay to Register 0x09 (Accepts 2 Bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_009[0], register_009[1], value_009[0], value_009[1], 0x0D])
    self.read_response()
    delay_sync_signals = self._extract_integer(self.hexitec_parameters,
                                               'Control-Settings/delay sync signals',
                                               bit_range=8)
    if delay_sync_signals > -1:
        value_00E = self.convert_to_aspect_format(delay_sync_signals)
    # Send delay sync signals to Register 0x0E (Accepts 8 Bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_00E[0], register_00E[1], value_00E[0], value_00E[1], 0x0D])
    self.read_response()
    # # TODO: Name for this setting in .ini file ??
    # wait_clock_row = self._extract_integer(self.hexitec_parameters,
    #                                        'Control-Settings/???', bit_range=8)
    # if wait_clock_row > -1:
    #     value_01B = self.convert_to_aspect_format(wait_clock_row)
    # Send wait clock row to Register 0x1B (Accepts 8 Bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_01B[0], register_01B[1], value_01B[0], value_01B[1], 0x0D])
    self.read_response()
    # Send SM start-edge selection to Register 0x14
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_014[0], register_014[1], value_014[0], value_014[1], 0x0D])
    self.read_response()
    vcal2_vcal1 = self._extract_integer(self.hexitec_parameters,
                                        'Control-Settings/VCAL2 -> VCAL1', bit_range=15)
    if vcal2_vcal1 > -1:
        vcal2_vcal1_low = vcal2_vcal1 & 0xFF
        vcal2_vcal1_high = vcal2_vcal1 >> 8
        value_018 = self.convert_to_aspect_format(vcal2_vcal1_low)
        value_019 = self.convert_to_aspect_format(vcal2_vcal1_high)
    # Send VCAL2 -> VCAL1 low byte to Register 0x18 (Accepts 8 bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_018[0], register_018[1], value_018[0], value_018[1], 0x0D])
    self.read_response()
    # Send VCAL2 -> VCAL1 high byte to Register 0x19 (Accepts 7 bits)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.SET_REG_BIT,
                   register_019[0], register_019[1], value_019[0], value_019[1], 0x0D])
    self.read_response()
    # # DEBUG
    # print("");print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-");print("")
    # print(" VCAL2, L:", self.make_list_hexadecimal([0x23, self.vsr_addr,
    #       HexitecFem.SET_REG_BIT, register_018[0], register_018[1],
    #       value_018[0], value_018[1], 0x0D]))
    # print(" VCAL2, H:", self.make_list_hexadecimal([0x23, self.vsr_addr,
    #       HexitecFem.SET_REG_BIT, register_019[0], register_019[1],
    #       value_019[0], value_019[1], 0x0D]))
    # print("");print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-");print("")
    # Recalculate frame_rate, et cetera if new clock values provided by .ini
    self.calculate_frame_rate()
    logging.debug("Finished Setting up state machine")
def collect_offsets(self):
    """Run collect offsets sequence.

    Stop state machine, gathers offsets, calculates average picture, re-starts state machine.
    Raises (and catches) ParameterTreeError when disconnected and HexitecFemError
    when the hardware is busy; both paths record a status error instead of propagating.
    """
    try:
        # Displays all registers and contents:
        # self.dump_all_registers()
        if self.hardware_connected is not True:
            raise ParameterTreeError("Can't collect offsets while disconnected")
        if self.hardware_busy:
            raise HexitecFemError("Hardware sensors busy initialising")
        else:
            self._set_status_error("")
            self.hardware_busy = True
            self.operation_percentage_complete = 0
            self.operation_percentage_steps = 1
            # Read back register 0x24 (ASCII "24") from both VSRs for the log
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0],
                           HexitecFem.READ_REG_VALUE, 0x32, 0x34, 0x0D])
            vsr1 = self.read_response()
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1],
                           HexitecFem.READ_REG_VALUE, 0x32, 0x34, 0x0D])
            vsr2 = self.read_response()
            logging.debug("Reading back register 24; VSR1: '%s' VSR2: '%s'" %
                          (vsr1.replace('\r', ''), vsr2.replace('\r', '')))
            # Send reg value; Register 0x24, bits5,1: disable VCAL, capture average picture:
            enable_dc_vsr1 = [0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.SEND_REG_VALUE,
                              0x32, 0x34, 0x32, 0x32, 0x0D]
            enable_dc_vsr2 = [0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.SEND_REG_VALUE,
                              0x32, 0x34, 0x32, 0x32, 0x0D]
            # Send reg value; Register 0x24, bits5,3: disable VCAL, enable spectroscopic mode:
            disable_dc_vsr1 = [0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.SEND_REG_VALUE,
                               0x32, 0x34, 0x32, 0x38, 0x0D]
            disable_dc_vsr2 = [0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.SEND_REG_VALUE,
                               0x32, 0x34, 0x32, 0x38, 0x0D]
            # State machine start/stop: set/clear bit 0 of register 0x01
            enable_sm_vsr1 = [0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.SET_REG_BIT,
                              0x30, 0x31, 0x30, 0x31, 0x0D]
            enable_sm_vsr2 = [0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.SET_REG_BIT,
                              0x30, 0x31, 0x30, 0x31, 0x0D]
            disable_sm_vsr1 = [0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.CLR_REG_BIT,
                               0x30, 0x31, 0x30, 0x31, 0x0D]
            disable_sm_vsr2 = [0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.CLR_REG_BIT,
                               0x30, 0x31, 0x30, 0x31, 0x0D]
            # 1. System is fully initialised (Done already)
            # 2. Stop the state machine
            self.send_cmd(disable_sm_vsr1)
            self.read_response()
            self.send_cmd(disable_sm_vsr2)
            self.read_response()
            # 3. Set reg 0x24 to 0x22
            logging.debug("Gathering offsets..")
            self.send_cmd(enable_dc_vsr1)
            self.read_response()
            self.send_cmd(enable_dc_vsr2)
            self.read_response()
            # 4. Start the state machine
            self.send_cmd(enable_sm_vsr1)
            self.read_response()
            self.send_cmd(enable_sm_vsr2)
            self.read_response()
            # 5. Wait > 8192 * frame time (1 second)
            # TODO: Replace hardcoded delay with polling Reg 0x89, but not working right now
            time.sleep(1)
            # time.sleep(0.25)
            # count = 0
            # while count < 20:
            #     (vsr2, vsr1) = self.debug_reg89()
            #     int_value = int(vsr2[1:]), int(vsr1[1:])
            #     print(" Reg0x89: {}".format(int_value))
            #     count += 1
            #     time.sleep(1)
            # 6. Stop state machine
            self.send_cmd(disable_sm_vsr1)
            self.read_response()
            self.send_cmd(disable_sm_vsr2)
            self.read_response()
            # 7. Set reg 0x24 to 0x28
            logging.debug("Offsets collected")
            self.send_cmd(disable_dc_vsr1)
            self.read_response()
            self.send_cmd(disable_dc_vsr2)
            self.read_response()
            # 8. Start state machine
            self.send_cmd(enable_sm_vsr1)
            self.read_response()
            self.send_cmd(enable_sm_vsr2)
            self.read_response()
            # Clear register 0x24 bit 5 so VCAL stays enabled after the sequence
            logging.debug("Ensure VCAL remains on")
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.CLR_REG_BIT,
                           0x32, 0x34, 0x32, 0x30, 0x0D])
            self.read_response()
            self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.CLR_REG_BIT,
                           0x32, 0x34, 0x32, 0x30, 0x0D])
            self.read_response()
            self.operation_percentage_complete = 100
            self._set_status_message("Offsets collections operation completed.")
            self.hardware_busy = False
    except (HexitecFemError, ParameterTreeError) as e:
        # NOTE(review): this message also fires when the hardware is merely busy,
        # not only when disconnected - consider reporting str(e) alone
        self._set_status_error("Can't collect offsets while disconnected: %s" % str(e))
        logging.error("%s" % str(e))
    except Exception as e:
        self._set_status_error("Uncaught Exception; Failed to collect offsets: %s" % str(e))
        logging.error("%s" % str(e))
def _build_enable_command(self, register, default_values, param_prefix, vsr, asic, scope):
    """Build one SEND_REG_BURST command for a power/cal/read enable register.

    Uses the 80-bit value extracted from the hexitec ini file when one was
    loaded, otherwise falls back to *default_values*.

    :param register: two ASCII-coded hex digits addressing the target register
    :param default_values: 20 ASCII bytes used when no ini value is present
    :param param_prefix: ini key prefix, e.g. "ColumnEn_", "RowPwr", "RowCal"
    :param vsr: VSR number (1 or 2)
    :param asic: ASIC number (1 or 2)
    :param scope: "Channel" (columns) or "Block" (rows)
    :return: complete command list, terminated with 0x0D
    """
    bits = self._extract_80_bits(self.hexitec_parameters, param_prefix, vsr, asic, scope)
    command = [0x23, self.vsr_addr, HexitecFem.SEND_REG_BURST, register[0], register[1]]
    # _extract_80_bits returns a list of (-1, -1) tuples when no ini file was loaded
    if bits[0][0] > 0:
        for msb, lsb in bits:
            command.append(msb)
            command.append(lsb)
    else:
        command.extend(default_values)
    command.append(0x0D)
    return command

def load_pwr_cal_read_enables(self):
    """Load power, calibration and read enables - optionally from hexitec file.

    Builds the twelve enable commands (read/power/calibrate x column/row x
    ASIC1/ASIC2), then loads them with the state machine stopped and restarts
    it afterwards.

    Note: this replaces a hand-unrolled version which had a copy-paste bug in
    the default Row Power Enable ASIC1 command (indices 1-3 repeated in place
    of 11-13) - harmless only because all default bytes were identical.
    """
    # The default payloads are either 20 times 'F' (0x46, all enabled)
    # or 20 times '0' (0x30, all disabled):
    list_of_46s = [0x46] * 20
    list_of_30s = [0x30] * 20
    enable_sm = [0x23, self.vsr_addr, HexitecFem.SET_REG_BIT, 0x30, 0x31, 0x30, 0x31, 0x0D]
    disable_sm = [0x23, self.vsr_addr, HexitecFem.CLR_REG_BIT, 0x30, 0x31, 0x30, 0x31, 0x0D]
    # Map the selected VSR address onto the ini file's VSR number
    if self.vsr_addr == HexitecFem.VSR_ADDRESS[0]:      # 0x90
        vsr = 1
    elif self.vsr_addr == HexitecFem.VSR_ADDRESS[1]:    # 0x91
        vsr = 2
    else:
        raise HexitecFemError("Unknown VSR address! (%s)" % self.vsr_addr)
    # Column enables (ini scope "Channel")
    col_read_enable1 = self._build_enable_command([0x36, 0x31], list_of_46s,
                                                  "ColumnEn_", vsr, 1, "Channel")   # Reg 0x61
    col_read_enable2 = self._build_enable_command([0x43, 0x32], list_of_46s,
                                                  "ColumnEn_", vsr, 2, "Channel")   # Reg 0xC2
    col_power_enable1 = self._build_enable_command([0x34, 0x44], list_of_46s,
                                                   "ColumnPwr", vsr, 1, "Channel")  # Reg 0x4D
    col_power_enable2 = self._build_enable_command([0x41, 0x45], list_of_46s,
                                                   "ColumnPwr", vsr, 2, "Channel")  # Reg 0xAE
    col_cal_enable1 = self._build_enable_command([0x35, 0x37], list_of_30s,
                                                 "ColumnCal", vsr, 1, "Channel")    # Reg 0x57
    col_cal_enable2 = self._build_enable_command([0x42, 0x38], list_of_30s,
                                                 "ColumnCal", vsr, 2, "Channel")    # Reg 0xB8
    # Row enables (ini scope "Block")
    row_read_enable1 = self._build_enable_command([0x34, 0x33], list_of_46s,
                                                  "RowEn_", vsr, 1, "Block")        # Reg 0x43
    row_read_enable2 = self._build_enable_command([0x41, 0x34], list_of_46s,
                                                  "RowEn_", vsr, 2, "Block")        # Reg 0xA4
    row_power_enable1 = self._build_enable_command([0x32, 0x46], list_of_46s,
                                                   "RowPwr", vsr, 1, "Block")       # Reg 0x2F
    row_power_enable2 = self._build_enable_command([0x39, 0x30], list_of_46s,
                                                   "RowPwr", vsr, 2, "Block")       # Reg 0x90
    row_cal_enable1 = self._build_enable_command([0x33, 0x39], list_of_30s,
                                                 "RowCal", vsr, 1, "Block")         # Reg 0x39
    row_cal_enable2 = self._build_enable_command([0x39, 0x41], list_of_30s,
                                                 "RowCal", vsr, 2, "Block")         # Reg 0x9A
    # The state machine must be stopped while the enables are loaded
    self.send_cmd(disable_sm)
    self.read_response()
    logging.debug("Loading Power, Cal and Read Enables")
    logging.debug("Column power enable")
    self.send_cmd(col_power_enable1)    # 0x4D
    self.read_response()
    self.send_cmd(col_power_enable2)
    self.read_response()
    logging.debug("Row power enable")
    self.send_cmd(row_power_enable1)    # 0x2F
    self.read_response()
    self.send_cmd(row_power_enable2)
    self.read_response()
    # Default selection
    logging.debug("Column cal enable D")
    self.send_cmd(col_cal_enable1)      # 0x57
    self.read_response()
    self.send_cmd(col_cal_enable2)
    self.read_response()
    logging.debug("Row cal enable D")
    self.send_cmd(row_cal_enable1)      # 0x39
    self.read_response()
    self.send_cmd(row_cal_enable2)
    self.read_response()
    logging.debug("Column read enable")
    self.send_cmd(col_read_enable1)     # 0x61
    self.read_response()
    self.send_cmd(col_read_enable2)
    self.read_response()
    logging.debug("Row read enable")
    self.send_cmd(row_read_enable1)     # 0x43
    self.read_response()
    self.send_cmd(row_read_enable2)
    self.read_response()
    logging.debug("Power, Cal and Read Enables have been loaded")
    self.send_cmd(enable_sm)
    self.read_response()
def write_dac_values(self):
    """Write values to DAC, optionally provided by hexitec file.

    Sends Vcal, Umid, HV (reserve1), DET ctrl and reserve2 in a single
    WRITE_DAC_VAL command; each value is four ASCII-coded hex digits.
    """
    logging.debug("Writing DAC values")
    # Defaults (aspect format):
    vcal = [0x30, 0x32, 0x41, 0x41]     # "02AA"
    umid = [0x30, 0x35, 0x35, 0x35]     # "0555"
    hv = [0x30, 0x35, 0x35, 0x35]       # "0555"
    dctrl = [0x30, 0x30, 0x30, 0x30]    # "0000"
    rsrv2 = [0x30, 0x38, 0x45, 0x38]    # "08E8"
    umid_value = self._extract_exponential(self.hexitec_parameters,
                                           'Control-Settings/Uref_mid', bit_range=12)
    if umid_value > -1:
        # Valid value, within range; split the 12-bit value into high nibble/low byte
        umid_high = (umid_value >> 8) & 0x0F
        umid_low = umid_value & 0xFF
        umid[0], umid[1] = self.convert_to_aspect_format(umid_high)
        umid[2], umid[3] = self.convert_to_aspect_format(umid_low)
    vcal_value = self._extract_float(self.hexitec_parameters, 'Control-Settings/VCAL')
    if vcal_value > -1:
        # Valid value, within range
        # NOTE(review): _extract_float suggests a float return value, but `>> 8`
        # on a float raises TypeError - confirm it returns an integer DAC code here
        vcal_high = (vcal_value >> 8) & 0x0F
        vcal_low = vcal_value & 0xFF
        vcal[0], vcal[1] = self.convert_to_aspect_format(vcal_high)
        vcal[2], vcal[3] = self.convert_to_aspect_format(vcal_low)
    self.send_cmd([0x23, self.vsr_addr, HexitecFem.WRITE_DAC_VAL,
                   vcal[0], vcal[1], vcal[2], vcal[3],      # Vcal, e.g. 0x0111 =: 0.2V
                   umid[0], umid[1], umid[2], umid[3],      # Umid, e.g. 0x0555 =: 1.0V
                   hv[0], hv[1], hv[2], hv[3],              # reserve1, 0x0555 =: 1V (HV ~-250V)
                   dctrl[0], dctrl[1], dctrl[2], dctrl[3],  # DET ctrl, 0x000
                   rsrv2[0], rsrv2[1], rsrv2[2], rsrv2[3],  # reserve2, 0x08E8 =: 1.67V
                   0x0D])
    self.read_response()
    logging.debug("DAC values set")
def make_list_hexadecimal(self, value):  # pragma: no cover
    """Debug helper: render each decimal entry of *value* as a hex string."""
    return ["0x%x" % entry for entry in value]
def enable_adc(self):
    """Enable the ADCs."""
    logging.debug("Enabling ADC")
    addr = self.vsr_addr
    # Sequence: disable ADC, start state machine, enable ADC, set register
    # 0x16 to 0x09, then clear the ADC test-mode register (0x0D)
    self.send_cmd([0x23, addr, HexitecFem.CTRL_ADC_DAC, 0x30, 0x32, 0x0D])
    self.read_response()
    logging.debug("Enable SM")
    self.send_cmd([0x23, addr, HexitecFem.SET_REG_BIT, 0x30, 0x31, 0x30, 0x31, 0x0D])
    self.read_response()
    self.send_cmd([0x23, addr, HexitecFem.CTRL_ADC_DAC, 0x30, 0x33, 0x0D])
    self.read_response()
    self.send_cmd([0x23, addr, HexitecFem.WRITE_REG_VAL, 0x31, 0x36, 0x30, 0x39, 0x0D])
    self.read_response()
    # Disable ADC test testmode
    self.send_cmd([0x23, addr, HexitecFem.WRITE_REG_VAL, 0x30, 0x44, 0x30, 0x30, 0x0d])
    self.read_response()
def _configure_vsr(self, label, sensor_option):
    """Run the full configuration sequence for one VSR board.

    :param label: human-readable name used in status messages, e.g. "VSR2"
    :param sensor_option: entry of HexitecFem.OPTIONS selecting the sensor
    """
    self._set_status_message("Configuring %s" % label)
    self.selected_sensor = sensor_option
    self.initialise_sensor()
    self._set_status_message("%s: Sensors initialised" % label)
    self.set_up_state_machine()
    self._set_status_message("%s: State Machine setup" % label)
    self.write_dac_values()
    self._set_status_message("%s: DAC values written" % label)
    self.enable_adc()
    self._set_status_message("%s: ADC enabled" % label)
    self.load_pwr_cal_read_enables()
    self._set_status_message("%s: Loaded Power, Calibrate, Read Enables" % label)
    synced_status = self.calibrate_sensor()
    logging.debug("Calibrated sensor returned synchronised status: %s" % synced_status)

def initialise_system(self):
    """Configure in full VSR2, then VSR1.

    Initialise, load enables, set up state machine, write to DAC and enable ADCs.

    Fixes a typo in the final status message of the previous version
    ("VS1" -> "VSR1") and routes both boards through one helper so their
    status messages stay consistent.
    """
    enable_sm_vsr1 = [0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.SET_REG_BIT,
                      0x30, 0x31, 0x30, 0x31, 0x0D]
    enable_sm_vsr2 = [0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.SET_REG_BIT,
                      0x30, 0x31, 0x30, 0x31, 0x0D]
    disable_sm_vsr1 = [0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.CLR_REG_BIT,
                       0x30, 0x31, 0x30, 0x31, 0x0D]
    disable_sm_vsr2 = [0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.CLR_REG_BIT,
                       0x30, 0x31, 0x30, 0x31, 0x0D]
    # Note current setting, change Register 143 (0x8F) -> 1, confirm changed
    self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.READ_REG_VALUE,
                   0x38, 0x46, 0x0D])
    self.read_response()
    self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.SET_REG_BIT,
                   0x38, 0x46, 0x30, 0x31, 0x0D])
    self.read_response()
    # Repeat with other VSR board
    self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.READ_REG_VALUE,
                   0x38, 0x46, 0x0D])
    self.read_response()
    self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.SET_REG_BIT,
                   0x38, 0x46, 0x30, 0x31, 0x0D])
    self.read_response()
    # Stop the state machine
    self.send_cmd(disable_sm_vsr1)
    self.read_response()
    self.send_cmd(disable_sm_vsr2)
    self.read_response()
    # Re-Start the state machine
    self.send_cmd(enable_sm_vsr1)
    self.read_response()
    self.send_cmd(enable_sm_vsr2)
    self.read_response()
    # Configure VSR2 first, then VSR1
    self._configure_vsr("VSR2", HexitecFem.OPTIONS[2])
    self._configure_vsr("VSR1", HexitecFem.OPTIONS[0])
    self._set_status_message("Initialisation completed. VSR2 and VSR1 configured.")
    print(" -=-=-=- -=-=-=- -=-=-=- -=-=-=- -=-=-=- -=-=-=- ")
def calculate_frame_rate(self):
    """Calculate variables to determine frame rate (See ASICTimingRateDefault.xlsx)."""
    # Spreadsheet cell references (B2, B3, ...) retained from the original workbook
    adc_clk = 21250000              # B2
    asic_clk = adc_clk * 2          # B3 = B2 * 2
    asic_period = 1 / asic_clk      # B4 = 1 / B3
    rows = 80                       # B6; Hard coded yes?
    columns = 20                    # B7; Hard coded too?
    wait_col = 1                    # B9; Hard coded too?
    wait_row = 8                    # B10
    # self.row_s1 (B12), self.s1_sph (B13), self.sph_s2 (B14) come from hexitecVSR file
    # B16 = ((B7 + B9 + B12 + B13 + B14) * 2) + 10
    row_read_clks = ((columns + wait_col + self.row_s1 + self.s1_sph + self.sph_s2) * 2) + 10
    # B18 = B6 * B16 + 4 + (B10 * 2)
    frame_read_clks = (rows * row_read_clks) + 4 + (wait_row * 2)
    # B20 = ((B18 * 3) + 2) * (B4 / 3)
    frame_time = ((frame_read_clks * 3) + 2) * (asic_period / 3)
    # B21 = 1 / B20
    self.frame_rate = 1 / frame_time
    if self.duration_enabled:
        # With duration enabled, recalculate number of frames in case clocks changed
        self.set_duration(self.duration)
        self.parent.set_number_frames(self.number_frames)
def print_vcal_registers(self, vsr_addr):  # pragma: no cover
    """Debug function: Print all VCAL (Power, calibrate & read enables) registers."""
    separator = "---------------------------------------------------------------------------------"
    print(separator)
    # ROW, ASIC 1: power 0x2F-0x38, calibrate 0x39-0x42, read 0x43-0x4C
    pwr = self.read_back_register(vsr_addr, [0x2F, 0x38])
    cal = self.read_back_register(vsr_addr, [0x39, 0x42])
    rd = self.read_back_register(vsr_addr, [0x43, 0x4c])
    print("\t\tRow Pwr Ena ASIC1: %s \t\tRow Cal Ena ASIC1: %s \t\tRow Rd Ena ASIC1: %s"
          % (pwr, cal, rd))
    # COLUMN, ASIC 1: power 0x4D-0x56, calibrate 0x57-0x60, read 0x61-0x6A
    pwr = self.read_back_register(vsr_addr, [0x4d, 0x56])
    cal = self.read_back_register(vsr_addr, [0x57, 0x60])
    rd = self.read_back_register(vsr_addr, [0x61, 0x6a])
    print("\t\tCol Pwr Ena ASIC1: %s \t\tCol Cal Ena ASIC1: %s \t\tCol Rd Ena ASIC1: %s"
          % (pwr, cal, rd))
    print(separator)
    # ROW, ASIC 2: power 0x90-0x99, calibrate 0x9A-0xA3, read 0xA4-0xAD
    pwr = self.read_back_register(vsr_addr, [0x90, 0x99])
    cal = self.read_back_register(vsr_addr, [0x9A, 0xA3])
    rd = self.read_back_register(vsr_addr, [0xA4, 0xAD])
    print("\t\tRow Pwr Ena ASIC2: %s \t\tRow Cal Ena ASIC2: %s \t\tRow Rd Ena ASIC2: %s"
          % (pwr, cal, rd))
    # COLUMN, ASIC 2: power 0xAE-0xB7, calibrate 0xB8-0xC1, read 0xC2-0xCB
    pwr = self.read_back_register(vsr_addr, [0xAE, 0xB7])
    cal = self.read_back_register(vsr_addr, [0xB8, 0xC1])
    rd = self.read_back_register(vsr_addr, [0xC2, 0xCB])
    print("\t\tCol Pwr Ena ASIC2: %s \t\tCol Cal Ena ASIC2: %s \t\tCol Rd Ena ASIC2: %s"
          % (pwr, cal, rd))
    print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-")
    def read_back_register(self, vsr_addr, boundaries): # pragma: no cover
        """Debug function: Actual hardware interaction with VCAL registers.

        :param vsr_addr: address of the VSR to query
        :param boundaries: [first, last] register addresses, read inclusively
        :return: list of raw (stripped) reply strings, one per register
        """
        register_reply = []
        for idx in range(boundaries[0], boundaries[1] + 1, 1):
            # Register addresses are sent in aSpect's two-byte ASCII hex format
            formatted_address = self.convert_to_aspect_format(idx)
            command = [0x23, vsr_addr, HexitecFem.READ_REG_VALUE,
                       formatted_address[0], formatted_address[1], 0x0D]
            self.send_cmd(command)
            # A typical reply: "\x90FF\r\r\r\r"
            # After strip() -> "\x90FF"
            # After [1:] -> "FF"
            # register_reply.append(self.read_response().strip()[1:])
            register_reply.append(self.read_response().strip())
        return register_reply
    def read_pwr_voltages(self):
        """Read and convert power data into voltages.

        Sends the READ_PWR_VOLT command to the currently-selected VSR and
        stores the derived HV value on vsr1_hv or vsr2_hv, chosen by
        self.vsr_addr. Any other vsr_addr leaves both attributes untouched.
        """
        self.send_cmd([0x23, self.vsr_addr, HexitecFem.READ_PWR_VOLT, 0x0D], False)
        sensors_values = self.read_response()
        sensors_values = sensors_values.strip()
        if self.debug:
            logging.debug("VSR: %s Power values: %s len: %s" % (format(self.vsr_addr, '#02x'),
                                                                sensors_values, len(sensors_values)))
        if (self.vsr_addr == HexitecFem.VSR_ADDRESS[0]):
            self.vsr1_hv = self.get_hv_value(sensors_values)
        else:
            if (self.vsr_addr == HexitecFem.VSR_ADDRESS[1]):
                self.vsr2_hv = self.get_hv_value(sensors_values)
def get_hv_value(self, sensors_values):
"""Take the full string of voltages and extract the HV value."""
try:
# Calculate V10, the 3.3V reference voltage
reference_voltage = int(sensors_values[37:41], 16) * (2.048 / 4095)
# Calculate HV rails
u1 = int(sensors_values[1:5], 16) * (reference_voltage / 2**12)
# Apply conversion gain # Added 56V following HV tests
hv_monitoring_voltage = u1 * 1621.65 - 1043.22 + 56
return hv_monitoring_voltage
except ValueError as e:
logging.error("VSR %s: Error obtaining HV value: %s" %
(format(self.vsr_addr, '#02x'), e))
return -1
    def read_temperatures_humidity_values(self):
        """Read and convert sensor data into temperatures and humidity values.

        Issues command 0x52 to the currently-selected VSR, then decodes the
        reply into ambient/ASIC/ADC temperatures and humidity, stored on the
        vsr1_* or vsr2_* attributes according to self.vsr_addr.

        :return: None (also returned early if the reply is unintelligible)
        """
        self.send_cmd([0x23, self.vsr_addr, 0x52, 0x0D], False)
        sensors_values = self.read_response()
        sensors_values = sensors_values.strip()
        if self.debug:
            logging.debug("VSR: %s sensors_values: %s len: %s" % (format(self.vsr_addr, '#02x'),
                                                                  sensors_values, len(sensors_values)))
        # Check register value is OK, otherwise sensor values weren't read out
        initial_value = -1
        try:
            initial_value = int(sensors_values[1])
        except ValueError as e:
            logging.error("Failed to readout intelligible sensor values: %s" % e)
            return None
        if initial_value == HexitecFem.SENSORS_READOUT_OK:
            # Reply decodes as consecutive 4-character hex fields
            ambient_hex = sensors_values[1:5]
            humidity_hex = sensors_values[5:9]
            asic1_hex = sensors_values[9:13]
            asic2_hex = sensors_values[13:17]
            adc_hex = sensors_values[17:21]
            if (self.vsr_addr == HexitecFem.VSR_ADDRESS[0]):
                self.vsr1_ambient = self.get_ambient_temperature(ambient_hex)
                self.vsr1_humidity = self.get_humidity(humidity_hex)
                self.vsr1_asic1 = self.get_asic_temperature(asic1_hex)
                self.vsr1_asic2 = self.get_asic_temperature(asic2_hex)
                self.vsr1_adc = self.get_adc_temperature(adc_hex)
            else:
                if (self.vsr_addr == HexitecFem.VSR_ADDRESS[1]):
                    self.vsr2_ambient = self.get_ambient_temperature(ambient_hex)
                    self.vsr2_humidity = self.get_humidity(humidity_hex)
                    self.vsr2_asic1 = self.get_asic_temperature(asic1_hex)
                    self.vsr2_asic2 = self.get_asic_temperature(asic2_hex)
                    self.vsr2_adc = self.get_adc_temperature(adc_hex)
        else:
            logging.warning("VSR 0x%s: Sensor data temporarily unavailable" %
                            format(self.vsr_addr, '02x'))
def get_ambient_temperature(self, hex_val):
"""Calculate ambient temperature."""
try:
return ((int(hex_val, 16) * 175.72) / 65536) - 46.84
except ValueError as e:
logging.error("Error converting ambient temperature: %s" % e)
return -100
def get_humidity(self, hex_val):
"""Calculate humidity."""
try:
return ((int(hex_val, 16) * 125) / 65535) - 6
except ValueError as e:
logging.error("Error converting humidity: %s" % e)
return -100
def get_asic_temperature(self, hex_val):
"""Calculate ASIC temperature."""
try:
return int(hex_val, 16) * 0.0625
except ValueError as e:
logging.error("Error converting ASIC temperature: %s" % e)
return -100
def get_adc_temperature(self, hex_val):
"""Calculate ADC Temperature."""
try:
return int(hex_val, 16) * 0.0625
except ValueError as e:
logging.error("Error converting ADC temperature: %s" % e)
return -100
def _set_hexitec_config(self, filename):
"""Check whether file exists, load parameters from file."""
filename = self.base_path + filename
try:
with open(filename, 'r') as f: # noqa: F841
pass
self.hexitec_config = filename
logging.debug("hexitec_config: '%s' Filename: '%s'" % (self.hexitec_config, filename))
except IOError as e:
logging.error("Cannot open provided hexitec file: %s" % e)
raise ParameterTreeError("Error: %s" % e)
self.read_ini_file(self.hexitec_config, self.hexitec_parameters, debug=False)
bias_refresh_interval = self._extract_integer(self.hexitec_parameters,
'Bias_Voltage/Bias_Refresh_Interval',
bit_range=32)
if bias_refresh_interval > -1:
self.bias_refresh_interval = bias_refresh_interval / 1000.0
bias_voltage_refresh = self._extract_boolean(self.hexitec_parameters,
'Bias_Voltage/Bias_Voltage_Refresh')
if bias_voltage_refresh > -1:
self.bias_voltage_refresh = bias_voltage_refresh
time_refresh_voltage_held = self._extract_integer(self.hexitec_parameters,
'Bias_Voltage/Time_Refresh_Voltage_Held',
bit_range=32)
if time_refresh_voltage_held > -1:
self.time_refresh_voltage_held = time_refresh_voltage_held / 1000.0
bias_voltage_settle_time = self._extract_integer(self.hexitec_parameters,
'Bias_Voltage/Bias_Voltage_Settle_Time',
bit_range=32)
if bias_voltage_settle_time > -1:
self.time_refresh_voltage_held = time_refresh_voltage_held / 1000.0
self.bias_voltage_settle_time = bias_voltage_settle_time / 1000.0
# Recalculate frame rate
self.row_s1 = self._extract_integer(self.hexitec_parameters, 'Control-Settings/Row -> S1',
bit_range=14)
self.s1_sph = self._extract_integer(self.hexitec_parameters, 'Control-Settings/S1 -> Sph',
bit_range=6)
self.sph_s2 = self._extract_integer(self.hexitec_parameters, 'Control-Settings/Sph -> S2',
bit_range=6)
self.calculate_frame_rate()
def convert_string_exponential_to_integer(self, exponent):
"""Convert aspect format to fit dac format.
Aspect's exponent format looks like: 1,003000E+2
Convert to float (eg: 100.3), rounding to nearest
int before scaling to fit DAC range.
"""
number_string = str(exponent)
number_string = number_string.replace(",", ".")
number_float = float(number_string)
number_int = int(round(number_float))
return number_int
# number_scaled = int(number_int // self.DAC_SCALE_FACTOR)
def _extract_exponential(self, parameter_dict, descriptor, bit_range):
"""Extract exponential descriptor from parameter_dict, check it's within bit_range."""
valid_range = [0, 1 << bit_range]
setting = -1
try:
unscaled_setting = parameter_dict[descriptor]
scaled_setting = self.convert_string_exponential_to_integer(unscaled_setting)
if scaled_setting >= valid_range[0] and scaled_setting <= valid_range[1]:
setting = int(scaled_setting // self.DAC_SCALE_FACTOR)
else:
logging.error("Error parsing %s, got: %s (scaled: % s) but valid range: %s-%s" %
(descriptor, unscaled_setting, scaled_setting, valid_range[0],
valid_range[1]))
setting = -1
except KeyError:
logging.warning("Warning: No '%s' Key defined!" % descriptor)
return setting
def convert_aspect_float_to_dac_value(self, number_float):
"""Convert aspect float format to fit dac format.
Convert float (eg: 1.3V) to mV (*1000), scale to fit DAC range
before rounding to nearest int.
"""
milli_volts = number_float * 1000
number_scaled = int(round(milli_volts // self.DAC_SCALE_FACTOR))
return number_scaled
def _extract_float(self, parameter_dict, descriptor):
"""Extract descriptor from parameter_dict, check within 0.0 - 3.0 (hardcoded) range."""
valid_range = [0.0, 3.0]
setting = -1
try:
setting = float(parameter_dict[descriptor])
if setting >= valid_range[0] and setting <= valid_range[1]:
# Convert from volts to DAQ format
setting = self.convert_aspect_float_to_dac_value(setting)
else:
logging.error("Error parsing float %s, got: %s but valid range: %s-%s" %
(descriptor, setting, valid_range[0], valid_range[1]))
setting = -1
except KeyError:
logging.warning("Warning: No '%s' Key defined!" % descriptor)
return setting
def _extract_integer(self, parameter_dict, descriptor, bit_range):
"""Extract integer descriptor from parameter_dict, check it's within bit_range."""
valid_range = [0, 1 << bit_range]
setting = -1
try:
setting = int(parameter_dict[descriptor])
if setting >= valid_range[0] and setting <= valid_range[1]:
pass
else:
logging.error("Error parsing parameter %s, got: %s but valid range: %s-%s" %
(descriptor, setting, valid_range[0], valid_range[1]))
setting = -1
except KeyError:
logging.warning("Warning: No '%s' Key defined!" % descriptor)
return setting
def _extract_boolean(self, parameter_dict, descriptor):
"""Extract boolean of descriptor from parameter_dict.
True values: y, yes, t, true, on and 1.
False values: n, no, f, false, off and 0.
"""
try:
parameter = parameter_dict[descriptor]
setting = bool(distutils.util.strtobool(parameter))
except ValueError:
logging.error("ERROR: Invalid choice for %s!" % descriptor)
setting = -1
return setting
    def _extract_80_bits(self, parameter_dict, param, vsr, asic, channel_or_block): # noqa: C901
        """Extract 80 bits from four (20 bit) channels, assembling one ASIC's row/column.

        :param parameter_dict: dictionary of ini-file settings
        :param param: setting prefix, e.g. "ColumnEn_" or "RowPwr"
        :param vsr: VSR number used to build the section key
        :param asic: ASIC (sensor) number used to build the section key
        :param channel_or_block: key suffix, "Channel" or "Block"
        :return: list of ten (MSB, LSB) aspect-encoded tuples, or ten
                 (-1, -1) tuples if any of the four keys is missing
        """
        # key = "ColumnEn_"
        # vsr = 1
        # asic = 1
        # channel_or_block = "Channel"
        # Example Column variable: 'Sensor-Config_V1_S1/ColumnEn_1stChannel'
        # Examples Row variable: 'Sensor-Config_V1_S1/RowPwr4thBlock'
        bDebug = False
        # if param == "ColumnCal":
        #     bDebug = True
        # TODO: Bit clunky returning so many -1 tuples. Better solution for no ini file loaded?
        aspect_list = [(-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1), (-1, -1),
                       (-1, -1), (-1, -1), (-1, -1)]
        key = 'Sensor-Config_V%s_S%s/%s1st%s' % (vsr, asic, param, channel_or_block)
        try:
            first_channel = self.extract_channel_data(parameter_dict, key)
        except KeyError:
            logging.debug("WARNING: Missing key %s - was .ini file loaded?" % key)
            return aspect_list
        key = 'Sensor-Config_V%s_S%s/%s2nd%s' % (vsr, asic, param, channel_or_block)
        try:
            second_channel = self.extract_channel_data(parameter_dict, key)
        except KeyError:
            logging.debug("WARNING: Missing key %s - was .ini file loaded?" % key)
            return aspect_list
        key = 'Sensor-Config_V%s_S%s/%s3rd%s' % (vsr, asic, param, channel_or_block)
        try:
            third_channel = self.extract_channel_data(parameter_dict, key)
        except KeyError:
            logging.debug("WARNING: Missing key %s - was .ini file loaded?" % key)
            return aspect_list
        key = 'Sensor-Config_V%s_S%s/%s4th%s' % (vsr, asic, param, channel_or_block)
        try:
            fourth_channel = self.extract_channel_data(parameter_dict, key)
        except KeyError:
            logging.debug("WARNING: Missing key %s - was .ini file loaded?" % key)
            return aspect_list
        # 4 x 20 characters -> one 80-character bit-string
        entirety = first_channel + second_channel + third_channel + fourth_channel
        if bDebug: # pragma: no cover
            print(" 1st: %s" % first_channel)
            print(" 2nd: %s" % second_channel)
            print(" 3rd: %s" % third_channel)
            print(" 4th: %s" % fourth_channel)
            print(" entirety: %s" % entirety)
        # Convert string to bytes (to support Python 3)
        entirety = entirety.encode("utf-8")
        # Pixels appear in 8 bit reverse order, reverse bit order accordingly
        # More info: https://docs.scipy.org/doc/numpy/user/basics.byteswapping.html
        # NOTE(review): the 80 bytes are reinterpreted as ten big-endian 8-byte
        # words; byteswap() then reverses each 8-character group in place.
        big_end_arr = np.ndarray(shape=(10,), dtype='>i8', buffer=entirety)
        rev_order = big_end_arr.byteswap()
        entirety = rev_order.tobytes()
        byte_list = []
        for index in range(0, len(entirety), 8):
            byte_list.append(entirety[index:index + 8])
        aspect_list = []
        for binary in byte_list:
            # Each 8-character chunk is an ASCII "0"/"1" string: parse base 2
            decimal = int(binary, 2)
            aspect = self.convert_to_aspect_format(decimal)
            aspect_list.append(aspect)
            if bDebug: # pragma: no cover
                print("\t\tVSR: %s bin: %s dec: %s" % (vsr, binary, "{:02x}".format(decimal)))
        # Turns aspect_list into tupples of (MSB, LSB), e.g.
        # [(70, 70), (70, 70), (70, 70), (70, 70), (70, 70), (70, 70), (70, 70), (69, 55),
        #  (57, 53), (51, 49)]
        return aspect_list
def extract_channel_data(self, parameter_dict, key):
"""Extract value of key from parameters_dict's dictionary."""
channel = parameter_dict[key]
if len(channel) != 20:
logging.error("Invalid length (%s != 20) detected in key: %s" % (len(channel), key))
raise HexitecFemError("Invalid length of value in '%s'" % key)
return channel
def convert_to_aspect_format(self, value):
"""Convert integer to Aspect's hexadecimal notation e.g. 31 (0x1F) -> 0x31, 0x46."""
hex_string = "{:02x}".format(value)
high_string = hex_string[0]
low_string = hex_string[1]
high_int = int(high_string, 16)
low_int = int(low_string, 16)
high_encoded = self.HEX_ASCII_CODE[high_int]
low_encoded = self.HEX_ASCII_CODE[low_int]
return high_encoded, low_encoded
def read_ini_file(self, filename, parameter_dict, debug=False):
"""Read filename, parse case sensitive keys decoded as strings."""
parser = configparser.ConfigParser()
if debug: # pragma: no cover
print("---------------------------------------------------------------------")
# Maintain case-sensitivity:
parser.optionxform = str
parser.read(filename)
for section in parser.sections():
if debug: # pragma: no cover
print("Section: ", section)
for key, value in parser.items(section):
parameter_dict[section + "/" + key] = value.strip("\"")
if debug: # pragma: no cover
print(" " + section + "/" + key + " => " + value.strip("\""))
if debug: # pragma: no cover
print("---------------------------------------------------------------------")
    def debug_register(self, msb, lsb): # pragma: no cover
        """Debug function: Display contents of register.

        :param msb: most-significant address byte (HEX_ASCII_CODE encoded)
        :param lsb: least-significant address byte (HEX_ASCII_CODE encoded)
        :return: (vsr2, vsr1) tuple of stripped reply strings
        """
        # Query VSR 2 first, then VSR 1, with the same register address
        self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[1], HexitecFem.READ_REG_VALUE,
                       msb, lsb, 0x0D])
        vsr2 = self.read_response().strip("\r")
        self.send_cmd([0x23, HexitecFem.VSR_ADDRESS[0], HexitecFem.READ_REG_VALUE,
                       msb, lsb, 0x0D])
        vsr1 = self.read_response().strip("\r")
        return (vsr2, vsr1)
    def dump_all_registers(self): # pragma: no cover
        """Dump register 0x00 - 0xff contents to screen.

        aSpect's address format: 0x3F -> 0x33, 0x46 (i.e. msb, lsb)
        See HEX_ASCII_CODE, and section 3.3, page 11 of revision 0.5:
        aS_AM_Hexitec_VSR_Interface.pdf
        """
        # Sweep every (msb, lsb) nibble pair, i.e. addresses 0x00 through 0xff
        for msb in range(16):
            for lsb in range(16):
                (vsr2, vsr1) = self.debug_register(self.HEX_ASCII_CODE[msb], self.HEX_ASCII_CODE[lsb])
                print("Register: {}{}: Address words: {} ({}) {} ({}), VSR2: {} VSR1: {}".format(hex(msb), hex(lsb)[-1],
                      self.HEX_ASCII_CODE[msb], hex(self.HEX_ASCII_CODE[msb]),
                      self.HEX_ASCII_CODE[lsb], hex(self.HEX_ASCII_CODE[lsb]),
                      vsr2, vsr1))
class HexitecFemError(Exception):
    """Simple exception class for HexitecFem to wrap lower-level exceptions."""
|
"""Game of Life"""
# старая посылка
# https://contest.yandex.ru/contest/29167/run-report/54247495/
# новая посылка
# https://contest.yandex.ru/contest/29167/run-report/54829714/
import pygame
# параметры экрана
DISPLAY_WIDTH = 620
DISPLAY_HEIGHT = 480
# размер клетки
CELL_SIZE = 20
# цвета
WHITE = (255, 255, 255, 255)
BLACK = (0, 0, 0, 255)
SILVER = (192, 192, 192, 255)
# cell(generation, row, col) returns the state of a cell; any cell
# outside the board is treated as dead (0).
def cell(generation, row, col):
    if 0 <= row < len(generation) and 0 <= col < len(generation[0]):
        return generation[row][col]
    return 0


# Count live neighbours: sum the states of the 3x3 block centred on
# (row, col), excluding the centre cell itself.
def num_alive(generation, row, col):
    neighbours = 0
    for d_row in (-1, 0, 1):
        for d_col in (-1, 0, 1):
            if d_row or d_col:
                neighbours += cell(generation, row + d_row, col + d_col)
    return neighbours


# Build the next generation: a cell with exactly three live neighbours
# becomes (or stays) alive; a live cell with exactly two live neighbours
# survives; every other cell is dead.
def get_next_generation(generation):
    rows = len(generation)
    cols = len(generation[0])
    next_generation = [[0] * cols for _ in range(rows)]
    for row in range(rows):
        for col in range(cols):
            alive = num_alive(generation, row, col)
            if alive == 3 or (alive == 2 and cell(generation, row, col) == 1):
                next_generation[row][col] = 1
    return next_generation
def draw_generation(display, generation):
    # Repaint only cells whose on-screen colour disagrees with their state,
    # so cells that are already correct are not redrawn.
    for row in range(len(generation)):
        for col in range(len(generation[0])):
            # +/- 1 keeps the cell rectangle clear of the grid lines
            x = row*CELL_SIZE+1
            y = col*CELL_SIZE+1
            rect = [x, y, CELL_SIZE-1, CELL_SIZE-1]
            state = cell(generation, row, col)
            if state == 1 and display.get_at((x, y)) != BLACK:
                pygame.draw.rect(display, BLACK, rect)
            elif state == 0 and display.get_at((x, y)) != WHITE:
                pygame.draw.rect(display, WHITE, rect)
def game_loop():
    """Run the interactive Game of Life window.

    Controls: RIGHT advances one generation, SPACE clears the board,
    left/right mouse buttons set/clear individual cells, window close quits.
    """
    pygame.init()
    gameDisplay = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
    pygame.display.set_caption('Game of Life')
    # draw the grid lines
    for i in range(0, DISPLAY_HEIGHT, CELL_SIZE):
        pygame.draw.line(gameDisplay, SILVER, (0, i), (DISPLAY_WIDTH, i))
    for i in range(0, DISPLAY_WIDTH, CELL_SIZE):
        pygame.draw.line(gameDisplay, SILVER, (i, 0), (i, DISPLAY_HEIGHT))
    # set up the initial generation,
    gen = [[0]*(DISPLAY_HEIGHT//CELL_SIZE)
           for i in range(DISPLAY_WIDTH//CELL_SIZE)]
    # for convenience, cycles of length 2 and 3 are predefined below
    for i, j in Cycle_3:
        gen[i][j] = 1
    draw_generation(gameDisplay, gen)
    pygame.display.update()
    finished = 0
    while not finished:
        for event in pygame.event.get():
            # keyboard handling
            if event.type == pygame.KEYDOWN:
                # advance one generation on RIGHT
                if event.key == pygame.K_RIGHT:
                    gen = get_next_generation(gen)
                    draw_generation(gameDisplay, gen)
                # clear the board on SPACE
                elif event.key == pygame.K_SPACE:
                    gen = [[0]*(DISPLAY_HEIGHT//CELL_SIZE)
                           for i in range(DISPLAY_WIDTH//CELL_SIZE)]
                    draw_generation(gameDisplay, gen)
            # mouse handling
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # on LMB press, if the cell is not black/alive/1,
                # it is painted black and gen[row][col] = 1
                if event.button == 1\
                        and gameDisplay.get_at(event.pos) != BLACK:
                    row, col = map(lambda x: x//CELL_SIZE, event.pos)
                    gen[row][col] = 1
                    pygame.draw.rect(gameDisplay, BLACK, [row*CELL_SIZE+1,
                                                          col*CELL_SIZE+1,
                                                          CELL_SIZE-1,
                                                          CELL_SIZE-1])
                # on RMB press, if the cell is not white/dead/0,
                # it is painted white and gen[row][col] = 0
                elif event.button == 3\
                        and gameDisplay.get_at(event.pos) != WHITE:
                    row, col = map(lambda x: x//CELL_SIZE, event.pos)
                    gen[row][col] = 0
                    pygame.draw.rect(gameDisplay, WHITE, [row*CELL_SIZE+1,
                                                          col*CELL_SIZE+1,
                                                          CELL_SIZE-1,
                                                          CELL_SIZE-1])
            elif event.type == pygame.QUIT:
                finished = 1
        pygame.display.update()
    pygame.quit()
# Preset seed patterns: lists of [row, col] cells set alive at start-up.
# Cycle_2 is a length-2 cycle (blinker); defined for convenience but not
# referenced by game_loop.
Cycle_2 = [[4, 4], [4, 5], [4, 6]]
# Cycle_3 is a length-3 cycle used as the initial board in game_loop.
Cycle_3 = [[2, 4], [2, 5], [2, 6],
           [2, 10], [2, 11], [2, 12],
           [4, 2], [5, 2], [6, 2],
           [4, 7], [5, 7], [6, 7],
           [4, 9], [5, 9], [6, 9],
           [4, 14], [5, 14], [6, 14],
           [7, 4], [7, 5], [7, 6],
           [7, 10], [7, 11], [7, 12],
           [9, 4], [9, 5], [9, 6],
           [9, 10], [9, 11], [9, 12],
           [10, 2], [11, 2], [12, 2],
           [10, 7], [11, 7], [12, 7],
           [10, 9], [11, 9], [12, 9],
           [10, 14], [11, 14], [12, 14],
           [14, 4], [14, 5], [14, 6],
           [14, 10], [14, 11], [14, 12]]
if __name__ == "__main__":
    game_loop()
|
#!/usr/bin/python
import os
import numpy as np
from manipulation.grip.robotiq85 import rtq85nm
from panda3d.bullet import BulletWorld
from panda3d.core import *
import pandaplotutils.pandactrl as pandactrl
import pandaplotutils.pandageom as pandageom
from manipulation.grip import freegripcontactpairs as fgcp
from utils import collisiondetection as cd
from utils import dbcvt as dc
from utils import robotmath as rm
from panda3d.bullet import BulletDebugNode
import database.dbaccess as db
import time
class Freegrip(fgcp.FreegripContactpairs):
    def __init__(self, objpath, handpkg, readser=False, torqueresist = 50, dotnormplan=.95, dotnoarmovlp=.95, dotnormpara = -.75):
        """
        initialization

        :param objpath: path of the object
        :param handpkg: hand package providing newHandNM/newHandFgrpcc/getHandName
        :param readser: True use pre-computed template file for debug (in order to debug large models like tool.stl
        :param torqueresist: the maximum allowable distance to com (see FreegripContactpairs.planContactpairs)
        :param dotnormplan: dot-product threshold forwarded to FreegripContactpairs
        :param dotnoarmovlp: dot-product threshold forwarded to FreegripContactpairs
        :param dotnormpara: dot-product threshold forwarded to planContactpairs

        author: weiwei
        date: 20161201, osaka
        """
        super(self.__class__, self).__init__(objpath, dotnormplan=dotnormplan, dotnoarmovlp=dotnoarmovlp, readser=readser)
        if readser is False:
            # Full pipeline: sample filtering, clustering, pair planning,
            # then cache the result for later readser=True runs.
            tic = time.time()
            self.removeBadSamples(mindist=2, maxdist=20)
            toc = time.time()
            print ("remove bad sample cost", toc-tic)
            tic = time.time()
            self.clusterFacetSamplesRNN(reduceRadius=10)
            toc = time.time()
            print ("cluster samples cost", toc-tic)
            tic = time.time()
            self.planContactpairs(torqueresist,dotnormpara=dotnormpara)
            toc = time.time()
            print ("plan contact pairs cost", toc-tic)
            self.saveSerialized("tmpcp.pickle")
        else:
            self.loadSerialized("tmpcp.pickle", objpath)
        self.handpkg = handpkg
        self.hand = handpkg.newHandNM(hndcolor=[0,1,0,.1])
        self.handfgrpcc_uninstanced = handpkg.newHandFgrpcc()
        self.handname = handpkg.getHandName()
        # gripcontactpairs_precc is the gripcontactpairs ([[p0,p1,p2],[p0',p1',p2']] pairs) after precc (collision free)
        # gripcontactpairnormals_precc is the gripcontactpairnormals ([[n0,n1,n2],[n0',n1',n2']] pairs) after precc
        # likewise, gripcontactpairfacets_precc is the [faceid0, faceid1] pair corresponding to the upper two
        self.gripcontactpairs_precc = None
        self.gripcontactpairnormals_precc = None
        self.gripcontactpairfacets_precc = None
        # the final results: gripcontacts: a list of [cct0, cct1]
        # griprotmats: a list of Mat4
        # gripcontactnormals: a list of [nrml0, nrml1]
        self.gripcontacts = None
        self.griprotmats = None
        self.gripjawwidth = None
        self.gripcontactnormals = None
        self.bulletworld = BulletWorld()
        # prepare the model for collision detection
        self.objgeom = pandageom.packpandageom_fn(self.objtrimesh.vertices, self.objtrimesh.face_normals, self.objtrimesh.faces)
        print ("number of vertices", len(self.objtrimesh.vertices))
        print ("number of faces", len(self.objtrimesh.faces))
        self.objmeshbullnode = cd.genCollisionMeshGeom(self.objgeom)
        self.bulletworld.attachRigidBody(self.objmeshbullnode)
        # for plot
        self.rtq85plotlist = []
        self.counter2 = 0
        # for dbupdate
        self.dbobjname = os.path.splitext(os.path.basename(objpath))[0]
    def removeHndcc(self, base, discretesize=8):
        """
        Handcc means hand collision detection

        Tries each precc contact pair at discretesize hand orientations and
        records the collision-free grasps in gripcontacts, griprotmats,
        gripjawwidth and gripcontactnormals.

        :param base: panda3d showbase, used for collision mesh generation
        :param discretesize: the number of hand orientations
        :return:

        author: weiwei
        date: 20161212, tsukuba
        """
        # isplotted = 0
        # if self.rtq85plotlist:
        #     for rtq85plotnode in self.rtq85plotlist:
        #         rtq85plotnode.removeNode()
        # self.rtq85plotlist = []
        self.gripcontacts = []
        self.griprotmats = []
        self.gripjawwidth = []
        self.gripcontactnormals = []
        # offset applied to each contact point along its facet normal
        plotoffsetfp = 6
        self.counter = 0
        while self.counter < self.facetpairs.shape[0]:
            # print str(self.counter) + "/" + str(self.facetpairs.shape[0]-1)
            # print self.gripcontactpairs_precc
            facetpair = self.facetpairs[self.counter]
            facetidx0 = facetpair[0]
            facetidx1 = facetpair[1]
            for j, contactpair in enumerate(self.gripcontactpairs_precc[self.counter]):
                for angleid in range(discretesize):
                    cctpnt0 = contactpair[0] + plotoffsetfp * self.facetnormals[facetidx0]
                    cctpnt1 = contactpair[1] + plotoffsetfp * self.facetnormals[facetidx1]
                    cctnormal0 = self.gripcontactpairnormals_precc[self.counter][j][0]
                    cctnormal1 = [-cctnormal0[0], -cctnormal0[1], -cctnormal0[2]]
                    # NOTE(review): self.hand is mutated and restored below,
                    # so this method is not safe to call re-entrantly.
                    tmphand = self.hand
                    # tmprtq85 = rtq85nm.Rtq85NM(hndcolor=[1, 0, 0, .1])
                    # save initial hand pose
                    initmat = tmphand.getMat()
                    fgrdist = np.linalg.norm((cctpnt0 - cctpnt1))
                    # skip pairs wider than the gripper can open
                    if fgrdist > self.hand.jawwidthopen:
                        continue
                    tmphand.setJawwidth(fgrdist)
                    # tmphand.lookAt(cctnormal0[0], cctnormal0[1], cctnormal0[2])
                    # rotax = [0, 1, 0]
                    rotangle = 360.0 / discretesize * angleid
                    # rotmat = rm.rodrigues(rotax, rotangle)
                    # tmphand.setMat(pandageom.cvtMat4(rotmat) * tmphand.getMat())
                    # axx = tmphand.getMat().getRow3(0)
                    # # 130 is the distance from hndbase to fingertip
                    # cctcenter = (cctpnt0 + cctpnt1) / 2 + 145 * np.array([axx[0], axx[1], axx[2]])
                    # tmphand.setPos(Point3(cctcenter[0], cctcenter[1], cctcenter[2]))
                    fc = (cctpnt0 + cctpnt1)/2.0
                    tmphand.gripAt(fc[0], fc[1], fc[2], cctnormal0[0], cctnormal0[1], cctnormal0[2], rotangle, jawwidth = fgrdist)
                    # collision detection
                    hndbullnode = cd.genCollisionMeshMultiNp(tmphand.handnp, base.render)
                    result = self.bulletworld.contactTest(hndbullnode)
                    if not result.getNumContacts():
                        # collision-free grasp: record it
                        self.gripcontacts.append(contactpair)
                        self.griprotmats.append(tmphand.getMat())
                        self.gripjawwidth.append(fgrdist)
                        self.gripcontactnormals.append(self.gripcontactpairnormals_precc[self.counter][j])
                    # reset initial hand pose
                    tmphand.setMat(initmat)
            self.counter+=1
        self.counter = 0
    def removeFgrpcc(self, base):
        """
        Fgrpcc means finger pre collision detection

        Fills gripcontactpairs_precc / gripcontactpairnormals_precc /
        gripcontactpairfacets_precc with the contact pairs whose finger
        approach volumes are collision free against the object mesh.

        :return:

        author: weiwei
        date: 20161212, tsukuba
        """
        self.gripcontactpairs_precc = []
        self.gripcontactpairnormals_precc = []
        self.gripcontactpairfacets_precc = []
        # offset applied to each contact point along its facet normal
        plotoffsetfp = 6
        self.counter = 0
        while self.counter < self.facetpairs.shape[0]:
            # print str(self.counter) + "/" + str(self.facetpairs.shape[0]-1)
            # print self.gripcontactpairs
            self.gripcontactpairs_precc.append([])
            self.gripcontactpairnormals_precc.append([])
            self.gripcontactpairfacets_precc.append([])
            facetpair = self.facetpairs[self.counter]
            facetidx0 = facetpair[0]
            facetidx1 = facetpair[1]
            for j, contactpair in enumerate(self.gripcontactpairs[self.counter]):
                cctpnt0 = contactpair[0] + plotoffsetfp * self.facetnormals[facetidx0]
                cctpnt1 = contactpair[1] + plotoffsetfp * self.facetnormals[facetidx1]
                cctnormal0 = self.facetnormals[facetidx0]
                cctnormal1 = [-cctnormal0[0], -cctnormal0[1], -cctnormal0[2]]
                # place an instanced finger model at each contact, aimed
                # along the (opposed) facet normals
                handfgrpcc0 = NodePath("handfgrpcc0")
                self.handfgrpcc_uninstanced.instanceTo(handfgrpcc0)
                handfgrpcc0.setPos(cctpnt0[0], cctpnt0[1], cctpnt0[2])
                handfgrpcc0.lookAt(cctpnt0[0] + cctnormal0[0], cctpnt0[1] + cctnormal0[1],
                                   cctpnt0[2] + cctnormal0[2])
                handfgrpcc1 = NodePath("handfgrpcc1")
                self.handfgrpcc_uninstanced.instanceTo(handfgrpcc1)
                handfgrpcc1.setPos(cctpnt1[0], cctpnt1[1], cctpnt1[2])
                handfgrpcc1.lookAt(cctpnt1[0] + cctnormal1[0], cctpnt1[1] + cctnormal1[1],
                                   cctpnt1[2] + cctnormal1[2])
                handfgrpcc = NodePath("handfgrpcc")
                handfgrpcc0.reparentTo(handfgrpcc)
                handfgrpcc1.reparentTo(handfgrpcc)
                # prepare the model for collision detection
                facetmeshbullnode = cd.genCollisionMeshMultiNp(handfgrpcc)
                result = self.bulletworld.contactTest(facetmeshbullnode)
                if not result.getNumContacts():
                    self.gripcontactpairs_precc[-1].append(contactpair)
                    self.gripcontactpairnormals_precc[-1].append(self.gripcontactpairnormals[self.counter][j])
                    self.gripcontactpairfacets_precc[-1].append(self.gripcontactpairfacets[self.counter])
            self.counter += 1
        self.counter=0
def saveToDB(self, gdb):
"""
save the result to mysqldatabase
:param gdb: is an object of the GraspDB class in the database package
:return:
author: weiwei
date: 20170110
"""
# save to database
gdb = db.GraspDB()
idhand = gdb.loadIdHand(self.handname)
idobject = gdb.loadIdObject(self.dbobjname)
sql = "SELECT * FROM freeairgrip, object WHERE freeairgrip.idobject LIKE '%s' AND \
freeairgrip.idhand LIKE '%s'" % (idobject, idhand)
result = gdb.execute(sql)
if len(result) > 0:
print( "Grasps already saved "
"or duplicated filename!")
isredo = raw_input("Do you want to overwrite the database? (Y/N)")
if isredo != "Y" and isredo != "y":
print("Grasp planning aborted.")
else:
sql = "DELETE FROM freeairgrip WHERE freeairgrip.idobject LIKE '%s' AND \
freeairgrip.idhand LIKE '%s'" % (idobject, idhand)
gdb.execute(sql)
print (self.gripcontacts)
for i in range(len(self.gripcontacts)):
sql = "INSERT INTO freeairgrip(idobject, contactpnt0, contactpnt1, \
contactnormal0, contactnormal1, rotmat, jawwidth, idhand) \
VALUES('%s', '%s', '%s', '%s', '%s', '%s', '%s', %d)" % \
(idobject, dc.v3ToStr(self.gripcontacts[i][0]), dc.v3ToStr(self.gripcontacts[i][1]),
dc.v3ToStr(self.gripcontactnormals[i][0]), dc.v3ToStr(self.gripcontactnormals[i][1]),
dc.mat4ToStr(self.griprotmats[i]), str(self.gripjawwidth[i]), idhand)
gdb.execute(sql)
    def removeFgrpccShow(self, base):
        """
        Fgrpcc means finger pre collision detection
        This one is specially written for demonstration

        Renders the current facet pair, its contact arrows and the finger
        collision models (red on collision, white when free) under a
        'tempplot' node; advances self.counter by one on each call.

        :return:

        author: weiwei
        date: 20161201, osaka
        """
        # 6 is used because I am supposing 4+2 where 4 is the default
        # margin of bullet in panda3d. (NOTE: This is a guess)
        plotoffsetfp = 6
        npbrchild = base.render.find("**/tempplot")
        if npbrchild:
            # npbrchild.removeNode()
            pass
        # for fast delete
        brchild = NodePath('tempplot')
        brchild.reparentTo(base.render)
        self.counter += 1
        if self.counter >= self.facetpairs.shape[0]:
            # wrap around to the first facet pair
            self.counter = 0
        facetpair = self.facetpairs[self.counter]
        facetidx0 = facetpair[0]
        facetidx1 = facetpair[1]
        geomfacet0 = pandageom.packpandageom_fn(self.objtrimesh.vertices+
                                                np.tile(plotoffsetfp*self.facetnormals[facetidx0],
                                                        [self.objtrimesh.vertices.shape[0],1]),
                                                self.objtrimesh.face_normals[self.facets[facetidx0]],
                                                self.objtrimesh.faces[self.facets[facetidx0]])
        geomfacet1 = pandageom.packpandageom_fn(self.objtrimesh.vertices+
                                                np.tile(plotoffsetfp*self.facetnormals[facetidx1],
                                                        [self.objtrimesh.vertices.shape[0],1]),
                                                self.objtrimesh.face_normals[self.facets[facetidx1]],
                                                self.objtrimesh.faces[self.facets[facetidx1]])
        # show the facetpair
        node0 = GeomNode('pair0')
        node0.addGeom(geomfacet0)
        star0 = NodePath('pair0')
        star0.attachNewNode(node0)
        facetcolorarray = self.facetcolorarray
        star0.setColor(Vec4(facetcolorarray[facetidx0][0], facetcolorarray[facetidx0][1],
                            facetcolorarray[facetidx0][2], facetcolorarray[facetidx0][3]))
        star0.setTwoSided(True)
        star0.reparentTo(brchild)
        node1 = GeomNode('pair1')
        node1.addGeom(geomfacet1)
        star1 = NodePath('pair1')
        star1.attachNewNode(node1)
        star1.setColor(Vec4(facetcolorarray[facetidx1][0], facetcolorarray[facetidx1][1],
                            facetcolorarray[facetidx1][2], facetcolorarray[facetidx1][3]))
        star1.setTwoSided(True)
        star1.reparentTo(brchild)
        for j, contactpair in enumerate(self.gripcontactpairs[self.counter]):
            cctpnt0 = contactpair[0] + plotoffsetfp * self.facetnormals[facetidx0]
            cctpnt1 = contactpair[1] + plotoffsetfp * self.facetnormals[facetidx1]
            # the following two choices decide the way to detect contacts
            cctnormal00 = np.array(self.gripcontactpairnormals[self.counter][j][0])
            cctnormal01 = -np.array(self.gripcontactpairnormals[self.counter][j][1])
            cctnormal0raw = (cctnormal00 + cctnormal01)
            cctnormal0 = (cctnormal0raw/np.linalg.norm(cctnormal0raw)).tolist()
            # the following two choices decide the way to detect contacts
            cctnormal10 = -cctnormal00
            cctnormal11 = -cctnormal01
            cctnormal1raw = (cctnormal10 + cctnormal11)
            cctnormal1 = (cctnormal1raw/np.linalg.norm(cctnormal1raw)).tolist()
            handfgrpcc0 = NodePath("handfgrpcc0")
            self.handfgrpcc_uninstanced.instanceTo(handfgrpcc0)
            handfgrpcc0.setPos(cctpnt0[0], cctpnt0[1], cctpnt0[2])
            handfgrpcc0.lookAt(cctpnt0[0] + cctnormal0[0], cctpnt0[1] + cctnormal0[1], cctpnt0[2] + cctnormal0[2])
            handfgrpcc1 = NodePath("handfgrpcc1")
            self.handfgrpcc_uninstanced.instanceTo(handfgrpcc1)
            handfgrpcc1.setPos(cctpnt1[0], cctpnt1[1], cctpnt1[2])
            handfgrpcc1.lookAt(cctpnt1[0] + cctnormal1[0], cctpnt1[1] + cctnormal1[1], cctpnt1[2] + cctnormal1[2])
            handfgrpcc = NodePath("handfgrpcc")
            handfgrpcc0.reparentTo(handfgrpcc)
            handfgrpcc1.reparentTo(handfgrpcc)
            # prepare the model for collision detection
            facetmeshbullnode = cd.genCollisionMeshMultiNp(handfgrpcc, brchild)
            result = self.bulletworld.contactTest(facetmeshbullnode)
            # plot the contact points found by the collision test
            for contact in result.getContacts():
                cp = contact.getManifoldPoint()
                base.pggen.plotSphere(brchild, pos=cp.getLocalPointA(), radius=3, rgba=Vec4(1, 0, 0, 1))
                base.pggen.plotSphere(brchild, pos=cp.getLocalPointB(), radius=3, rgba=Vec4(0, 0, 1, 1))
            # colliding fingers are shown red, free fingers white
            if result.getNumContacts():
                handfgrpcc0.setColor(1, 0, 0, .3)
                handfgrpcc1.setColor(1, 0, 0, .3)
            else:
                handfgrpcc0.setColor(1, 1, 1, .3)
                handfgrpcc1.setColor(1, 1, 1, .3)
            handfgrpcc0.setTransparency(TransparencyAttrib.MAlpha)
            handfgrpcc1.setTransparency(TransparencyAttrib.MAlpha)
            handfgrpcc0.reparentTo(brchild)
            handfgrpcc1.reparentTo(brchild)
            base.pggen.plotArrow(star0, spos=cctpnt0,
                                 epos=cctpnt0 + plotoffsetfp*self.facetnormals[facetidx0] + cctnormal0,
                                 rgba=[facetcolorarray[facetidx0][0], facetcolorarray[facetidx0][1],
                                       facetcolorarray[facetidx0][2], facetcolorarray[facetidx0][3]], length=10)
            base.pggen.plotArrow(star1, spos=cctpnt1,
                                 epos=cctpnt1 + plotoffsetfp*self.facetnormals[facetidx1] + cctnormal1,
                                 rgba=[facetcolorarray[facetidx1][0], facetcolorarray[facetidx1][1],
                                       facetcolorarray[facetidx1][2], facetcolorarray[facetidx1][3]], length=10)
    def removeFgrpccShowLeft(self, base):
        """
        Fgrpcc means finger pre collision detection.
        This one is specially written for demonstration:
        each call advances to the next facet pair and plots only the
        collision-free finger placements for that pair.

        :param base: the panda3d showbase providing base.render
        :return: None

        author: weiwei
        date: 20161212, tsukuba
        """
        # offset applied along the facet normal before placing a finger model
        # (assumes model units are mm -- TODO confirm)
        plotoffsetfp = 6
        # one facet pair per call; stop once all pairs have been shown
        self.counter += 1
        if self.counter >= self.facetpairs.shape[0]:
            return
        else:
            print(str(self.counter) + "/" + str(self.facetpairs.shape[0]-1))
            facetpair = self.facetpairs[self.counter]
            facetidx0 = facetpair[0]
            facetidx1 = facetpair[1]
            for j, contactpair in enumerate(self.gripcontactpairs[self.counter]):
                # push each contact point off the surface along its facet normal
                cctpnt0 = contactpair[0] + plotoffsetfp * self.facetnormals[facetidx0]
                cctpnt1 = contactpair[1] + plotoffsetfp * self.facetnormals[facetidx1]
                # the two finger normals are antiparallel
                cctnormal0 = self.facetnormals[facetidx0]
                cctnormal1 = [-cctnormal0[0], -cctnormal0[1], -cctnormal0[2]]
                # instance a finger model at each contact, aimed along its normal
                handfgrpcc0 = NodePath("handfgrpcc0")
                self.handfgrpcc_uninstanced.instanceTo(handfgrpcc0)
                handfgrpcc0.setPos(cctpnt0[0], cctpnt0[1], cctpnt0[2])
                handfgrpcc0.lookAt(cctpnt0[0] + cctnormal0[0], cctpnt0[1] + cctnormal0[1], cctpnt0[2] + cctnormal0[2])
                handfgrpcc1 = NodePath("handfgrpcc1")
                self.handfgrpcc_uninstanced.instanceTo(handfgrpcc1)
                handfgrpcc1.setPos(cctpnt1[0], cctpnt1[1], cctpnt1[2])
                handfgrpcc1.lookAt(cctpnt1[0] + cctnormal1[0], cctpnt1[1] + cctnormal1[1], cctpnt1[2] + cctnormal1[2])
                # group both fingers under one node for a single contact test
                handfgrpcc = NodePath("handfgrpcc")
                handfgrpcc0.reparentTo(handfgrpcc)
                handfgrpcc1.reparentTo(handfgrpcc)
                # prepare the model for collision detection
                facetmeshbullnode = cd.genCollisionMeshMultiNp(handfgrpcc)
                result = self.bulletworld.contactTest(facetmeshbullnode)
                # plot only collision-free placements (white, translucent)
                if not result.getNumContacts():
                    handfgrpcc0.setColor(1, 1, 1, .3)
                    handfgrpcc1.setColor(1, 1, 1, .3)
                    handfgrpcc0.setTransparency(TransparencyAttrib.MAlpha)
                    handfgrpcc1.setTransparency(TransparencyAttrib.MAlpha)
                    handfgrpcc0.reparentTo(base.render)
                    handfgrpcc1.reparentTo(base.render)
    def removeHndccShow(self, base, discretesize=8):
        """
        Handcc means hand collision detection.
        This one is developed for demonstration.
        This function should be called after executing removeHndcc.
        Each call tests one (facet pair, hand rotation angle) combination:
        collision-free grasps are recorded and drawn white, colliding ones
        are drawn dark red.

        :param base: the panda3d showbase providing base.render
        :param discretesize: the number of hand orientations
        :return: None

        author: weiwei
        date: 20161212, tsukuba
        """
        # isplotted = 0
        # remove the hand models plotted by the previous call
        if self.rtq85plotlist:
            for rtq85plotnode in self.rtq85plotlist:
                rtq85plotnode.removeNode()
        self.rtq85plotlist = []
        # reset the accumulated grasp results for this combination
        self.gripcontacts = []
        self.griprotmats = []
        self.gripjawwidth = []
        self.gripcontactnormals = []
        plotoffsetfp = 6
        # counter indexes facet pairs, counter2 indexes rotation angles;
        # counter advances only after counter2 has swept a full cycle
        if self.counter2 == 0:
            self.counter += 1
            if self.counter >= self.facetpairs.shape[0]:
                self.counter = 0
        self.counter2 += 1
        if self.counter2 >= discretesize:
            self.counter2 = 0
        print (str(self.counter) + "/" + str(self.facetpairs.shape[0]-1))
        facetpair = self.facetpairs[self.counter]
        facetidx0 = facetpair[0]
        facetidx1 = facetpair[1]
        for j, contactpair in enumerate(self.gripcontactpairs_precc[self.counter]):
            # NOTE(review): only the first contact pair (j == 0) of each facet
            # pair is processed here -- confirm this is intended for the demo
            if j == 0:
                print (j, contactpair)
                # for angleid in range(discretesize):
                angleid = self.counter2
                # push the contact points off the surface along the facet normals
                cctpnt0 = contactpair[0] + plotoffsetfp * self.facetnormals[facetidx0]
                cctpnt1 = contactpair[1] + plotoffsetfp * self.facetnormals[facetidx1]
                cctnormal0 = self.gripcontactpairnormals_precc[self.counter][j][0]
                cctnormal1 = [-cctnormal0[0], -cctnormal0[1], -cctnormal0[2]]
                tmprtq85 = rtq85nm.Rtq85NM(hndcolor=[1, 0, 0, .1])
                # save initial hand pose
                fgrdist = np.linalg.norm((cctpnt0 - cctpnt1))
                # skip pairs wider than the gripper's maximum opening
                if fgrdist > self.hand.jawwidthopen:
                    continue
                # tmprtq85.setJawwidth(fgrdist)
                # since fgrpcc already detects inner collisions
                rotangle = 360.0 / discretesize * angleid
                # grasp center: midpoint between the two contacts
                fc = (cctpnt0 + cctpnt1) / 2.0
                tmprtq85.gripAt(fc[0], fc[1], fc[2], cctnormal0[0], cctnormal0[1], cctnormal0[2], rotangle, fgrdist)
                # tmprtq85.lookAt(cctnormal0[0], cctnormal0[1], cctnormal0[2])
                # rotax = [0, 1, 0]
                # rotangle = 360.0 / discretesize * angleid
                # rotmat = rm.rodrigues(rotax, rotangle)
                # tmprtq85.setMat(pandageom.cvtMat4(rotmat) * tmprtq85.getMat())
                # axx = tmprtq85.getMat().getRow3(0)
                # # 130 is the distance from hndbase to fingertip
                # cctcenter = (cctpnt0 + cctpnt1) / 2 + 145 * np.array([axx[0], axx[1], axx[2]])
                # tmprtq85.setPos(Point3(cctcenter[0], cctcenter[1], cctcenter[2]))
                # collision detection
                self.hndbullnode = cd.genCollisionMeshMultiNp(tmprtq85.rtq85np, base.render)
                result = self.bulletworld.contactTest(self.hndbullnode)
                if not result.getNumContacts():
                    # collision-free: record the grasp and draw the hand white
                    self.gripcontacts.append(contactpair)
                    self.griprotmats.append(tmprtq85.getMat())
                    self.gripjawwidth.append(fgrdist)
                    self.gripcontactnormals.append(self.gripcontactpairnormals_precc[self.counter][j])
                    # pandageom.plotDumbbell(base.render, (cctpnt0+cctpnt1)/2, cctcenter, length=245, thickness=5, rgba=[.4,.4,.4,1])
                    # pandageom.plotAxisSelf(base.render, (cctpnt0+cctpnt1)/2+245*np.array([axx[0], axx[1], axx[2]]),
                    #                       tmprtq85.getMat(), length=30, thickness=2)
                    tmprtq85.setColor([1, 1, 1, .3])
                    tmprtq85.reparentTo(base.render)
                    self.rtq85plotlist.append(tmprtq85)
                    # isplotted = 1
                else:
                    # for contact in result.getContacts():
                    #     cp = contact.getManifoldPoint()
                    #     pandageom.plotSphere(brchild, pos=cp.getLocalPointA(), radius=3, rgba=Vec4(1, 0, 0, 1))
                    #     pandageom.plotSphere(brchild, pos=cp.getLocalPointB(), radius=3, rgba=Vec4(0, 0, 1, 1))
                    # colliding: draw the hand dark red for inspection
                    tmprtq85.setColor([.5, 0, 0, .3])
                    tmprtq85.reparentTo(base.render)
                    self.rtq85plotlist.append(tmprtq85)
def plotObj(self):
geomnodeobj = GeomNode('obj')
geomnodeobj.addGeom(self.objgeom)
npnodeobj = NodePath('obj')
npnodeobj.attachNewNode(geomnodeobj)
npnodeobj.reparentTo(base.render)
def showAllGrips(self):
"""
showAllGrips
:return:
author: weiwei
date: 20170206
"""
print( "num of grasps", len(self.gripcontacts))
# for i in range(len(self.gripcontacts)):
# # for i in range(2,3):
# hndrotmat = self.griprotmats[i]
# hndjawwidth = self.gripjawwidth[i]
# # show grasps
# # tmprtq85 = rtq85nm.Rtq85NM(hndcolor=[.7, .7, 0.7, .7])
# # tmprtq85 = rtq85nm.Rtq85NM(hndcolor=[0, 1, 0, .5])
# tmprtq85 = rtq85nm.Rtq85NM(hndcolor=[1, 1, 1, .5])
# tmprtq85.setMat(pandanpmat4=hndrotmat)
# tmprtq85.setJawwidth(hndjawwidth)
# # tmprtq85.setJawwidth(80)
# tmprtq85.reparentTo(base.render)
class FreeAirGrip(object):
    """
    Load precomputed free-air grips for an object from the grasp database.

    Raises ValueError when the database has no free-air grips for the
    object (i.e. grip planning has not been run yet).
    """
    def __init__(self, gdb, objname, handpkg):
        freeairgripdata = gdb.loadFreeAirGrip(objname, handname=handpkg.getHandName())
        if freeairgripdata is None:
            raise ValueError("Plan the freeairgrip first!")
        # unpack the five parallel result columns
        (self.freegripids,
         self.freegripcontacts,
         self.freegripnormals,
         self.freegriprotmats,
         self.freegripjawwidth) = freeairgripdata[:5]
if __name__=='__main__':
    # def updateworld(world, task):
    #     world.doPhysics(globalClock.getDt())
    #     return task.cont
    # set up the panda3d world with a fixed camera pose
    base = pandactrl.World(camp=[700,300,700], lookatp=[0,0,100])
    this_dir, this_filename = os.path.split(__file__)
    # candidate meshes; exactly one objpath assignment is left uncommented
    # objpath = os.path.join(this_dir, "objects", "sandpart.stl")
    # objpath = os.path.join(this_dir, "objects", "ttube.stl")
    # objpath = os.path.join(this_dir, "objects", "tool.stl")
    # objpath = os.path.join(this_dir, "objects", "tool2.stl")
    # objpath = os.path.join(this_dir, "objects", "planewheel.stl")
    # objpath = os.path.join(this_dir, "objects", "planelowerbody.stl")
    # objpath = os.path.join(this_dir, "objects", "planefrontstay.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay2.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay22.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay23.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay24.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay26.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay28.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay212.stl")
    # objpath = os.path.join(this_dir, "objects", "planerearstay215.stl")
    objpath = os.path.join(this_dir, "objects", "housing.stl")
    # objpath = os.path.join(this_dir, "objects", "housingshaft.stl")
    # objpath = os.path.join(this_dir, "objects", "bunnysim.stl")
    handpkg = rtq85nm
    # build the grasp planner and show the segmented mesh
    freegriptst = Freegrip(objpath, handpkg, readser=False, torqueresist = 50)
    freegriptst.segShow(base, togglesamples=False, togglenormals=False,
                        togglesamples_ref=False, togglenormals_ref=False,
                        togglesamples_refcls=False, togglenormals_refcls=False, alpha = 1)
# objpath0 = os.path.join(this_dir, "objects", "ttube.stl")
# objpath1 = os.path.join(this_dir, "objects", "tool.stl")
# objpath2 = os.path.join(this_dir, "objects", "planewheel.stl")
# objpath3 = os.path.join(this_dir, "objects", "planelowerbody.stl")
# objpath4 = os.path.join(this_dir, "objects", "planefrontstay.stl")
# objpath5 = os.path.join(this_dir, "objects", "planerearstay.stl")
# objpaths = [objpath0, objpath1, objpath2, objpath3, objpath4, objpath5]
# import time
# fo = open("foo.txt", "w")
# for objpath in objpaths:
# tic = time.perf_counter()
# freegriptst = Freegrip(objpath, ser=False, torqueresist = 50)
# freegriptst.removeFgrpcc(base)
# freegriptst.removeHndcc(base)
# toc = time.perf_counter()
# print toc-tic
# fo.write(os.path.basename(objpath)+' '+str(toc-tic)+'\n')
# fo.close()
# geom = None
# for i, faces in enumerate(freegriptst.objtrimesh.facets()):
# rgba = [np.random.random(),np.random.random(),np.random.random(),1]
# # geom = pandageom.packpandageom(freegriptst.objtrimesh.vertices, freegriptst.objtrimesh.face_normals[faces], freegriptst.objtrimesh.faces[faces])
# # compute facet normal
# facetnormal = np.sum(freegriptst.objtrimesh.face_normals[faces], axis=0)
# facetnormal = facetnormal/np.linalg.norm(facetnormal)
# geom = pandageom.packpandageom(freegriptst.objtrimesh.vertices +
# np.tile(0 * facetnormal,
# [freegriptst.objtrimesh.vertices.shape[0], 1]),
# freegriptst.objtrimesh.face_normals[faces],
# freegriptst.objtrimesh.faces[faces])
# node = GeomNode('piece')
# node.addGeom(geom)
# star = NodePath('piece')
# star.attachNewNode(node)
# star.setColor(Vec4(rgba[0],rgba[1],rgba[2],rgba[3]))
# # star.setColor(Vec4(.7,.4,0,1))
# star.setTwoSided(True)
# star.reparentTo(base.render)
# freegriptst.removeFgrpcc(base)
# def updateshow(task):
# freegriptst.pairShow(base, togglecontacts=True, togglecontactnormals=True)
# # print task.delayTime
# # if abs(task.delayTime-13) < 1:
# # task.delayTime -= 12.85
# return task.again
#
# taskMgr.doMethodLater(.5, updateshow, "tickTask")
tic = time.time()
freegriptst.removeFgrpcc(base)
toc = time.time()
print ("remove finger pre cc cost", toc-tic)
tic = time.time()
freegriptst.removeHndcc(base, discretesize=16)
toc = time.time()
print ("remove hand cc cost", toc-tic)
# # #
gdb = db.GraspDB()
freegriptst.saveToDB(gdb)
#
# def updateshow(task):
# # freegriptst.removeFgrpccShow(base)
# # freegriptst.removeFgrpccShowLeft(base)
# freegriptst.removeHndccShow(base)
# # # print task.delayTime
# # # if abs(task.delayTime-13) < 1:
# # # task.delayTime -= 12.85
# return task.again
# taskMgr.doMethodLater(.3, updateshow, "tickTask")
# taskMgr.add(updateshow, "tickTask")
# freegriptst.removeFgrpcc(base)
# freegriptst.removeHndcc(base)
# def updateworld(world, task):
# world.doPhysics(globalClock.getDt())
# return task.cont
#
# base.taskMgr.add(updateworld, "updateworld", extraArgs=[freegriptst.bulletworld], appendTask=True)
#
# debugNode = BulletDebugNode('Debug')
# debugNode.showWireframe(True)
# debugNode.showConstraints(True)
# debugNode.showBoundingBoxes(False)
# debugNode.showNormals(False)
# bullcldrnp = base.render.attachNewNode("bulletcollider")
# debugNP = bullcldrnp.attachNewNode(debugNode)
# debugNP.show()
# freegriptst.bulletworld.setDebugNode(debugNP.node())
# taskMgr.add(updateworld, "updateworld", extraArgs=[freegriptst.bulletworld], appendTask=True)
# freegriptst.showAllGrips()
    # replay previously saved grasps for a (hard-coded) object/hand pair;
    # NOTE(review): loads 'planerearstay22' although the planner above ran on
    # housing.stl -- confirm this is intentional
    data = gdb.loadFreeAirGrip('planerearstay22', 'rtq85')
    if data:
        freegripid, freegripcontacts, freegripnormals, freegriprotmats, freegripjawwidth = data
        print( len(freegripid))
        # render one translucent hand per stored grasp
        for i, freegriprotmat in enumerate(freegriprotmats):
            # if i>120 and i-120 < 30:
            rtqhnd = rtq85nm.Rtq85NM(hndcolor=[1, 1, 1, .2])
            rtqhnd.setMat(pandanpmat4=freegriprotmat)
            rtqhnd.setJawwidth(freegripjawwidth[i])
            rtqhnd.reparentTo(base.render)
    # dcam = loader.loadShader("depthmap.sha")
    # base.render.setShader(dcam)
    # base.render.setShaderAuto()
base.run() |
from typing import List
from clingo import ast
# pylint: disable=all
def atom(location: ast.Location, positive: bool, name: str, arguments: List) -> ast.AST:
    """
    Helper function to create an atom.

    Arguments:
    location  -- Location to use.
    positive  -- Classical sign of the atom.
    name      -- The name of the atom.
    arguments -- The arguments of the atom.
    """
    function = ast.Function(location, name, arguments, False)
    # negative atoms are wrapped in a classical-negation unary operation
    term = function if positive else ast.UnaryOperation(location, ast.UnaryOperator.Minus, function)
    return ast.SymbolicAtom(term)
|
import copy
import json
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import (APIRequestFactory, APIClient,
force_authenticate)
from rest_framework import status
from document_index.views import GroupList, GroupDetail
from document_index.models import Group, GroupTreeList
import document_index
from factories import GroupTreeListFactory, GroupFactory
class GetGroupPostData(object):
    """
    Supporting class. Reduce duplication by providing one canned group
    payload that tests can safely mutate.
    """
    group_post_data = {
        'name': 'Group Node Name Root',
        'description': 'Group Node Description Root',
        'comment': 'Group Node Comment Root',
    }
    @classmethod
    def get_group_post_data(cls):
        """Return a fresh shallow copy so callers cannot mutate the template."""
        return dict(cls.group_post_data)
class GroupListViewTest(TestCase):
    """
    TODO: This class may be superseded.

    Verify GET /groups/parent/0/ returns 200 both with and without a
    pre-existing tree.
    """
    def setUp(self):
        # a known user for client authentication
        self.user = User.objects.create_user(
            username='test', email='test@_', password='secret')

    def _logged_in_client(self):
        # fresh API client logged in as the setUp user
        api_client = APIClient()
        api_client.login(username='test', password='secret')
        return api_client

    def test_group_list_with_tree(self):
        GroupTreeListFactory(name='test').save()
        response = self._logged_in_client().get('/groups/parent/0/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_group_list_without_tree(self):
        response = self._logged_in_client().get('/groups/parent/0/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class GroupListCreateViewWithoutTreeTest(TestCase):
    """
    Test group create without pre-existing tree.
    """
    def setUp(self):
        self.user = User.objects.create_user(
            username='test', email='test@_', password='secret')

    def test_create_group_post_root_without_tree(self):
        """Create a new group via POST before tree exists."""
        payload = GetGroupPostData.get_group_post_data()
        request = APIRequestFactory().post('/groups/parent/', payload,
                                           format='json')
        force_authenticate(request, self.user)
        response = GroupList.as_view()(request, pk=0)
        # the POST must both create the group and bootstrap the tree
        group = Group.objects.get(name='Group Node Name Root')
        tree = GroupTreeList.objects.get(name='test')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(group.description, 'Group Node Description Root')
        self.assertEqual(tree.name, 'test')
class GroupListCreateViewWithTreeTest(TestCase):
    """
    Test group list and create view.
    """
    def setUp(self):
        """Create a user, a tree, and one root group."""
        self.user = User.objects.create_user(
            username='test', email='test@_', password='secret')
        self.tree = GroupTreeListFactory(name='test')
        self.tree.save()
        self.group = Group.add_root(tree_id=self.tree.id, owner=self.user,
            name='test group name', description='test group description',
            comment='test group comment')

    def _post_group(self, url, pk):
        # POST the canned payload to *url*, dispatching the view with *pk*
        request = APIRequestFactory().post(
            url, GetGroupPostData.get_group_post_data(), format='json')
        force_authenticate(request, self.user)
        return GroupList.as_view()(request, pk=pk)

    def test_create_group_post_root(self):
        """Create a new root group via POST."""
        response = self._post_group('/groups/parent/0/', 0)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_create_group_post_child(self):
        """Create a new child group via POST."""
        response = self._post_group('/groups/parent/', 1)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_list_group_get(self):
        """
        GET a list of groups consisting of only 1 which was created in setUp().
        """
        request = APIRequestFactory().get('/groups/parent/0/')
        force_authenticate(request, self.user)
        response = GroupList.as_view()(request, pk=0)
        first = response.data[0]
        self.assertEqual(first['name'], 'test group name')
        self.assertEqual(first['description'], 'test group description')
        self.assertEqual(first['comment'], 'test group comment')
        self.assertEqual(first['owner'], 'test')
class GroupDetailRetrieveUpdateDestroyTest(TestCase):
    """
    Tests for the GroupDetail view which implements Retrieve, Update, and
    Destroy methods.
    """
    def setUp(self):
        """Create a user, a tree, and four root groups to operate on."""
        self.user = User.objects.create_user(
            username='test', email='test@_', password='secret')
        self.tree = GroupTreeListFactory(name='test')
        self.tree.save()
        tree_id = self.tree.id
        self.group1 = Group.add_root(tree_id=tree_id, owner=self.user,
            name='test group 1 name', description='test group 1 description',
            comment='test group 1 comment')
        self.group2 = Group.add_root(tree_id=tree_id, owner=self.user,
            name='test group 2 name', description='test group 2 description',
            comment='test group 2 comment')
        self.group3 = Group.add_root(tree_id=tree_id, owner=self.user,
            name='test group 3 name', description='test group 3 description',
            comment='test group 3 comment')
        self.group4 = Group.add_root(tree_id=tree_id, owner=self.user,
            name='test group 4 name', description='test group 4 description',
            comment='test group 4 comment')

    def _logged_in_client(self):
        # fresh API client logged in as the setUp user
        client = APIClient()
        client.login(username='test', password='secret')
        return client

    def test_group_move(self):
        """
        Move group node to new parent.
        """
        client = self._logged_in_client()
        gnode1 = Group.objects.get(name='test group 1 name')
        gnode2 = Group.objects.get(name='test group 2 name')
        move_url = '/groups/{0}/move/'.format(gnode2.id)
        move_data = {'parent': gnode1.id}
        response = client.patch(move_url, data=json.dumps(move_data),
                                content_type='application/json')
        # was `if status == 200 ... else assertTrue(False)`: assert directly
        self.assertEqual(response.status_code, 200)
        gnode_parent = Group.objects.get(name='test group 1 name')
        # NOTE(review): this re-fetches gnode1 by name, so the equality is
        # trivially true -- it does not verify gnode2's new parent. Consider
        # asserting on gnode2.get_parent() instead.
        self.assertEqual(gnode1.id, gnode_parent.id)

    def test_group_update_put(self):
        """Fully replace group 3 via PUT and verify all fields changed."""
        gnode3 = Group.objects.get(name='test group 3 name')
        put_data = {'name': 'new test group 3 name',
                    'description': 'new test group 3 description',
                    'comment': 'new test group 3 comment',}
        client = self._logged_in_client()
        put_url = '/groups/{0}/'.format(gnode3.id)
        response = client.put(put_url, data=json.dumps(put_data),
                              content_type='application/json')
        # original had a stray dead `pass` here; assert the status directly
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        new_gnode3 = Group.objects.get(id=gnode3.id)
        self.assertEqual(new_gnode3.name, 'new test group 3 name')
        self.assertEqual(new_gnode3.description,
                         'new test group 3 description')
        self.assertEqual(new_gnode3.comment, 'new test group 3 comment')

    def test_group_update_patch(self):
        """Partially update group 4 via PATCH and verify the changed field."""
        gnode4 = Group.objects.get(name='test group 4 name')
        patch_data = {'name': 'new test group 4 name',}
        client = self._logged_in_client()
        patch_url = '/groups/{0}/'.format(gnode4.id)
        response = client.patch(patch_url, data=json.dumps(patch_data),
                                content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        new_gnode4 = Group.objects.get(id=gnode4.id)
        self.assertEqual(new_gnode4.name, 'new test group 4 name')
|
#!/usr/bin/env python3
import numpy as np
import csv
import sys
################################# Parameters ##################################
#sys.argv = ["SynData_PPMTF.py", "SmplA1"]
if len(sys.argv) < 2:
    print("Usage:",sys.argv[0],"[ParamA]" )
    # Missing arguments are an error: exit non-zero so shells/scripts can
    # detect the failure (the original exited with 0, i.e. "success").
    sys.exit(1)

# City
City = "TK"
#City = "OS"
# Training user index file (input)
TUserIndexFile = "data/tuserindex_XX.csv"
# POI index file (input)
POIIndexFile = "data/POIindex_XX.csv"
# Training transition tensor file (input)
TrainTransTensorFile = "data/traintranstensor_XX.csv"
# Training visit tensor file (input)
TrainVisitTensorFile = "data/trainvisittensor_XX.csv"
# Prefix of the model parameter file (input)
ModelParameterFile = "data/models_syntraces_XX/modelparameter"
# Prefix of the synthesized trace file (output)
SynTraceFile = "data/models_syntraces_XX/syntraces"
# Name of the model parameter A
ParamA = sys.argv[1]
#ParamA = "SmplA"
# Number of time periods
T = 12
# Number of columns in model parameters (A, B, C)
K = 32
#K = 16
# Number of iterations in Gibbs sampling
ItrNum = 100
#ItrNum = 10
# Threshold for a visit count
#VisThr = 0.5
VisThr = 0
# Minimum value of a visit count
VisitDelta = 0.00000001
# Minimum value of a transition count
TransDelta = 0.00000001
# Read trans from TrainTransTensorFile (1:yes, 0:no)
ReadTrans = 1
#ReadTrans = 0
# Read visits from TrainVisitTensorFile (1:yes, 0:no)
ReadVisit = 1
#ReadVisit = 0
# Number of traces per user
#TraceNum = 1
TraceNum = 60
# Number of time instants per time period
#TimInsNum = 1
TimInsNum = 12
# Increase visit-counts (normalized to [0,1]) for a specific location (home) at 6-7h & 19-24h (time_poi_dist) by Gamma
Gamma = 20
# Number of synthesized users
SynN = 2000
########################### Read model parameters #############################
# [output1]: A (N x K matrix)
# [output2]: B (M x K matrix)
# [output3]: C (M x K matrix)
# [output4]: D (T x K matrix)
def ReadModelParameters():
    """Load the factor matrices A, B, C, D from the model parameter CSVs.

    File names are built from the module-level globals ModelParameterFile,
    K, ItrNum, and ParamA. Returns the tuple (A, B, C, D) of numpy arrays.
    """
    def _load(suffix):
        # np.loadtxt opens and closes the file itself; the original code
        # additionally opened an unused handle per matrix, removed here.
        infile = ModelParameterFile + "_K" + str(K) + "_Itr" + str(ItrNum) + "_" + suffix + ".csv"
        return np.loadtxt(infile, delimiter=",")
    # Read model parameters A (user), B (POI from), C (POI to), D (time)
    A = _load(ParamA)
    B = _load("B")
    C = _load("C")
    D = _load("D")
    return A, B, C, D
############################## Synthesize traces ##############################
# [input1]: A (N x K matrix)
# [input2]: B (M x K matrix)
# [input3]: D (T x K matrix)
# [input4]: N -- Number of users
# [input5]: M -- Number of POIs
# [input6]: T -- Number of time periods
# [input7]: poi_dic ({poi_index: category})
def SynTraces(A, B, D, N, M, T, poi_dic):
    """Synthesize traces for SynN users via MH sampling and write them to
    SynTraceFile; home locations are written to home_<ParamA>.csv.

    NOTE(review): the transition factor matrix C is read from module scope,
    not passed as a parameter -- confirm this is intentional. Also relies on
    the module-level globals K, ItrNum, ParamA, SynN, TraceNum, TimInsNum,
    VisThr, VisitDelta, TransDelta, ReadTrans, ReadVisit, Gamma.
    """
    # Initialization
    ab = np.zeros(M)
    ad = np.zeros(M)
    # Output header information
    outfile = SynTraceFile + "_K" + str(K) + "_Itr" + str(ItrNum) + "_" + ParamA + ".csv"
    f = open(outfile, "w")
    print("user,trace_no,time_period,time_instant,poi_index,category", file=f)
    writer = csv.writer(f, lineterminator="\n")
    # Read transitions from TrainTransTensorFile --> trans
    if ReadTrans == 1:
        # binary mask: trans[i,j] = 1 iff transition i->j appears in training
        trans = np.zeros((M, M))
        g = open(TrainTransTensorFile, "r")
        reader = csv.reader(g)
        next(reader)
        for lst in reader:
            poi_index_from = int(lst[1])
            poi_index_to = int(lst[2])
            trans[poi_index_from,poi_index_to] = 1
        g.close()
    # Read visits from TrainVisitTensorFile --> visit
    if ReadVisit == 1:
        # binary mask: visit[t,i] = 1 iff POI i was visited at period t
        visit = np.zeros((T, M))
        g = open(TrainVisitTensorFile, "r")
        reader = csv.reader(g)
        next(reader)
        for lst in reader:
            poi_index_from = int(lst[1])
            time_id = int(lst[2])
            visit[time_id,poi_index_from] = 1
        g.close()
    HomeFileName = "home_" + ParamA + ".csv"
    g = open(HomeFileName, "w")
    # For each user
    for n in range(SynN):
        # Initialization
        time_poi_dist = np.zeros((T, M))
        time_poi_dist_sum = np.zeros(T)
        prop_mat = np.zeros((M, M))
        trans_vec = np.zeros(M)
        ################### Calculate the POI distributions ###################
        for t in range(T):
            # rank-K reconstruction: (A_n * D_t) . B_i
            ad = A[n, :] * D[t, :]
            for i in range(M):
                # Elements in a sampled visit tensor --> time_poi_dist
                time_poi_dist[t,i] = np.sum(ad * B[i, :])
                # Assign VisitDelta for an element whose value is less than VisThr
                if time_poi_dist[t,i] < VisThr:
                    time_poi_dist[t,i] = VisitDelta
                # Assign VisitDelta if there is no visits for time t & user i
                if ReadVisit == 1 and visit[t,i] == 0:
                    time_poi_dist[t,i] = VisitDelta
        # Normalize time_poi_dist (this is necessary for randomly sampling home_loc)
        for t in range(T):
            time_poi_dist_sum[t] = np.sum(time_poi_dist[t])
            if time_poi_dist_sum[t] > 0:
                time_poi_dist[t] /= time_poi_dist_sum[t]
            else:
                print("Error: All probabilities are 0 for user", n, "and time", t)
                sys.exit(-1)
        # Randomly sample home from the POI distribution at 6h --> home_loc
        # (inverse-CDF sampling over time period 0)
        rnd = np.random.rand()
        prob_sum = 0
        for i in range(M):
            prob_sum += time_poi_dist[0,i]
            if prob_sum >= rnd:
                break
        home_loc = i
        # print(home_loc)
        print(home_loc, file=g)
        # Increase visit-counts for home_loc at 6-7h & 18-21h (time_poi_dist) by Gamma
        for t in range(2):
            time_poi_dist[t,home_loc] += Gamma
        for t in range(T-2,T):
            time_poi_dist[t,home_loc] += Gamma
        # Normalize time_poi_dist at 6-7h & 18-21h (again)
        # NOTE(review): Gamma is added for t in [T-2, T) above, but the
        # re-normalization below covers [T-3, T); the extra row T-3 is already
        # normalized so this is harmless -- confirm which range was intended.
        for t in range(2):
            time_poi_dist_sum[t] = np.sum(time_poi_dist[t])
            if time_poi_dist_sum[t] > 0:
                time_poi_dist[t] /= time_poi_dist_sum[t]
            else:
                print("Error: All probabilities are 0 for user", n, "and time", t)
                sys.exit(-1)
        for t in range(T-3,T):
            time_poi_dist_sum[t] = np.sum(time_poi_dist[t])
            if time_poi_dist_sum[t] > 0:
                time_poi_dist[t] /= time_poi_dist_sum[t]
            else:
                print("Error: All probabilities are 0 for user", n, "and time", t)
                sys.exit(-1)
        #################### Calculate the proposal matrix ####################
        for i in range(M):
            ab = A[n, :] * B[i, :]
            # Elements in a sampled transition tensor (assign TransDelta for a small transition count) --> prop_mat
            for j in range(M):
                prop_mat[i,j] = max(np.sum(ab * C[j, :]), TransDelta)
                # Assign TransDelta if there is no transitions between i and j
                if ReadTrans == 1 and trans[i,j] == 0:
                    prop_mat[i,j] = TransDelta
            # Normalize prop_mat
            row_sum = np.sum(prop_mat[i])
            prop_mat[i] /= row_sum
        ########################## Synthesize traces ##########################
        poi_index_pre = 0
        # For each trace
        for trace_no in range(TraceNum):
            # For each time period
            for t in range(T):
                # For each time instant
                for ins in range(TimInsNum):
                    # Initial time period and initial event
                    if t == 0 and ins == 0:
                        # Randomly sample POI from the POI distribution
                        rnd = np.random.rand()
                        prob_sum = 0
                        for i in range(M):
                            prob_sum += time_poi_dist[t,i]
                            if prob_sum >= rnd:
                                break
                        poi_index = i
                    else:
                        ##### Transform poi_index_pre into poi_index via MH (Metropolis-Hastings) ######
                        # Calculate the transition vector --> trans_vec
                        trans_vec[poi_index_pre] = 0
                        for j in range(M):
                            if poi_index_pre != j:
                                # MH acceptance ratio for proposal poi_index_pre -> j
                                alpha = (time_poi_dist[t][j] * prop_mat[j,poi_index_pre]) / (time_poi_dist[t][poi_index_pre] * prop_mat[poi_index_pre,j])
                                trans_vec[j] = prop_mat[poi_index_pre,j] * min(1, alpha)
                        # self-transition takes the rejected probability mass
                        row_sum = np.sum(trans_vec)
                        trans_vec[poi_index_pre] = 1 - row_sum
                        # Transform poi_index_pre into poi_index via trans_vec
                        rnd = np.random.rand()
                        prob_sum = 0
                        for j in range(M):
                            prob_sum += trans_vec[j]
                            if prob_sum >= rnd:
                                break
                        poi_index = j
                    # Output an initial location ([user, trace_no, time_period, time_instant, poi_index, category])
                    s = [n, trace_no, t, ins, poi_index, poi_dic[poi_index]]
                    writer.writerow(s)
                    # Save the previous poi_index
                    poi_index_pre = poi_index
    f.close()
    g.close()
#################################### Main #####################################
# Fix a seed
#np.random.seed(1)
# Fix a seed using a random number in [0,2^32-1]
#np.random.seed(819081307) # Preliminary
np.random.seed(538173108) # Final (TK)
# Replace XX with City
TUserIndexFile = TUserIndexFile.replace("XX", City)
POIIndexFile = POIIndexFile.replace("XX", City)
TrainTransTensorFile = TrainTransTensorFile.replace("XX", City)
TrainVisitTensorFile = TrainVisitTensorFile.replace("XX", City)
ModelParameterFile = ModelParameterFile.replace("XX", City)
SynTraceFile = SynTraceFile.replace("XX", City)
# Number of training users --> N  (subtract 1 for the header line)
N = len(open(TUserIndexFile).readlines()) - 1
# Number of POIs --> M  (subtract 1 for the header line)
M = len(open(POIIndexFile).readlines()) - 1
# Read the POI index file --> poi_dic ({poi_index: category})
# NOTE(review): the handles opened here are never closed; harmless in a
# short-lived script, but `with open(...)` would be cleaner.
poi_dic = {}
f = open(POIIndexFile, "r")
reader = csv.reader(f)
next(reader)
for lst in reader:
    poi_dic[int(lst[1])] = lst[2]
# Read model parameters
A, B, C, D = ReadModelParameters()
# Synthesize traces
SynTraces(A, B, D, N, M, T, poi_dic)
|
import urllib.request
import gzip
import shutil
import tarfile
import os
# scratch directory the script chdirs into before downloading archives
working_directory ='pdf_download/'
# destination for extracted PDFs, relative to working_directory after chdir
pdf_directory = '../pdf/'
# tab-separated listing: pmc, pmid, year, OA package URL (one line per paper)
infile = '../data/pmcid_to_oa_url_for_new_papers.lst'
def download_pdf():
    """Download the article PDF for every record listed in *infile*.

    Changes into working_directory first; pdf_directory and infile are
    resolved relative to it from then on.
    """
    os.chdir(working_directory)
    # `with` closes the listing file deterministically
    # (the original leaked the handle).
    with open(infile) as listing:
        for line in listing:
            download_one_pdf(line)
def download_one_pdf(line):
    """Fetch one OA package, unpack it, and copy its article PDF.

    *line* is a tab-separated record `pmc \\t pmid \\t year \\t url`, e.g.:
    ## PMC5695854 29167799 2017 ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_package/00/33/PMC5695854.tar.gz
    ## PMC7332886 32670337 2020 ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_package/00/3c/PMC7332886.tar.gz
    The chosen PDF is copied to pdf_directory/<pmid>.pdf; progress and
    failures are reported on stdout.
    """
    [pmc, pmid, year, url] = line.strip().split('\t')
    # split the package URL into directory part and PMC*.tar.gz file name
    [url_path, file_name] = url.split('/PMC')
    url_path = url_path + '/'
    file_name = 'PMC' + file_name
    # url_path = 'ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_package/00/33/'
    # file_name = 'PMC5695854.tar.gz'
    file_dir = file_name.replace('.tar.gz', '')
    pdf_file = pdf_directory + pmid + '.pdf'
    if os.path.isfile(pdf_file):
        # fixed typo in the message: "alresdy" -> "already"
        print ("PDF is already downloaded: ", line)
        return
    try:
        urllib.request.urlretrieve(url_path + file_name, file_name)
        urllib.request.urlcleanup()
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate
        print ("ERROR downloading gzipped tar file: ", line)
        return
    try:
        tf = tarfile.open(file_name)
        # NOTE(review): extractall() on a downloaded archive is vulnerable to
        # path traversal (CVE-2007-4559); pass filter="data" on Python 3.12+
        # or validate member names before extracting.
        tf.extractall()
    except Exception:
        print ("ERROR found when untaring file: ", line)
        return
    pdf_files = []
    # name fragments / prefixes that mark supplementary (non-article) PDFs
    to_exclude_list = ["supplement", "_supp", ".sapp.pdf"]
    to_exclude_starter = ["supp_", "presentation", "image_", "table_", "datasheet", "data_sheet"]
    for filename in os.listdir(file_dir):
        if filename == 'main.pdf':
            # an explicit main.pdf always wins
            pdf_files = [filename]
            break
        if filename.endswith(".pdf"):
            lowered = filename.lower()
            # skip PDFs whose name marks them as supplementary material
            if any(keyword in lowered for keyword in to_exclude_list):
                continue
            if any(lowered.startswith(keyword) for keyword in to_exclude_starter):
                continue
            pdf_files.append(filename)
    if len(pdf_files) == 1:
        filename = pdf_files[0]
        shutil.copy(file_dir + '/' + filename, pdf_file)
        print ("FOUND one PDF: ", line)
    elif len(pdf_files) > 1:
        print (pdf_files)
        # heuristic: the article PDF usually has the shortest file name;
        # min(..., key=len) keeps the first of equally short names, matching
        # the original scan. The original also had an unreachable
        # `right_pdf == ''` branch here, now removed.
        right_pdf = min(pdf_files, key=len)
        print ("right_pdf="+right_pdf)
        right_pdf_root = right_pdf.replace('.pdf', '').replace('_Article', '').lower()
        # accept the pick only if every other candidate shares its name root
        # (i.e. looks like a derivative), otherwise ask for a manual check
        bad = 0
        for file in pdf_files:
            if file == right_pdf:
                continue
            if not file.lower().startswith(right_pdf_root):
                bad = 1
                break
        if bad == 0:
            shutil.copy(file_dir + '/' + right_pdf, pdf_file)
            print ("Picked one good PDF: ", line)
        else:
            print (pdf_files)
            print ("Need manual check/pick PDF: ", line)
    else:
        print ("NO PDF FOUND: ", line)
# script entry point: download every PDF listed in infile
if __name__ == "__main__":
    download_pdf()
|
#
# Test that every procedure's functionName parameter works correctly
# Francois Maillet, 22 sept 2015
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
import random, datetime
from mldb import mldb, ResponseException
# Create toy dataset: 50 rows with three gaussian features whose means depend
# on a boolean label that is true ~20% of the time, so classifiers and
# clusterers trained below have signal to find.
dataset_config = {
    'type' : 'sparse.mutable',
    'id' : "toy"
}

dataset = mldb.create_dataset(dataset_config)
now = datetime.datetime.now()

for i in range(50):
    label = random.random() < 0.2
    dataset.record_row("u%d" % i, [["feat1", random.gauss(5 if label else 15, 3), now],
                                   ["feat2", random.gauss(-5 if label else 10, 10), now],
                                   ["feat3", random.gauss(52 if label else 30, 40), now],
                                   ["label", label, now]])

dataset.commit()
def do_checks(conf):
    """Create the procedure described by *conf*, run it, and verify that the
    function named in conf["params"]["functionName"] was created by the run.
    """
    proc_type = conf["type"]
    mldb.log(">> Checking " + proc_type)

    # Create the procedure, then trigger one run of it.
    mldb.log(mldb.put("/v1/procedures/" + proc_type, conf))
    mldb.log(mldb.post("/v1/procedures/" + proc_type + "/runs"))

    # The run must have registered the configured function.
    mldb.log(mldb.get("/v1/functions/" + conf["params"]["functionName"]))
# classifier.train -> classifier
conf = {
    "type": "classifier.train",
    "params": {
        "trainingData":
            "select {* EXCLUDING(label)} as features, label from toy",
        "modelFileUrl": "file://build/x86_64/tmp/bouya.cls",
        "algorithm": "glz",
        "mode": "boolean",
        "configuration": {
            "glz": {
                "type": "glz",
                "verbosity": 3,
                "normalize": False,
                "regularization": 'l2'
            }
        },
        "functionName": "cls_func"
    }
}
do_checks(conf)
# kmeans.train -> kmeans
conf = {
    "type": "kmeans.train",
    "params": {
        "trainingData": "select * excluding(label) from toy",
        "modelFileUrl": "file://tmp/MLDB-926.mks",
        "centroidsDataset": {"id": "kmean_out", "type": "sparse.mutable" },
        "functionName": "kmeans_func"
    }
}
do_checks(conf)
# test also the error code returned: with runOnCreation and no model file URL,
# creating the procedure must fail with a 400 carrying error details.
del conf['params']['modelFileUrl']
conf['params']['runOnCreation'] = True
try:
    mldb.put("/v1/procedures/failing_kmeans", conf)
except ResponseException as exc:
    rez = exc.response
else:
    assert False, 'should not be here 1'
response = rez.json()
mldb.log(response)
assert rez.status_code == 400, 'expecting call to fail when no model file URL'
assert 'error' in response, 'expecting the error message to appear'
assert 'httpCode' in response, 'expecting an httpCode for the run error'
# an unusable model file URL must fail the same way
conf['params']['modelFileUrl'] = "not://a/valid/path"
conf['params']['runOnCreation'] = True
try:
    mldb.put("/v1/procedures/failing_kmeans2", conf)
except ResponseException as exc:
    rez = exc.response
else:
    assert False, 'should not be here 2'
response = rez.json()
mldb.log(response)
assert rez.status_code == 400, 'expecting call to fail when no model file URL'
assert 'error' in response, 'expecting the error message to appear'
assert 'httpCode' in response, 'expecting an httpCode for the run error'
# probabilizer.train -> probabilizer (reuses cls_func trained above)
conf = {
    "type": "probabilizer.train",
    "params": {
        "trainingData":
            "select cls_func({{* EXCLUDING(label)} as features})[score] as score, label from toy",
        "modelFileUrl": "file://build/x86_64/tmp/bouya-proba.json",
        "functionName": "probabilizer_func"
    }
}
do_checks(conf)
# svd.train -> svd.embedRow
conf = {
    "type": "svd.train",
    "params": {
        "trainingData": "select * from toy",
        "modelFileUrl": "file://build/x86_64/tmp/bouya-svd.model",
        "functionName": "svd_func"
    }
}
do_checks(conf)
# tsne.train -> tsne.embedRow
conf = {
    "type": "tsne.train",
    "params": {
        "trainingData": "select * from toy",
        "modelFileUrl": "file://build/x86_64/tmp/bouya-tsne.model",
        "numOutputDimensions": 2,
        "functionName": "tsne_func"
    }
}
do_checks(conf)
# NOTE(review): `request` is not defined in this file -- presumably injected by
# the MLDB test harness to report the script's overall status; confirm.
request.set_return("success")
|
# math2d_polyline.py
import copy
import math
from math2d_vector import Vector
from math2d_triangle import Triangle
from math2d_line_segment import LineSegment
class Polyline(object):
    """An open chain of 2D points connected by straight line segments."""

    def __init__(self):
        self.vertex_list = []

    def Copy(self):
        """Return a deep copy of this polyline."""
        return copy.deepcopy(self)

    def Serialize(self):
        """Return a JSON-serializable dict representation of the polyline."""
        json_data = {
            'vertex_list': [vertex.Serialize() for vertex in self.vertex_list]
        }
        return json_data

    def Deserialize(self, json_data):
        """Load vertices from *json_data*; returns self for chaining."""
        self.vertex_list = [Vector().Deserialize(vertex) for vertex in json_data.get('vertex_list', [])]
        return self

    def Render(self):
        """Draw the polyline as an OpenGL line strip."""
        from OpenGL.GL import glBegin, glEnd, glVertex2f, GL_LINE_STRIP
        glBegin(GL_LINE_STRIP)
        try:
            for i in range(len(self.vertex_list)):
                point = self.vertex_list[i]
                glVertex2f(point.x, point.y)
        finally:
            glEnd()

    def GenerateLineSegments(self):
        """Yield a LineSegment for each consecutive pair of vertices."""
        for i in range(len(self.vertex_list) - 1):
            yield LineSegment(point_a=self.vertex_list[i], point_b=self.vertex_list[i + 1])

    def Length(self):
        """Return the total arc length (sum of all segment lengths)."""
        total_length = 0.0
        # BUG FIX: was `self.vertex_lsit` (typo), which raised AttributeError
        # for any polyline with at least two vertices.
        for i in range(len(self.vertex_list) - 1):
            total_length += (self.vertex_list[i + 1] - self.vertex_list[i]).Length()
        return total_length
from djaveAPI.find_models import publishable_model_from_name
from djaveAPI.problem import Problem
def get_publishable_model(model_name):
    """Resolve *model_name* to its publishable model, or raise a 404 Problem."""
    model = publishable_model_from_name(model_name)
    if model:
        return model
    # Unknown model name -> surface as an API-level 404.
    raise Problem(
        'There is no {} API'.format(model_name), status_code=404)
|
# Alex Ciaramella and Greg Suner
# Abstract Tournament Class
# Tournament is observable while players are observers
import Message
import Observable
import ScoreKeeper
class Tournament(Observable.Observable):
    """Abstract tournament, observable by its players and display.

    Subclasses drive the schedule by implementing create_next_match().
    """

    def __init__(self):
        # set up a list of players when tournament is initialized
        Observable.Observable.__init__(self)
        self.playerList = []
        self.game = None
        self.display = None
        self.scorekeeper = ScoreKeeper.ScoreKeeper()

    def attach_display(self, display):
        """Attach a display and subscribe it to tournament notifications."""
        self.display = display
        self.add_observer(self.display)

    def get_players(self):
        """Return the players in the tournament."""
        return self.playerList

    def run(self):
        """Play matches until create_next_match() returns None, then wrap up."""
        self.begin_tournament()
        while True:
            match = self.create_next_match()
            if match is None:
                break
            self.play_match(match)
        self.end_tournament()
        self.scorekeeper.print_final_stats()

    def create_next_match(self):
        """Return the next match to be played, or None when the tournament
        is over. Abstract: subclasses must override."""
        pass

    def register_player(self, player):
        """Add *player* to the tournament and subscribe them to notifications."""
        self.playerList.append(player)
        self.add_observer(player)

    def set_game(self, game):
        """Store a reference to the type of game we will be playing."""
        self.game = game

    def get_result(self, moves):
        """Compute the result of a round based on the players' moves."""
        return self.game.get_result(moves)

    def play_match(self, match):
        """Play the next match and announce its final result."""
        players = match[0]
        self.start_match(players)
        result = self.play_rounds(match)
        self.end_match(players, result)

    def play_rounds(self, match):
        """Play every round of *match*; return the result of the final round.

        BUG FIX: previously no value was returned (a `return` inside the loop
        would have ended the match after one round), so end_match() always
        received None. The last round's result is now returned after the
        loop completes.
        """
        players = match[0]
        rounds = match[1]
        result = None
        for i in range(rounds):
            self.start_round(players)
            moves = []
            for p in players:
                moves.append(p.play())
            result = self.get_result(moves)
            self.end_round(players, moves, result)
        return result

    def begin_tournament(self):
        """Notify players the tournament has begun (hook for subclasses)."""
        pass

    def end_tournament(self):
        """Announce results of the tournament to all players."""
        # BUG FIX: previously referenced the undefined name `players`
        # (NameError); the registered player list is the intended audience.
        message = Message.Message.get_tournament_end_message(self.playerList)
        self.notify_all(message)

    def start_match(self, players):
        """Send a message listing the players in the current match."""
        message = Message.Message.get_match_start_message(players)
        self.notify_all(message)

    def end_match(self, players, result):
        """Send a message containing the result of the match."""
        message = Message.Message.get_match_end_message(players, result)
        self.notify_all(message)

    def start_round(self, players):
        """Send a message containing the players in the next game."""
        message = Message.Message.get_round_start_message(players)
        self.notify_all(message)

    def end_round(self, players, moves, result):
        """Record the round with the scorekeeper and broadcast its outcome."""
        # The highest score wins; a tie awards no winner.
        if(result[0] == result[1]):
            winner = None
        else:
            winner = players[result.index(max(result))]
        self.scorekeeper.update_tournament(players, winner, result)
        message = Message.Message.get_round_end_message(players, moves, result)
        self.notify_all(message)
|
from zaggregator.procbundle import *
from zaggregator.utils import *
# Package name, exposed as a module-level constant.
name = "zaggregator"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# python client for ASL
# asl generator and pipeline
# prototype and demo
import json
import sys
import os
import xml.dom.minidom
import StringIO
import re
import logging
import uuid
# locate analyic_server from the build/checker library
# sys.path.append(sys.path.append(os.path.join(os.path.dirname(__file__),"../../../../build/checker/lib")))
from analytic_server import analyticServer
from aspy.analytic_server import analyticServer
logger = logging.getLogger('aspy')
class as_datamodel(object):
    """Parsed view of an Analytic Server datamodel XML document.

    Collects a (name, storage) tuple for every <Field> element.
    """
    def __init__(self,dmxml):
        self.dmxml = dmxml
        self.fields = []
        # Parse the XML string in memory (Python 2 StringIO module).
        doc = xml.dom.minidom.parse(StringIO.StringIO(self.dmxml))
        fields = doc.getElementsByTagName("Field")
        for field in fields:
            name = field.getAttribute("name")
            storage = field.getAttribute("storage")
            self.fields.append((name,storage))
    def __repr__(self):
        return str(self.fields)
    def getFieldNames(self):
        """Return the field names in document order."""
        return [name for (name,_) in self.fields]
class as_resultset(object):
    """Iterator over the records of a server-side datasource.

    Python 2 style iterator: the first next() yields the header row (the
    field names); subsequent calls yield data rows, fetched lazily from the
    server in batches of `batchsize`.
    """
    def __init__(self,server,dsname,removeOnClose=False):
        self.server = server
        self.dsname = dsname
        self.removeOnClose = removeOnClose  # drop the datasource on close()
        dmxml = self.server.get_datasource_datamodel(dsname)
        self.dm = as_datamodel(dmxml)
        self.row = -1           # -1 means "header row not yet emitted"
        self.batchsize = 1000
        self.batchdata = []     # rows fetched but not yet yielded
    def reset(self):
        """Rewind the iterator back to the header row."""
        self.row = -1
        self.batchsize = 1000
        self.batchdata = []
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol (would be __next__ on Python 3).
        if self.row == -1:
            self.row += 1
            return self.dm.getFieldNames()
        else:
            if len(self.batchdata) == 0:
                self.getBatch()
            if len(self.batchdata) == 0:
                raise StopIteration
            result = self.batchdata[0]
            self.batchdata = self.batchdata[1:]
            self.row += 1
            return result
    def getBatch(self):
        # Fetch the next batch starting at the current row offset
        # (self.row is 0 right after the header, so the first request
        # starts at record 0).
        self.batchdata = json.loads(self.server.get_datasource_records(self.dsname,self.row,self.batchsize))
    def close(self):
        """Release the result set, dropping the datasource if requested."""
        if self.removeOnClose:
            self.server.remove_datasource(self.dsname)
    def toPandas(self):
        """Materialize the entire result set as a pandas DataFrame."""
        import pandas as pd
        self.reset()
        headers = {}
        data = {}
        # First yielded row is the header; the rest are data rows.
        rows = [row for row in self]
        headers = rows[0]
        data = rows[1:]
        dd = {}
        for i in range(0,len(headers)):
            header = headers[i]
            column = [row[i] for row in data]
            dd[header] = column
        return pd.DataFrame(dd)
class model_node(object):
    """Marker base class for pipeline stages that train a model; run()
    treats a trailing model_node like a terminal write stage."""
    def __init__(self):
        pass
class as_pipeline(object):
    """Builder for an ASL pipeline.

    Each fluent method appends a stage object to self.ops; repr() renders
    the chain as ASL text ("op1 -> op2 -> ...") and run() executes it on
    the server.
    """
    def __init__(self,server):
        self.server = server
        self.ops = []
    class as_datasource_read(object):
        # Pipeline source: read records from a named datasource.
        def __init__(self,datasource):
            self.ds = datasource
        def to_asl(self):
            return "read(%s)"%(json.dumps({"dataSource":self.ds}))
    class as_aggregate(object):
        # Group-by aggregation. Each function is either "count" or a
        # "<aggfn>_<inputField>" string such as "sum_price".
        def __init__(self,keys,functions):
            self.keys = keys
            self.functions = functions
        def to_asl(self):
            conf = {}
            output = []
            for key in self.keys:
                output.append({ "input":key, "function":"key"})
            for function in self.functions:
                if function == "count":
                    output.append({ "function":"count" })
                else:
                    # split "<aggfn>_<input>" at the first underscore
                    aggfn = function[:function.index("_")]
                    inp = function[function.index("_")+1:]
                    output.append({ "outName":function, "input":inp, "function":aggfn })
            conf["output"] = output
            return "aggregate(%s)"%(json.dumps(conf))
    class as_select(object):
        # Row filter built from the source text of a Python lambda.
        def __init__(self,expr):
            # extract the parameter name and the body from "lambda x: <body>"
            m = re.search('lambda ([^:]+):(.*)',expr)
            self.varname = m.group(1)
            self.body = m.group(2)
        def to_asl(self):
            return "filterRecords(_,ctx => %s => %s)"%(self.varname,self.body)
    class as_derive(object):
        # Derived-field stage built from a field name and a lambda string.
        def __init__(self,name,expr):
            m = re.search('lambda ([^:]+):(.*)',expr)
            self.name = name
            self.varname = m.group(1)
            self.body = m.group(2)
        def to_asl(self):
            return "deriveField(_,ctx => %s => (\"%s\",%s))"%(self.varname,self.name,self.body)
    class as_tree(model_node):
        # Decision-tree training stage (terminal; see run()).
        def __init__(self,teConf,outCon):
            self.teConf = teConf
            self.outCon = outCon
        def to_asl(self):
            asl = "cf_tree:tree(%s,_,%s)"%(json.dumps(self.teConf),json.dumps(self.outCon))
            return asl
    class as_linear(model_node):
        # Linear-regression training stage (terminal; see run()).
        def __init__(self,teConf,outCon):
            self.teConf = teConf
            self.outCon = outCon
        def to_asl(self):
            asl = "cf_alm:linreg(%s,_,%s)"%(json.dumps(self.teConf),json.dumps(self.outCon))
            return asl
    class as_scoring(object):
        # Scoring stage: apply a trained model to the record stream.
        def __init__(self,scoreConf,inputConf):
            self.scoreConf = scoreConf
            self.inputConf = inputConf
        def to_asl(self):
            asl = "cf_scoring:scoring(%s,%s,_)"%(json.dumps(self.scoreConf),json.dumps(self.inputConf))
            return asl
    class as_datasource_write(object):
        # Pipeline sink: write records into a datasource.
        def __init__(self,datasource,mode):
            self.ds = datasource
            self.mode = mode
        def to_asl(self):
            return "write(%s)"%(json.dumps({"dataSource":self.ds,"mode":self.mode}))
    def read_datasource(self,datasource):
        """Start the pipeline from *datasource* (resets any existing ops)."""
        self.ops = [as_pipeline.as_datasource_read(datasource)]
        return self
    def aggregate(self,keys,functions):
        self.ops.append(as_pipeline.as_aggregate(keys,functions))
        return self
    def select(self,lf):
        self.ops.append(as_pipeline.as_select(lf))
        return self
    def derive(self,name,expr):
        self.ops.append(as_pipeline.as_derive(name,expr))
        return self
    def tree(self,scoreConf,inputConf):
        self.ops.append(as_pipeline.as_tree(scoreConf,inputConf))
        return self
    def linear(self,scoreConf,inputConf):
        self.ops.append(as_pipeline.as_linear(scoreConf,inputConf))
        return self
    def scoring(self,teConf,outCon):
        self.ops.append(as_pipeline.as_scoring(teConf,outCon))
        return self
    def write_datasource(self,datasource,mode):
        self.ops.append(as_pipeline.as_datasource_write(datasource,mode))
        return self
    def __repr__(self):
        # Render the chain as "op1 -> op2 -> ..." ASL text.
        s = ""
        for i in range(0,len(self.ops)):
            if i:
                s += " -> "
            s += self.ops[i].to_asl()
        return s
    def run(self):
        """Execute the pipeline on the server.

        A pipeline that already ends in a write stage (or a model-training
        node) runs as-is and returns None; otherwise a temporary datasource
        is created and written, and an as_resultset over it is returned.
        """
        if len(self.ops) > 0 and isinstance(self.ops[0],as_pipeline.as_datasource_read):
            if isinstance(self.ops[-1],as_pipeline.as_datasource_write) or isinstance(self.ops[len(self.ops) - 1],model_node):
                self.server.run(self)
                return None
            else:
                dsName = "ds-" + str(uuid.uuid4())  # unique temp datasource name
                self.server.create_writable_datasource(dsName)
                self.write_datasource(dsName,"overwrite")
                self.server.run(self)
                # drop the temporary write op so the pipeline can be reused
                self.ops = self.ops[:-1]
                return as_resultset(self.server,dsName,True)
        else:
            raise Exception("cannot run malformed pipeline")
class analytic_server(object):
    """High-level facade over the low-level analyticServer RPC client."""

    def __init__(self, host, port, useSSL, user, password):
        # NOTE(review): useSSL is accepted but not forwarded -- the client is
        # always created with useSSL=False. Confirm whether this is intended.
        self.server = analyticServer(host,port, None, False)
        self.server.login(user,password)

    def run(self,pipeline,project="public"):
        """Run *pipeline* (its str() is the ASL text) inside *project*.

        The project is locked for the duration of the run and committed
        afterwards, even if the run raises.
        """
        # BUG FIX: the `project` argument was previously ignored ("public"
        # was hard-coded everywhere). The default preserves old behaviour.
        self.server.lock_project(project)
        try:
            self.server.run_asl(project,str(pipeline))
        finally:
            self.server.commit_project(project)

    def remove_datasource(self,name):
        """Delete the named datasource on the server."""
        self.server.remove_datasource(name)

    def create_writable_datasource(self,name,project="public"):
        """Create a new writable datasource in *project*."""
        self.server.create_writable_datasource(name,project)

    def get_datasource_records(self,name,start,count):
        """Fetch up to *count* records of *name* starting at *start*."""
        return self.server.get_datasource_records(name,start,count)

    def get_datasource_datamodel(self,name):
        """Fetch the datamodel XML of the named datasource."""
        return self.server.get_datasource_datamodel(name)

    def create_pipeline(self):
        """Return a new, empty as_pipeline bound to this server."""
        return as_pipeline(self)
|
from dnnv.properties import expressions
from dnnv.properties.expressions.terms.constant import Constant
from dnnv.properties.visitors import ExpressionVisitor
def test_ExpressionVisitor():
    """Smoke test: the base visitor accepts a lone symbol and a nested
    boolean expression without raising."""
    visitor = ExpressionVisitor()

    # A bare symbol.
    visitor.visit(expressions.Symbol("A"))

    # A nested expression exercising And/Constant/Or/Implies.
    lhs = Constant(False)
    alt = expressions.Symbol("B")
    implication = expressions.Implies(expressions.Symbol("C"), expressions.Symbol("D"))
    visitor.visit(expressions.And(lhs, expressions.Or(alt, implication)))
|
from tkinter import *
from tkinter import messagebox
from modules.Sounds import click
from modules.Credentials import db_connection, bookTable, bid_check
def add_check():
    """Return True when the Add Book flag file contains exactly "1".

    add_open() appends one "1" per open attempt, so exactly one "1" means
    no other Add Book window is currently open.
    """
    with open(r"counters\addbook.txt", "r") as file:
        # Idiom fix: return the comparison directly instead of
        # `True if a == "1" else False`.
        return file.read() == "1"
def add_open():
    """Append a "1" marker to the flag file, recording an open attempt."""
    with open(r"counters\addbook.txt", "a") as flag_file:
        flag_file.write("1")
def add_close():
    """Truncate the flag file, marking the Add Book window as closed."""
    with open(r"counters\addbook.txt", "w") as flag_file:
        flag_file.write("")
def bookRegister():
    """Read the Add Book form fields, validate them, and insert the book row.

    Uses the module-level Entry/BooleanVar widgets created by addBookWin().
    """
    bid = bidEntry.get()
    title = titleEntry.get()
    author = authEntry.get()
    status = statusVar.get()
    issued = 1 if status else 0
    issuedTo = issuedToEntry.get()
    # All fields required; "issued to" only when the issued box is ticked.
    if bid == "" or title == "" or author == "" or (status and not issuedTo):
        messagebox.showwarning("Caution", "All fields must be filled")
        AddBookWindow.lift()
        return
    else:
        try:
            con = db_connection()
            cur = con.cursor()
            if bid_check(bid, con, cur):
                messagebox.showwarning("Caution", "Book ID already exist.")
                AddBookWindow.lift()
                return
            # NOTE(review): SQL is built with f-strings from user-entered
            # text -- vulnerable to SQL injection. Switch to parameterized
            # queries (placeholder style depends on the driver behind
            # db_connection(); verify before changing).
            if status:
                cur.execute(
                    f"insert into {bookTable} values('{bid}','{title}','{author}',{issued},'{issuedTo}')"
                )
            else:
                cur.execute(
                    f"insert into {bookTable}(bid, title, author, issued) values('{bid}','{title}','{author}',{status})"
                )
            con.commit()
            con.close()
            messagebox.showinfo("Success", f"Book named {title} added successfully.")
            AddBookWindow.destroy()
            add_close()
        except:
            # NOTE(review): bare except hides the real DB error; consider
            # catching the driver's error class and logging the exception.
            messagebox.showerror("Error", "Can't add data into Database")
            AddBookWindow.destroy()
            addBookWin()
def addBookWin():
    """Open the Add Book window: entries for id/title/author, an "issued"
    checkbox that enables the issued-to entry, and ADD/CANCEL buttons.

    Guarded by the flag file (add_check) so only one window is open at a
    time; the widgets are published as globals for bookRegister().
    """
    if add_check():
        global bidEntry, titleEntry, authEntry, statusVar, issuedToEntry, AddBookWindow
        AddBookWindow = Toplevel()
        AddBookWindow.title("Add Book AVBIL LM")
        AddBookWindow.iconbitmap("media\logo.ico")
        AddBookWindow.minsize(width=550, height=450)
        AddBookWindow.geometry("600x500")
        AddBookWindow.lift()
        # Canvas
        Canvas1 = Canvas(AddBookWindow)
        Canvas1.config(bg="#161616")
        Canvas1.pack(expand=True, fill=BOTH)
        # Heading
        headingFrame1 = Frame(AddBookWindow, bg="#ff6e40", bd=5)
        headingFrame1.place(relx=0.25, rely=0.1, relwidth=0.5, relheight=0.13)
        headingLabel = Label(
            headingFrame1,
            text="Add Book",
            bg="#121212",
            fg="white",
            font=("Segoe UI", 20),
        )
        headingLabel.place(relx=0, rely=0, relwidth=1, relheight=1)
        # Label Frame
        labelFrame = Frame(AddBookWindow, bg="#121212")
        labelFrame.place(relx=0.1, rely=0.4, relwidth=0.8, relheight=0.4)
        # Book ID
        bidLbl = Label(
            labelFrame, text="Book ID : ", bg="#121212", fg="white", font=("Segoe UI",)
        )
        bidLbl.place(relx=0.05, rely=0.2, relheight=0.08)
        bidEntry = Entry(labelFrame)
        bidEntry.place(relx=0.4, rely=0.2, relwidth=0.52, relheight=0.08)
        # Title
        titleLbl = Label(
            labelFrame, text="Title : ", bg="#121212", fg="white", font=("Segoe UI",)
        )
        titleLbl.place(relx=0.05, rely=0.35, relheight=0.08)
        titleEntry = Entry(labelFrame)
        titleEntry.place(relx=0.4, rely=0.35, relwidth=0.52, relheight=0.08)
        # Book Author
        authLbl = Label(
            labelFrame, text="Author : ", bg="#121212", fg="white", font=("Segoe UI",)
        )
        authLbl.place(relx=0.05, rely=0.50, relheight=0.08)
        authEntry = Entry(labelFrame)
        authEntry.place(relx=0.4, rely=0.50, relwidth=0.52, relheight=0.08)
        # Book Status (issued checkbox)
        statusLbl = Label(
            labelFrame,
            text="Issued : ",
            bg="#121212",
            fg="white",
            font=("Segoe UI",),
        )
        statusLbl.place(relx=0.05, rely=0.65, relheight=0.08)
        statusVar = BooleanVar()
        def issuedTo():
            # Enable the issued-to entry only while the box is checked.
            if statusVar.get():
                issuedToLbl["fg"] = "white"
                issuedToEntry["state"] = "normal"
            else:
                issuedToLbl["fg"] = "grey"
                issuedToEntry["state"] = "disabled"
        statusEntry = Checkbutton(
            labelFrame,
            bg="#121212",
            activebackground="#161616",
            onvalue=True,
            offvalue=False,
            variable=statusVar,
            command=issuedTo,
        )
        # statusEntry.deselect()
        statusEntry.place(relx=0.4, rely=0.65)
        # Issued To
        issuedToLbl = Label(
            labelFrame,
            text="Issued to : ",
            bg="#121212",
            fg="grey",
            font=("Segoe UI",),
        )
        issuedToLbl.place(relx=0.05, rely=0.8, relheight=0.08)
        issuedToEntry = Entry(labelFrame, state="disabled")
        issuedToEntry.place(relx=0.4, rely=0.8, relwidth=0.52, relheight=0.08)
        # Add Button
        SubmitBtn = Button(
            AddBookWindow,
            text="ADD",
            bg="#121212",
            fg="white",
            font=("Segoe UI", 12),
            command=lambda: [click(), bookRegister()],
        )
        SubmitBtn.place(relx=0.28, rely=0.9, relwidth=0.18, relheight=0.08)
        def SubmitBtn_hoverin(event):
            # Hover effect: lighter background, larger font.
            SubmitBtn["bg"] = "#222222"
            SubmitBtn["font"] = "Segoe UI", 15
        def SubmitBtn_hoverout(event):
            SubmitBtn["bg"] = "#121212"
            SubmitBtn["font"] = "Segoe UI", 12
        SubmitBtn.bind("<Enter>", SubmitBtn_hoverin)
        SubmitBtn.bind("<Leave>", SubmitBtn_hoverout)
        # Back Button
        backBtn = Button(
            AddBookWindow,
            text="CANCEL",
            bg="#121212",
            fg="white",
            font=("Segoe UI", 12),
            command=lambda: [click(), add_close(), AddBookWindow.destroy()],
        )
        backBtn.place(relx=0.53, rely=0.9, relwidth=0.18, relheight=0.08)
        def backBtn_hoverin(event):
            backBtn["bg"] = "#222222"
            backBtn["font"] = "Segoe UI", 15
        def backBtn_hoverout(event):
            backBtn["bg"] = "#121212"
            backBtn["font"] = "Segoe UI", 12
        backBtn.bind("<Enter>", backBtn_hoverin)
        backBtn.bind("<Leave>", backBtn_hoverout)
        # Closing via the window manager must also clear the flag file.
        AddBookWindow.protocol(
            "WM_DELETE_WINDOW", lambda: [click(), add_close(), AddBookWindow.destroy()]
        )
        AddBookWindow.mainloop()
    else:
        messagebox.showinfo("Caution", "Add Book AVBIL LM is already open.")
|
# Copyright 2021 qclib project.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Parametric probabilistic quantum memory"""
import numpy as np
def initialize(circuit, pattern, q_memory, q_auxiliary, is_classical_pattern=False):
    r"""
    Prepares a circuit which the output is determined by a probability distribution on the memory
    which is peaked around the stored patterns closest in Hamming distance to the input.
    `<https://arxiv.org/pdf/quant-ph/0012100v2.pdf>`_.
    The retrieval algorithm requires three registers. The first, of ``n`` qubits, contains the input
    pattern; the second, also of ``n`` qbits, contains the memory;
    and finally there is a single qubit auxiliary register.
    .. note::
        The operator ``U``, used in Trugenberger's article, has been replaced by
        ``XPX`` (where ``P`` is the phase gate). Therefore, we removed the
        ``X`` (``NOT``) operators from equations (12) and (16) of the
        paper.
    .. note::
        If the pattern is classical (``is_classical_pattern = True``),
        the ``CNOT`` operators of equations (12) and (16) are applied directly
        to the memory qubits using ``NOT`` gates (the article names the ``CNOT`` as ``XOR``).
        Otherwise, ``CNOT's`` controlled by the pattern qubits are used and
        applied to the memory qubits. In the latter case, ``n`` additional
        qubits are needed, compared to the classical pattern case.
    Args:
        circuit: a qiskit quantum circuit.
        pattern: a list of bits (0 and 1 ints) or a basis encoded quantum register .
        q_memory: an amplitude encoded quantum register with memory data (superposition
                  of ``p`` patterns on ``n`` entangled qbits).
        q_auxiliary: an uninitialized one-qubit quantum register.
        is_classical_pattern: indicates if ``pattern`` is a classical data
                              (list of bits) or a quantum register.
    """
    size = len(q_memory)

    circuit.h(q_auxiliary)

    # Imprint the input pattern onto the memory register (eq. 12).
    if is_classical_pattern:
        for k, q_m in enumerate(q_memory): # classical pattern register
            if pattern[k]==1:
                circuit.x(q_m)
    else:
        for k, q_m in enumerate(q_memory): # quantum pattern register
            circuit.cx(pattern[k], q_m)

    # Phase rotations; loop index was unused here, so iterate qubits directly.
    for q_m in q_memory:
        circuit.p(-np.pi / (2 * size), q_m)

    for q_m in q_memory:
        circuit.cp( np.pi / size, q_auxiliary, q_m)

    # Uncompute the pattern imprint, in reverse order (eq. 16).
    if is_classical_pattern:
        for k, q_m in list(enumerate(q_memory))[::-1]: # classical pattern register
            if pattern[k]==1:
                circuit.x(q_m)
    else:
        for k, q_m in list(enumerate(q_memory))[::-1]: # quantum pattern register
            circuit.cx(pattern[k], q_m)

    circuit.h(q_auxiliary)
|
# encoding: utf8
# miner.py
import logging
from itertools import zip_longest, tee
from functools import partial, reduce
from operator import itemgetter
from pprint import pprint
from requests import Session, codes
from time import time, sleep
from tqdm import tqdm
from miptclass import models
from miptclass.api import Groups, Users, Friends
from miptclass.models import User, Group, UserFriends
def filter_fields(row, column_names):
    """Return a copy of *row* keeping only keys present in *column_names*."""
    return {field: value for field, value in row.items() if field in column_names}
def save(db, rows):
    """Merge every record in *rows* into the session, then commit."""
    for record in rows:
        db.merge(record)
    db.commit()
def mine_reference_groups(db):
    """Mine the reference VK group: brief info, member profiles, friends."""
    http = Session()

    logging.info('start mining reference group brief info')
    mine_groups(['miptru'], db, http, save)

    logging.info('build list of unique user identifiers')
    cursor = db.execute('SELECT id FROM users WHERE deactivated is NULL;')
    uids = tuple(row[0] for row in cursor.fetchall())

    logging.info('start mining info about group members')
    mine_users(uids, db, http, save)

    logging.info('start mining friend lists of group members')
    mine_friends(uids, db, http, save)
def mine_groups(gids, db, session, save):
    """Fetch brief info for the groups *gids*, persist it, then mine the
    member list of each group."""
    groups = Groups(session=session)
    response = groups.getById(gids)
    # BUG FIX: group rows were filtered against the *User* model's columns
    # (copy-paste from mine_group); use the Group model so the right fields
    # survive the filter before constructing models.Group(**row).
    column_names = frozenset(dir(models.Group))
    rows = (filter_fields(item, column_names) for item in response)
    rows = ((models.Group(**row), row['id']) for row in rows)
    rows, ids = zip(*rows)
    save(db, rows)

    logging.info('start mining group members')
    for group_id in ids:
        # BUG FIX: logging uses lazy %-formatting; the previous call passed
        # group_id with no placeholder in the format string.
        logging.info('process group #%s', group_id)
        mine_group(group_id, db, session, save)
def mine_group(gid, db, session, save):
    """Download the full member list of group *gid* and persist the users."""
    api = Groups(session=session)
    members = api.getAllMembers(gid)
    user_columns = frozenset(dir(models.User))
    users = [models.User(**filter_fields(member, user_columns))
             for member in members['items']]
    save(db, users)
def mine_users(uids, db, session, save):
    """Fetch full profiles for *uids* and persist users, their universities,
    and the user<->university link rows."""
    user_columns = frozenset(dir(models.User))
    university_columns = frozenset(dir(models.University))
    user_university_columns = frozenset(dir(models.UserUniversities))

    logging.info('insert user profiles into database')
    users = Users(session=session)
    response = users.getAllUsers(uids)
    # NOTE(review): `response` is iterated again below, so this assumes
    # getAllUsers returns a list (not a one-shot generator) -- confirm.
    save(db, (models.User(**filter_fields(item, user_columns))
              for item in response))

    # (typo "datatabase" fixed in the log message below)
    logging.info('insert universities into database')
    # keep only profiles that list universities, as (id, universities) pairs
    universities = filter(lambda x: 'universities' in x, response)
    universities = map(itemgetter('id', 'universities'), universities)
    universities, user_university = tee(universities)
    universities = reduce(lambda x, y: x + y[1], universities, [])
    universities = (models.University(**filter_fields(item,
                                                      university_columns))
                    for item in universities)
    save(db, universities)

    logging.info('insert user\'s universities into database')
    # pair each user id with each of their university ids
    user_university = map(lambda x:
                          zip_longest((x[0],),
                                      map(itemgetter('id'), x[1]),
                                      fillvalue=x[0]),
                          user_university)
    user_university = reduce(lambda x, y: x + list(y), user_university, [])
    user_university = map(lambda x: dict(id=x[0], university_id=x[1]),
                          user_university)
    user_university = (models.UserUniversities(**filter_fields(item,
                                                               user_university_columns))
                       for item in user_university)
    user_university = list(user_university)
    save(db, user_university)
def mine_friends(uids, db, session, save):
    """Download and persist the friend list of every user in *uids*.

    Deactivated accounts are marked as such; other API errors are logged
    and skipped.
    """
    friends = Friends(session=session)
    friend_columns = frozenset(dir(models.UserFriends))
    for i, uid in enumerate(tqdm(uids, unit='uid')):
        response = friends.get(uid)
        if all((response.get('error_code') == 15,
                response.get('error_msg') == 'Access denied: user deactivated')):
            # BUG FIX: the UPDATE had no WHERE clause and marked *every*
            # user deactivated; restrict it to the user that triggered it.
            db.execute("""
            UPDATE users
            SET deactivated = 'deactivated'
            WHERE id = :uid;
            """, {'uid': uid})
            db.commit()
        elif 'error_code' in response:
            # BUG FIX: argument order did not match '%d: %s(%d)' (the
            # message string was fed to %d, breaking the log call).
            logging.error('error was received for uid %d: %s(%d)',
                          uid, response['error_msg'], response['error_code'])
        else:
            user_friends = (filter_fields(row, friend_columns)
                            for row in response['items'])
            user_friends = (models.UserFriends(id=uid, friend_id=row['id'])
                            for row in user_friends)
            db.add_all(user_friends)
            db.commit()
|
from setuptools import setup

# Read __version__ (defined in alfpy/version.py) without importing the
# package itself; the file handle is now closed deterministically.
with open('alfpy/version.py') as fh:
    exec(fh.read())

# Long description comes straight from the README.
with open('README.rst') as fh:
    long_description = fh.read()

setup(
    name='alfpy',
    version=__version__,
    description="Alignment-free package to compare DNA/RNA/protein sequences (bioinformatics).",
    long_description=long_description,
    author='Andrzej Zielezinski',
    keywords='alignment-free bioinformatics sequence DNA protein homology phylogeny',
    license="MIT",
    author_email='andrzejz@amu.edu.pl',
    url="http://www.combio.pl/alfree",
    packages=['alfpy', 'alfpy.utils', 'alfpy.utils.data'],
    #setup_requires=["numpy"],
    install_requires=["numpy"],
    scripts=[
        'bin/calc_bbc.py',
        'bin/calc_graphdna.py',
        'bin/calc_fcgr.py',
        'bin/calc_lempelziv.py',
        'bin/calc_ncd.py',
        'bin/calc_wmetric.py',
        'bin/calc_word.py',
        'bin/calc_word_bool.py',
        'bin/calc_word_sets.py',
        'bin/calc_word_cv.py',
        'bin/calc_word_d2.py',
        'bin/calc_word_ffp.py',
        'bin/calc_word_rtd.py',
        'bin/create_wordpattern.py'
    ],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Operating System :: MacOS',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
    ],
)
from Libraries.EmailService.LibraryGeneral.EmailService import ServicioEmail
# NOTE(review): credentials are hard-coded and only base64-encoded, which is
# NOT encryption -- anyone with this file can decode them. Move them to
# environment variables or a secrets store.
email = ServicioEmail(emailOrigenBase64 = "d2lzcm92aS5yb2RyaWd1ZXpAZ21haWwuY29t",
            passwordEmailBase64 = "RkM1SkI2RU0=",
            nombreRemitenteBase64 = 'V0lTUk9WSQ==')
# Compose and send a one-off test message, then print the service's response.
tituloMensaje = 'Final Trabajo'
destinatario = "wisrovi.rodriguez@gmail.com"
asunto = 'Final trabajo WISROVI'
body = 'son las 10:30pm'
rta = email.sendEmail(tituloMensaje, destinatario, asunto, body )
print(rta)
|
from typing import List
class CompleteBinaryWriteImpulse:
    """Value object describing the completion of a chunked binary upload."""

    uri: str            # destination resource identifier
    uploadId: str       # identifier of the multipart upload
    chunks: List[str]   # ordered chunk identifiers

    def __init__(self, uri: str, uploadId: str, chunks: List[str]):
        self.uri = uri
        self.uploadId = uploadId
        self.chunks = chunks

    def __eq__(self, other):
        # Robustness: comparing against an unrelated type no longer raises
        # AttributeError; Python falls back to identity comparison.
        if not isinstance(other, CompleteBinaryWriteImpulse):
            return NotImplemented
        return (self.uri == other.uri
                and self.uploadId == other.uploadId
                and self.chunks == other.chunks)

    def __repr__(self):
        # BUG FIX: __repr__ must return a str; it previously returned a
        # dict, which made repr()/printing raise TypeError.
        return repr({'uri': self.uri, 'uploadId': self.uploadId, 'chunks': self.chunks})
|
# Importing the Kratos Library
from KratosMultiphysics.kratos_utilities import CheckIfApplicationsAvailable
if not CheckIfApplicationsAvailable("CompressiblePotentialFlowApplication"):
raise ImportError("The CompressiblePotentialFlowApplication is not available!")
# Importing the base class
from KratosMultiphysics.CoSimulationApplication.solver_wrappers.kratos import kratos_base_wrapper
# Other imports
from KratosMultiphysics.CompressiblePotentialFlowApplication.potential_flow_analysis import PotentialFlowAnalysis
from KratosMultiphysics.CompressiblePotentialFlowApplication.compute_forces_on_nodes_process import ComputeForcesOnNodesProcess
from KratosMultiphysics.CompressiblePotentialFlowApplication.define_wake_process_2d import DefineWakeProcess2D
from KratosMultiphysics.CompressiblePotentialFlowApplication.compute_lift_process import ComputeLiftProcess
def Create(settings, model, solver_name):
    # Factory entry point expected by the CoSimulation framework.
    return PotentialFlowWrapper(settings, model, solver_name)
class PotentialFlowWrapper(kratos_base_wrapper.KratosBaseWrapper):
    """CoSimulation wrapper for a Kratos compressible potential-flow analysis."""

    def _CreateAnalysisStage(self):
        return PotentialFlowAnalysis(self.model, self.project_parameters)

    def Predict(self):
        # Intentionally empty: no prediction step is performed.
        pass

    def Initialize(self):
        super().Initialize()

        sub_project_parameters = self.project_parameters["processes"]["boundary_conditions_process_list"]

        # Pick up the wake, force-conversion and lift processes from the
        # boundary-conditions process list.
        for i in range(sub_project_parameters.size()):
            if sub_project_parameters[i]["python_module"].GetString() == "define_wake_process_2d":
                self.wake_process = DefineWakeProcess2D(self.model, sub_project_parameters[i]["Parameters"])
            if sub_project_parameters[i]["python_module"].GetString() == "compute_forces_on_nodes_process":
                self.conversion_process = ComputeForcesOnNodesProcess(self.model, sub_project_parameters[i]["Parameters"])
            if sub_project_parameters[i]["python_module"].GetString() == "compute_lift_process":
                self.lift_process = ComputeLiftProcess(self.model, sub_project_parameters[i]["Parameters"])

        # BUG FIX: this check used to be *inside* the loop, so it raised
        # whenever the wake process was not the first list entry. Check once
        # after all entries have been inspected.
        if not hasattr(self, "wake_process"):
            raise Exception("potential flow requires specification of a process for the wake (currently specifically using 'define_wake_process_2d')")

    def SolveSolutionStep(self):
        self.wake_process.ExecuteInitialize()

        ## the next two lines are needed in order to add Wake DoFs to the new Wake Elements Nodes
        ## and delete the ones that are no longer in the Wake Region.
        self._analysis_stage._GetSolver().Clear()
        self._analysis_stage._GetSolver().InitializeSolutionStep()

        super().SolveSolutionStep()

        self.lift_process.ExecuteFinalizeSolutionStep()
        self.conversion_process.ExecuteFinalizeSolutionStep()
|
from ..core import StateStatus, State
class PrintState(State):
    """State that prints a fixed text and immediately succeeds."""
    _text: str  # text printed every time the state executes

    def __init__(self, name: str, text: str):
        """Constructor for PrintState
        Parameters
        ----------
        name : str
            Name of the State, useful in Debugging.
        text : str
            Text to print on Screen
        """
        super().__init__(name)
        self._text = text

    def execute(self, board):
        """Print the configured text; always returns StateStatus.SUCCESS."""
        print(self._text)
        return StateStatus.SUCCESS
|
# --------------------------------------------------------------
# bm_ShuffleShortcuts.py
# Version: 2.0.0
# Author: Ben McEwan
#
# Last Modified by: Ben McEwan
# Last Updated: November 8th, 2021
# --------------------------------------------------------------
# --------------------------------------------------------------
# USAGE:
#
# Creates Shuffle nodes that copy a single source channel into all RGBA channels.
# Updated to use Shuffle2...
# --------------------------------------------------------------
import nuke
# Define the function
def createShuffleShortcut(in_red, out_red, in_green, out_green, in_blue, out_blue, in_alpha, out_alpha, rColor, gColor, bColor, label):
    """Create a Shuffle2 node with the given channel mappings, tile colour
    (0-1 per-component values) and label."""
    shuffle_node = nuke.createNode("Shuffle2")
    # Route each source channel to its destination via the mappings knob.
    shuffle_node.knob('mappings').setValue([
        (in_red, out_red),
        (in_green, out_green),
        (in_blue, out_blue),
        (in_alpha, out_alpha),
    ])
    # Nuke stores tile colour as a packed hex RGBA value; build it from the
    # 0-1 component values (alpha fixed to 1).
    shuffle_node['tile_color'].setValue(int('%02x%02x%02x%02x' % (rColor*255, gColor*255, bColor*255, 1), 16))
    shuffle_node['label'].setValue(label)
# Define the function
def shuffleRGBchannels():
    """Split the selected node into three single-channel Shuffle2 nodes
    (red/green/blue routed into all RGBA channels) and recombine them with
    a plus Merge2 node."""
    # Capture the selection and its position before creating any nodes.
    source = nuke.selectedNode()
    src_x = source['xpos'].value()
    src_y = source['ypos'].value()

    # (channel, (r, g, b) tile colour, label, x offset) for each shuffle.
    specs = (
        ('rgba.red', (1, 0, 0), 'Red to All', -150),
        ('rgba.green', (0, 1, 0), 'Green to All', 0),
        ('rgba.blue', (0, 0, 1), 'Blue to All', 150),
    )
    shuffles = []
    for channel, (r, g, b), label, x_offset in specs:
        createShuffleShortcut(channel, 'rgba.red', channel, 'rgba.green',
                              channel, 'rgba.blue', channel, 'rgba.alpha',
                              r, g, b, label)
        node = nuke.selectedNode()  # createNode leaves the new node selected
        node.setInput(0, source)
        node['xpos'].setValue(src_x + x_offset)
        node['ypos'].setValue(src_y + 150)
        shuffles.append(node)

    # Recombine with a plus merge; inputs 0 and 1 are B and A, input 3 is
    # the extra A2 input (input 2 is skipped here).
    merge = nuke.createNode("Merge2")
    merge['operation'].setValue("plus")
    merge.setInput(0, shuffles[0])
    merge.setInput(1, shuffles[1])
    merge.setInput(3, shuffles[2])
    merge['xpos'].setValue(src_x)
    merge['ypos'].setValue(src_y + 300)
# Add menu items to the Channel nodes menu.
# Each command is given as a string expression, so this module must be
# importable as `bm_ShuffleShortcuts` at menu-invocation time.
nuke.menu('Nodes').addCommand("Channel/Shuffle (Red to All)", "bm_ShuffleShortcuts.createShuffleShortcut('rgba.red', 'rgba.red', 'rgba.red', 'rgba.green', 'rgba.red', 'rgba.blue', 'rgba.red', 'rgba.alpha', 1, 0, 0, 'Red to All')", "meta+r", icon="redShuffle.png", shortcutContext=2)
nuke.menu('Nodes').addCommand("Channel/Shuffle (Green to All)", "bm_ShuffleShortcuts.createShuffleShortcut('rgba.green', 'rgba.red', 'rgba.green', 'rgba.green', 'rgba.green', 'rgba.blue', 'rgba.green', 'rgba.alpha', 0, 1, 0, 'Green to All')", "meta+g", icon="greenShuffle.png", shortcutContext=2)
nuke.menu('Nodes').addCommand("Channel/Shuffle (Blue to All)", "bm_ShuffleShortcuts.createShuffleShortcut('rgba.blue', 'rgba.red', 'rgba.blue', 'rgba.green', 'rgba.blue', 'rgba.blue', 'rgba.blue', 'rgba.alpha', 0, 0, 1, 'Blue to All')", "meta+b", icon="blueShuffle.png", shortcutContext=2)
nuke.menu('Nodes').addCommand("Channel/Shuffle (Alpha to All)", "bm_ShuffleShortcuts.createShuffleShortcut('rgba.alpha', 'rgba.red', 'rgba.alpha', 'rgba.green', 'rgba.alpha', 'rgba.blue', 'rgba.alpha', 'rgba.alpha', 1, 1, 1, 'Alpha to All')", "meta+a", icon="alphaToAll.png", shortcutContext=2)
nuke.menu('Nodes').addCommand("Channel/Shuffle (White Alpha)", "bm_ShuffleShortcuts.createShuffleShortcut('rgba.red', 'rgba.red', 'rgba.green', 'rgba.green', 'rgba.blue', 'rgba.blue', 'white', 'rgba.alpha', 1, 1, 1, 'White Alpha')", "meta+1", icon="alpha1Shuffle.png", shortcutContext=2)
nuke.menu('Nodes').addCommand("Channel/Shuffle (Black Alpha)", "bm_ShuffleShortcuts.createShuffleShortcut('rgba.red', 'rgba.red', 'rgba.green', 'rgba.green', 'rgba.blue', 'rgba.blue', 'black', 'rgba.alpha', 0, 0, 0, 'Black Alpha')", "meta+`", icon="alpha0Shuffle.png", shortcutContext=2)
nuke.menu('Nodes').addCommand("Channel/Shuffle (Split RGB channels)", "bm_ShuffleShortcuts.shuffleRGBchannels()", "meta+s", icon="ShuffleSplitRGB.png", shortcutContext=2)
|
import torch
import torchvision
import torchvision.transforms as T
import model
import config
import dataset
import numpy as np
import time
from tqdm import tqdm
__all__ = ["train_fn", "eval_fn"]
def train_fn(train_dataloader, detector, optimizer, device, scheduler=None):
    """Run one training epoch over `train_dataloader`.

    Args:
        train_dataloader: Iterable yielding (images, targets) batches.
        detector: Detection model in FasterRCNN style — called with
            (images, targets) in train mode it returns a dict of losses.
        optimizer: Optimizer over the detector's parameters.
        device: Device the batch tensors are moved to.
        scheduler: Optional LR scheduler; stepped once per batch.

    Returns:
        float: the summed loss of the LAST batch (not an epoch average),
        or 0.0 if the dataloader was empty.
    """
    detector.train()
    # Fix: previously loss_value was only bound inside the loop, so an
    # empty dataloader raised UnboundLocalError at `return`.
    loss_value = 0.0
    for images, targets in tqdm(train_dataloader):
        images = [image.to(device) for image in images]
        # FasterRCNN-style detectors expect a list of dicts of tensors.
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        loss_dict = detector(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        loss_value = losses.item()
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        if scheduler is not None:
            scheduler.step()
    return loss_value
def eval_fn(val_dataloader, detector, device, detection_threshold=0.45):
    """Run inference over `val_dataloader` and collect raw detections.

    NOTE: `detection_threshold` is accepted but currently unused — score
    filtering is disabled in this implementation.

    Returns:
        list[dict]: one dict per image with numpy arrays under the keys
        "boxes" ([Xmin, Ymin, Xmax, Ymax] rows), "scores" and "labels".
    """
    results = []
    detector.eval()
    with torch.no_grad():
        for images, _targets in tqdm(val_dataloader):
            batch = [img.to(device) for img in images]
            outputs = detector(batch)
            for idx in range(len(batch)):
                detection = outputs[idx]
                results.append({
                    "boxes": detection["boxes"].data.cpu().numpy(),
                    "scores": detection["scores"].data.cpu().numpy(),
                    "labels": detection["labels"].data.cpu().numpy(),
                })
    return results
|
import os.path
import shutil
from unittest import skipUnless
from django.test import TestCase
from peacecorps.models import DonorInfo, Account
class GPGTests(TestCase):
    """Exercise transparent GPG encryption of the DonorInfo.xml field."""

    def setUp(self):
        self.account = Account.objects.create(code='FUNDFUND')

    def tearDown(self):
        self.account.delete()

    def test_no_encryption(self):
        """With no encryption settings, fields still work"""
        with self.settings(GNUPG_HOME=''):
            donor = DonorInfo(agency_tracking_id='TRACK', account=self.account,
                              xml='Plain Text')
            donor.save()
            # The raw database value is unencrypted plain text.
            raw = DonorInfo.objects.filter(pk=donor.pk).values_list('xml')[0][0]
            if isinstance(raw, memoryview):
                raw = raw.tobytes()
            self.assertEqual(raw.decode('utf-8'), 'Plain Text')
            # Reading back through the model returns the original value.
            reloaded = DonorInfo.objects.get(pk=donor.pk)
            self.assertEqual(reloaded.xml, 'Plain Text')

    @skipUnless(shutil.which('gpg'), "GPG is not installed")
    def test_encryption(self):
        """Verify that fields *are* encrypted when GNUPG_HOME is set"""
        gpg_home = os.path.join('peacecorps', 'tests', 'gpg')
        recipients = {'peacecorps.DonorInfo.xml': 'C68F6B22'}
        with self.settings(GNUPG_HOME=gpg_home, GPG_RECIPIENTS=recipients):
            donor = DonorInfo(agency_tracking_id='TRACK', account=self.account,
                              xml='Plain Text')
            donor.save()
            # The raw database value is now PGP-armored, not plain text.
            raw = DonorInfo.objects.filter(pk=donor.pk).values_list('xml')[0][0]
            self.assertTrue('BEGIN PGP' in raw.decode('utf-8'))
            # Reading back through the model still decrypts correctly.
            reloaded = DonorInfo.objects.get(pk=donor.pk)
            self.assertEqual(reloaded.xml, 'Plain Text')
|
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
from sagemaker.huggingface import HuggingFace
from packaging.version import Version
from ..... import invoke_sm_helper_function
from ...integration.utils import processor, py_version, unique_name_from_base # noqa: F401
from test.test_utils import get_framework_and_version_from_tag, get_cuda_version_from_tag
# Location of shared test resources relative to this test module.
RESOURCE_PATH = os.path.join(os.path.dirname(__file__), "..", "..", "resources")
# Directory containing the training entry-point script (train.py).
BERT_PATH = os.path.join(RESOURCE_PATH, "scripts")
# hyperparameters, which are passed into the training job
hyperparameters = {
    "max_steps": 10,
    "train_batch_size": 16,
    "model_name": "distilbert-base-uncased",
}
@pytest.mark.integration("hf_smdp")
@pytest.mark.model("hf_distilbert")
@pytest.mark.processor("gpu")
@pytest.mark.skip_cpu
@pytest.mark.skip_py2_containers
# TODO: Enable sagemaker debugger, resolve github issue after enabling.
# https://github.com/aws/deep-learning-containers/issues/1053
def test_hf_smdp(ecr_image, sagemaker_regions, instance_type, framework_version, tmpdir):
    """
    Tests SMDataParallel single-node command via script mode
    """
    # Delegates to _test_hf_smdp_function with instance_count=1 (single node).
    invoke_sm_helper_function(ecr_image, sagemaker_regions, _test_hf_smdp_function,
                              instance_type, framework_version, py_version, tmpdir, 1)
@pytest.mark.processor("gpu")
@pytest.mark.skip_cpu
@pytest.mark.multinode(2)
@pytest.mark.integration("hf_smdp_multinode")
@pytest.mark.model("hf_distilbert")
@pytest.mark.skip_py2_containers
# Skipping `ml.p3dn.24xlarge` instance type due to capacity issue in us-west-2
# TODO: Enable sagemaker debugger, resolve github issue after enabling.
# https://github.com/aws/deep-learning-containers/issues/1053
def test_hf_smdp_multi(ecr_image, sagemaker_regions, instance_type, tmpdir, framework_version):
    """
    Tests smddprun command via Estimator API distribution parameter
    """
    # Delegates to _test_hf_smdp_function with instance_count=2 (two nodes).
    invoke_sm_helper_function(ecr_image, sagemaker_regions, _test_hf_smdp_function,
                              instance_type, framework_version, py_version, tmpdir, 2)
def _test_hf_smdp_function(ecr_image, sagemaker_session, instance_type, framework_version, py_version, tmpdir,
                           instance_count):
    """Launch a HuggingFace training job with SMDataParallel enabled and
    wait for completion (estimator.fit blocks until the job finishes)."""
    # NOTE(review): the results of these two calls are unused in this
    # function — possibly kept for their validation side effects; confirm.
    _, image_framework_version = get_framework_and_version_from_tag(ecr_image)
    image_cuda_version = get_cuda_version_from_tag(ecr_image)
    # NOTE(review): the instance_type argument is unconditionally overridden
    # here, making the parameter dead — confirm this is intentional.
    instance_type = "ml.p3.16xlarge"
    distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}
    estimator = HuggingFace(entry_point='train.py',
                            source_dir=BERT_PATH,
                            role='SageMakerRole',
                            instance_type=instance_type,
                            instance_count=instance_count,
                            image_uri=ecr_image,
                            framework_version=framework_version,
                            py_version=py_version,
                            sagemaker_session=sagemaker_session,
                            hyperparameters=hyperparameters,
                            distribution=distribution,
                            debugger_hook_config=False,  # currently needed
                            )
    estimator.fit(job_name=unique_name_from_base("test-tf-hf-smdp-multi"))
|
import inspect
from contextlib import contextmanager
"""
Implicit self emulation in Python using context managers, inspection and locals manipulation.
"""
def is_public(field_name: str) -> bool:
    """Return True unless *field_name* starts with a double underscore.

    Note that single-underscore names (e.g. '_x') count as public here.
    """
    # Idiom fix: `False if cond else True` is just `not cond`.
    return not field_name.startswith('__')
@contextmanager
def in_context(context_object):
    """Emulate an implicit `self` for the body of a `with` block.

    Inside the block, every public attribute of *context_object* is
    available as a bare name, and `this` refers to the object itself.
    On exit, the caller's locals are restored from a snapshot.

    NOTE(review): writing to f_locals like this is only reliable at
    module/class scope in CPython (function-scope f_locals writes may be
    discarded) — confirm before using inside a function.
    """
    # currentframe() is this generator's frame; f_back is the
    # @contextmanager machinery that drives it, and its f_back is the
    # frame containing the `with` statement.
    decorator_frame = inspect.currentframe().f_back
    caller_frame = decorator_frame.f_back
    caller_locals = caller_frame.f_locals
    # horrible things, I know :p
    locals_snapshot = caller_locals.copy()
    caller_locals.update({field: getattr(context_object, field)
                          for field in dir(context_object)
                          if is_public(field)})
    caller_locals['this'] = context_object
    try:
        yield
    finally:
        # Undo the injection: restore the caller's locals exactly as they
        # were before entering the block.
        caller_locals.clear()
        caller_locals.update(locals_snapshot)
# Demo: inside the block `this` is test_dict, and public dict attributes
# (e.g. `update`) resolve as bare names because in_context injected them
# into this module's globals.
test_dict = {}
with in_context(test_dict):
    this['is'] = 'very'
    update({'useful': 'example', 'idict': {}})
    with in_context(this['idict']):
        update({'internal': 'stuff'})
# The injected names are gone here, but mutations to test_dict persist.
print(test_dict)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pymrt.computation: generic computation utilities for MRI data analysis.
See Also:
pymrt.recipes
"""
# ======================================================================
# :: Future Imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# todo: use kwargs instead of opts
# todo: get rid of tty colorify
# ======================================================================
# :: Python Standard Library Imports
import os # Miscellaneous operating system interfaces
import shutil # High-level file operations
# import math # Mathematical functions
# import time # Time access and conversions
# import datetime # Basic date and time types
# import operator # Standard operators as functions
# import collections # High-performance container datatypes
import itertools # Functions creating iterators for efficient looping
# import functools # Higher-order functions and operations on callable objects
import re # Regular expression operations
# import subprocess # Subprocess management
import multiprocessing # Process-based parallelism
# import inspect # Inspect live objects
# import csv # CSV File Reading and Writing [CSV: Comma-Separated Values]
import json # JSON encoder and decoder [JSON: JavaScript Object Notation]
import hashlib # Secure hashes and message digests
# :: External Imports
import numpy as np # NumPy (multidimensional numerical arrays library)
# import matplotlib as mpl # Matplotlib (2D/3D plotting library)
# import sympy as sym # SymPy (symbolic CAS library)
# import PIL # Python Image Library (image manipulation toolkit)
# import SimpleITK as sitk # Image ToolKit Wrapper
# import nibabel as nib # NiBabel (NeuroImaging I/O Library)
# import nipy # NiPy (NeuroImaging in Python)
# import nipype # NiPype (NiPy Pipelines and Interfaces)
# :: External Imports Submodules
# import matplotlib.pyplot as plt # Matplotlib's pyplot: MATLAB-like syntax
# import scipy.optimize # SciPy: Optimization Algorithms
# import scipy.integrate # SciPy: Integrations facilities
# import scipy.constants # SciPy: Mathematal and Physical Constants
# import scipy.stats # SciPy: Statistical functions
# :: Local Imports
import pymrt.utils as pmu
import pymrt.naming as pmn
import pymrt.input_output as pmio
# from dcmpi.lib.common import ID
# from pymrt import INFO
from pymrt import VERB_LVL, D_VERB_LVL
from pymrt import msg, dbg
# ======================================================================
META_EXT = 'info'  # extension used for metadata files (was: ID['info'])

# Default options shared by the computation presets below; individual
# presets override a subset of these keys.
D_OPTS = {
    # sources
    'data_ext': pmu.EXT['niz'],  # input data file extension
    'meta_ext': META_EXT,  # metadata file extension
    'multi_acq': False,  # whether sources span multiple acquisitions
    'use_meta': True,  # read acquisition parameters from metadata
    'param_select': [None],  # acquisition parameters to extract
    'match': None,  # regex filter on source names
    'pattern': [None],
    'groups': None,
    # compute
    'types': [None],  # output map type labels
    'mask': [None],
    'adapt_mask': True,
}

# Typical value range of DICOM-converted data (12-bit).
DICOM_INTERVAL = (0, 4095)
# ======================================================================
def _simple_affines(affines):
return tuple(affines[0] for affine in affines)
# ======================================================================
def preset_t1_mp2rage_builtin():
    """
    Preset to get built-in T1 maps from the MP2RAGE sequence.
    """
    output_types = ['T1', 'INV2M']
    return {
        'types': output_types,
        'param_select': ['ProtocolName', '_series'],
        'match': '(?i).*mp2rage.*',
        'dtype': 'float',
        'mask': [[None], [None], [None], [1]],
        'compute_func': 'match_series',
        'compute_kwargs': {
            # map series name patterns to the corresponding output type
            'matches': (
                ('.*_T1_Images.*', output_types[0]),
                ('.*_INV2(?!_PHS).*', output_types[1]),
            ),
        },
    }
# ======================================================================
def preset_t2s_memp2rage_loglin2():
    """
    Preset to get built-in T2* maps from the ME-MP2RAGE sequence.
    """
    return dict(
        types=['T2S', 'T1w'],
        param_select=['ProtocolName', 'EchoTime::ms', '_series'],
        match='(?i).*me-mp2rage.*_INV2(?!_PHS).*',
        dtype='float',
        multi_acq=False,
        compute_func='fit_monoexp_decay_loglin2',
        compute_kwargs=dict(
            ti_label='EchoTime::ms',
            img_types={'tau': 'T2S', 's_0': 'T1w'},
        ),
    )
# ======================================================================
def preset_t2s_flash_loglin2():
    """
    Preset to get T2* maps from multi-echo data using a log-linear fit.

    NOTE(review): the function name says `loglin2` but `compute_func` is
    'fit_monoexp_decay_loglin' (the non-squared variant) — confirm which
    is intended.
    """
    new_opts = {
        'types': ['T2S', 'T1w'],
        'param_select': ['ProtocolName', 'EchoTime::ms', '_series'],
        'match': '(?i).*(gre|flash).*',
        'dtype': 'float',
        'multi_acq': False,
        'compute_func': 'fit_monoexp_decay_loglin',
        'compute_kwargs': {
            'ti_label': 'EchoTime::ms',
            'img_types': {'tau': 'T2S', 's_0': 'T1w'}}
    }
    return new_opts
# ======================================================================
def preset_t2s_flash_builtin():
    """
    Preset to get built-in T2* maps from the FLASH sequence.
    """
    return dict(
        types=['T2S', 'T1w'],
        param_select=['ProtocolName', '_series'],
        match='.*T2Star_Images.*',
        dtype='float',
    )
# ======================================================================
def preset_t2s_multiecho_loglin():
    """
    Preset to get T2* maps from multi-echo squared data using a log-linear fit.

    NOTE(review): this is an exact duplicate of
    `preset_t2s_multiecho_loglin2` (including `compute_func` =
    'fit_monoexp_decay_loglin2'); the name suggests it should use
    'fit_monoexp_decay_loglin' instead — confirm.
    """
    new_opts = {
        'types': ['T2S', 'T1w'],
        'param_select': ['ProtocolName', 'EchoTime::ms', '_series'],
        'match': '(?i).*(gre|flash|me).*',
        'dtype': 'float',
        'multi_acq': False,
        'compute_func': 'fit_monoexp_decay_loglin2',
        'compute_kwargs': {
            'ti_label': 'EchoTime::ms',
            'img_types': {'tau': 'T2S', 's_0': 'T1w'}}
    }
    return new_opts
# ======================================================================
def preset_t2s_multiecho_loglin2():
    """
    Preset to get T2* maps from multi-echo squared data using a log-linear fit.
    """
    return dict(
        types=['T2S', 'T1w'],
        param_select=['ProtocolName', 'EchoTime::ms', '_series'],
        match='(?i).*(gre|flash|me).*',
        dtype='float',
        multi_acq=False,
        compute_func='fit_monoexp_decay_loglin2',
        compute_kwargs=dict(
            ti_label='EchoTime::ms',
            img_types={'tau': 'T2S', 's_0': 'T1w'},
        ),
    )
# ======================================================================
def preset_t2s_multiecho_leasq():
    """
    Preset to get T2* maps from multi-echo data using a least-squares fit.
    """
    return dict(
        types=['T2S', 'T1w'],
        param_select=['ProtocolName', 'EchoTime::ms', '_series'],
        match='.*(FLASH|ME-MP2RAGE).*',
        dtype='float',
        multi_acq=False,
        compute_func='fit_monoexp_decay_leasq',
        compute_kwargs=dict(
            ti_label='EchoTime::ms',
            img_types={'tau': 'T2S', 's_0': 'T1w'},
        ),
    )
# ======================================================================
def preset_b1t_afi():
    """
    Preset to get B1+ maps from the AFI sequence.
    """
    return dict(
        types=['B1T'],
        param_select=[
            'ProtocolName', 'RepetitionTime::ms', 'FlipAngle::deg',
            '_series'],
        match='.*(afi|b1).*',
        dtype='float',
        multi_acq=False,
        compute_func='calc_afi',
        compute_kwargs=dict(
            ti_label='RepetitionTime::ms',
            fa_label='FlipAngle::deg',
            img_types={'eff': 'B1T'},
        ),
    )
# ======================================================================
def preset_qsm_as_legacy():
    """
    Preset to get CHI maps from a multi-echo sequence.
    """
    return dict(
        types=['CHI', 'MSK'],
        param_select=[
            'ProtocolName', 'EchoTime::ms', 'ImagingFrequency', '_series'],
        match='.*(ME-MP2RAGE.*INV2).*',
        dtype='float',
        multi_acq=False,
        compute_func='ext_qsm_as_legacy',
        compute_kwargs=dict(
            te_label='EchoTime::ms',
            img_types={'qsm': 'CHI', 'mask': 'MSK'},
        ),
    )
# ======================================================================
def ext_qsm_as_legacy(
        images,
        affines,
        params,
        te_label,
        # b0_label,
        # th_label,
        img_types):
    """
    Compute QSM (CHI) and mask maps by delegating to the external
    `qsm_as_legacy.py` script through temporary NIfTI files in /tmp.

    Args:
        images (Sequence[np.ndarray]): Input volumes; the last two are
            used as magnitude and phase, respectively.
        affines (Sequence[np.ndarray]): Affines matching `images`.
        params (dict): Acquisition parameters; must contain `te_label`.
        te_label (str): Key into `params` holding echo times (ms).
        img_types (dict): Maps 'qsm' and 'mask' to output type labels.

    Returns:
        tuple: (img_list, aff_list, img_type_list, params_list)
    """
    # determine correct TE: the last echo below max_te is used
    max_te = 25.0  # ms
    # NOTE(review): if no TE < max_te, `selected` stays out of range and
    # the indexing below raises IndexError — confirm intended.
    selected = len(params[te_label])
    for i, te in enumerate(params[te_label]):
        if te < max_te:
            selected = i
    # BUG FIX: hashlib.md5 requires bytes, not str (TypeError on Python 3).
    tmp_dirpath = '/tmp/{}'.format(
        hashlib.md5(str(params).encode('utf-8')).hexdigest())
    if not os.path.isdir(tmp_dirpath):
        os.makedirs(tmp_dirpath)
    tmp_filenames = ('magnitude.nii.gz', 'phase.nii.gz',
                     'qsm.nii.gz', 'mask.nii.gz')
    tmp_filepaths = tuple(os.path.join(tmp_dirpath, tmp_filename)
                          for tmp_filename in tmp_filenames)
    # export temp input (keep only the last two volumes: magnitude, phase)
    if len(images) > 2:
        images = images[-2:]
        affines = affines[-2:]
    for image, affine, tmp_filepath in zip(images, affines, tmp_filepaths):
        pmio.save(tmp_filepath, image[..., selected], affine)
    # execute script on temp input
    cmd = [
        'qsm_as_legacy.py',
        '--magnitude_input', tmp_filepaths[0],
        '--phase_input', tmp_filepaths[1],
        '--qsm_output', tmp_filepaths[2],
        '--mask_output', tmp_filepaths[3],
        '--echo_time', str(params[te_label][selected]),
        # '--field_strength', str(params[b0_label][selected]),
        # '--angles', str(params[th_label][selected]),
        '--units', 'ppb']
    pmu.execute(' '.join(cmd))
    # import temp output (qsm and mask volumes)
    img_list, aff_list = [], []
    for tmp_filepath in tmp_filepaths[2:]:
        img, aff, hdr = pmio.load(tmp_filepath, full=True)
        img_list.append(img)
        aff_list.append(aff)
    # clean up tmp files
    if os.path.isdir(tmp_dirpath):
        shutil.rmtree(tmp_dirpath)
    # prepare output
    type_list = ('qsm', 'mask')
    params_list = ({'te': params[te_label][selected]}, {})
    img_type_list = tuple(img_types[key] for key in type_list)
    return img_list, aff_list, img_type_list, params_list
# ======================================================================
def qsm_sdi(
        images,
        affines,
        params,
        img_types):
    """
    Placeholder for an SDI-based QSM computation (not implemented).

    Mirrors the signature style of the other map-computing helpers in
    this module; currently a no-op returning None.
    """
    pass
# ======================================================================
def calc_afi(
        images,
        affines,
        params,
        ti_label,
        fa_label,
        img_types):
    """
    Compute a relative flip-angle (B1+ efficiency) map from an AFI
    (actual flip angle imaging) acquisition.

    Uses the dual-TR AFI relation fa = arccos((r * n - 1) / (n - r)),
    with r = S1 / S2 and n = TR2 / TR1; the returned map is
    fa / nominal_fa.

    Args:
        images: Input volumes; assumes modulus first and phase second
            along the stacking — TODO confirm against callers.
        affines: Affines matching `images` (first one is replicated).
        params (dict): Must contain the two repetition times under
            `ti_label` and the nominal flip angle under `fa_label`.
        ti_label (str): Key for the repetition times (ms).
        fa_label (str): Key for the nominal flip angle (deg).
        img_types (dict): Maps 'eff' to the output type label.

    Returns:
        tuple: (img_list, aff_list, img_type_list, params_list)
    """
    y_arr = np.stack(images, -1).astype(float)
    # combine modulus and (range-corrected) phase into a complex signal
    s_arr = pmu.polar2complex(y_arr[..., 0], fix_phase_interval(y_arr[..., 1]))
    # s_arr = images[0]
    t_r = params[ti_label]  # the two repetition times (TR1, TR2)
    nominal_fa = params[fa_label]
    mask = s_arr[..., 0] != 0.0
    # signal ratio r = S1 / S2 where the denominator is non-zero
    r = np.zeros_like(s_arr[..., 1])
    r[mask] = s_arr[..., 0][mask] / s_arr[..., 1][mask]
    n = t_r[1] / t_r[0]  # usually: t_r[1] > t_r[0]
    fa = np.rad2deg(np.real(np.arccos((r * n - 1) / (n - r))))
    # efficiency = measured flip angle / nominal flip angle
    img_list = [fa / nominal_fa]
    aff_list = _simple_affines(affines)
    type_list = ['eff']
    img_type_list = tuple(img_types[key] for key in type_list)
    params_list = ({},) * len(img_list)
    return img_list, aff_list, img_type_list, params_list
# ======================================================================
def time_to_rate(
        array,
        in_units='ms',
        out_units='Hz'):
    """
    Convert time constants to rates, in place: nonzero entries x become
    k / x, where k accounts for the unit conversion; zeros are left as-is.

    Args:
        array (np.ndarray): Time constants; modified in place.
        in_units (str): 'ms' or seconds otherwise.
        out_units (str): 'kHz' or 'Hz' otherwise.

    Returns:
        np.ndarray: The same array, now holding rates.
    """
    factor = 1.0
    if in_units == 'ms':
        factor *= 1.0e3  # 1 / t_ms * 1e3 gives Hz
    if out_units == 'kHz':
        factor *= 1.0e-3  # Hz -> kHz
    nonzero = array != 0.0
    array[nonzero] = factor / array[nonzero]
    return array
# ======================================================================
def rate_to_time(
        array,
        in_units='Hz',
        out_units='ms'):
    """
    Convert rates to time constants, in place: nonzero entries x become
    k / x, where k accounts for the unit conversion; zeros are left as-is.

    BUG FIX: both unit factors were inverted (kHz input used 1e3 instead
    of 1e-3, ms output used 1e-3 instead of 1e3). The two errors cancelled
    only for the kHz -> ms combination; with the defaults (Hz -> ms) the
    old code was off by a factor of 1e6.

    Args:
        array (np.ndarray): Rates; modified in place.
        in_units (str): 'kHz' or Hz otherwise.
        out_units (str): 'ms' or seconds otherwise.

    Returns:
        np.ndarray: The same array, now holding time constants.
    """
    k = 1.0
    if in_units == 'kHz':
        k *= 1.0e-3  # period in s of a kHz rate is 1e-3 / f
    if out_units == 'ms':
        k *= 1.0e3  # s -> ms
    array[array != 0.0] = k / array[array != 0.0]
    return array
# ======================================================================
def fix_phase_interval(arr):
    """
    Ensure that the range of values is interpreted as valid phase information.

    This is useful for DICOM-converted images (without post-processing):
    if the peak-to-peak range exceeds 2*pi, the array is rescaled to
    (-pi, pi); otherwise it is returned untouched.

    Args:
        arr (np.ndarray): Array to be processed.

    Returns:
        array (np.ndarray): An array scaled to (-pi,pi).

    Examples:
        >>> fix_phase_interval(np.arange(8))
        array([-3.14159265, -2.24399475, -1.34639685, -0.44879895,  0.44879895,
                1.34639685,  2.24399475,  3.14159265])
        >>> fix_phase_interval(np.array([-10, -5, 0, 5, 10]))
        array([-3.14159265, -1.57079633,  0.        ,  1.57079633,  3.14159265])
        >>> fix_phase_interval(np.array([-10, 10, 1, -3]))
        array([-3.14159265,  3.14159265,  0.31415927, -0.9424778 ])
    """
    # correct phase value range (useful for DICOM-converted images)
    if np.ptp(arr) <= 2.0 * np.pi:
        return arr
    return pmu.scale(arr.astype(float), (-np.pi, np.pi))
# ======================================================================
def func_exp_recovery(t_arr, tau, s_0, eff=1.0, const=0.0):
"""
s(t)= s_0 * (1 - 2 * eff * exp(-t/tau)) + const
[s_0 > 0, tau > 0, eff > 0]
"""
if s_0 > 0.0 and tau > 0.0 and eff > 0.0:
s_t_arr = s_0 * (1.0 - 2.0 * eff * np.exp(-t_arr / tau)) + const
else:
s_t_arr = np.tile(np.inf, len(t_arr))
return s_t_arr
# ======================================================================
def func_exp_decay(t_arr, tau, s_0, const=0.0):
    """
    Monoexponential decay signal model:
    s(t) = s_0 * exp(-t / tau) + const
    [typically s_0 > 0, tau > 0; no validity check is performed]
    """
    decay = np.exp(-t_arr / tau)
    return const + s_0 * decay
# ======================================================================
def func_flash(m0, fa, tr, t1, te, t2s):
    """
    The FLASH (a.k.a. GRE, TFL, SPGR) signal expression:
    S = M0 sin(fa) exp(-TE/T2*) (1 - exp(-TR/T1)) / (1 - cos(fa) exp(-TR/T1))
    """
    e1 = np.exp(-tr / t1)  # longitudinal relaxation term, shared by num/den
    return m0 * np.sin(fa) * np.exp(-te / t2s) * (1.0 - e1) / (1.0 - np.cos(fa) * e1)
# ======================================================================
def uniform_mp2rage(
        inv1m_arr,
        inv1p_arr,
        inv2m_arr,
        inv2p_arr,
        regularization=np.spacing(1),
        values_interval=None):
    """
    Calculate the uniform image from an MP2RAGE acquisition.

    Args:
        inv1m_arr (float|np.ndarray): Magnitude of the first inversion image.
        inv1p_arr (float|np.ndarray): Phase of the first inversion image.
        inv2m_arr (float|np.ndarray): Magnitude of the second inversion image.
        inv2p_arr (float|np.ndarray): Phase of the second inversion image.
        regularization (float|int): Parameter for the regularization.
            This parameter is added to the denominator of the signal expression
            for normalization purposes, therefore should be much smaller than
            the average of the magnitude images.
            Larger values of this parameter will have the side effect of
            denoising the background.
        values_interval (tuple[float|int]|None): The output values interval.
            The standard values are linearly converted to this range.
            If None, the standard (-0.5, 0.5) values are returned.

    Returns:
        rho_arr (float|np.ndarray): The calculated uniform image from MP2RAGE.
    """
    if not regularization:
        regularization = 0
    inv1m_arr = inv1m_arr.astype(float)
    inv2m_arr = inv2m_arr.astype(float)
    # make sure the phase images are in the (-pi, pi) range
    inv1p_arr = fix_phase_interval(inv1p_arr)
    inv2p_arr = fix_phase_interval(inv2p_arr)
    inv1_arr = pmu.polar2complex(inv1m_arr, inv1p_arr)
    inv2_arr = pmu.polar2complex(inv2m_arr, inv2p_arr)
    # regularized MP2RAGE combination; result lies in (-0.5, 0.5)
    rho_arr = np.real(inv1_arr.conj() * inv2_arr /
                      (inv1m_arr ** 2 + inv2m_arr ** 2 + regularization))
    if values_interval:
        # BUG FIX: `scale` was referenced unqualified (NameError at
        # runtime); it is defined in pmu. (Also removed a debug print.)
        rho_arr = pmu.scale(rho_arr, values_interval, (-0.5, 0.5))
    return rho_arr
# ======================================================================
def t1_mp2rage(
        inv1m_arr=None,
        inv1p_arr=None,
        inv2m_arr=None,
        inv2p_arr=None,
        rho_arr=None,
        regularization=np.spacing(1),
        eff_arr=None,
        t1_value_range=(100, 5000),
        t1_num=512,
        eff_num=32,
        **acq_param_kws):
    """
    Calculate the T1 map from an MP2RAGE acquisition.

    Args:
        inv1m_arr (float|np.ndarray): Magnitude of the first inversion image.
        inv1p_arr (float|np.ndarray): Phase of the first inversion image.
        inv2m_arr (float|np.ndarray): Magnitude of the second inversion image.
        inv2p_arr (float|np.ndarray): Phase of the second inversion image.
        rho_arr (float|np.ndarray|None): Pre-computed uniform (rho) image
            in DICOM value range; if None, it is computed from the four
            inversion images via `uniform_mp2rage`.
        regularization (float|int): Regularization for `uniform_mp2rage`.
        eff_arr (float|np.array|None): Efficiency of the RF pulse excitation.
            This is equivalent to the normalized B1T field.
            Note that this must have the same spatial dimensions as the images
            acquired with MP2RAGE.
            If None, no correction for the RF efficiency is performed.
        t1_value_range (tuple[float]): The T1 value range to consider.
            The format is (min, max) where min < max.
            Values should be positive.
        t1_num (int): The base number of sampling points of T1.
            The actual number of sampling points is usually smaller, because of
            the removal of non-bijective branches.
            This affects the precision of the MP2RAGE estimation.
        eff_num (int): The base number of sampling points for the RF efficiency.
            This affects the precision of the RF efficiency correction.
        **acq_param_kws (dict): The acquisition parameters.
            This should match the signature of: `mp2rage.acq_to_seq_params`.

    Returns:
        t1_arr (float|np.ndarray): The calculated T1 map for MP2RAGE.
    """
    from pymrt.sequences import mp2rage
    import matplotlib.pyplot as plt
    # BUG FIX: `if eff_arr:` is ambiguous for ndarray input (ValueError);
    # test identity against None instead.
    if eff_arr is not None:
        # todo: implement B1T correction
        raise NotImplementedError('B1T correction is not yet implemented')
    else:
        # determine the signal expression (T1 -> rho look-up table)
        t1 = np.linspace(t1_value_range[0], t1_value_range[1], t1_num)
        seq_param_kws = mp2rage.acq_to_seq_params(**acq_param_kws)[0]
        rho = mp2rage.signal(t1, **seq_param_kws)
        # plot T1 vs. RHO
        plt.figure()
        plt.plot(rho, t1)
        plt.xlabel('RHO')
        plt.ylabel('T1 (ms)')
        plt.title('T1 vs. RHO')
        plt.savefig('T1_vs_RHO.pdf', format='PDF', transparent=True)
        # remove non-bijective branches so the look-up is invertible
        bijective_part = pmu.bijective_part(rho)
        t1 = t1[bijective_part]
        rho = rho[bijective_part]
        if rho[0] > rho[-1]:
            rho = rho[::-1]
            t1 = t1[::-1]
        # plot the bijective part of the graph
        plt.figure()
        plt.plot(rho, t1)
        plt.xlabel('RHO')
        plt.ylabel('T1 (ms)')
        plt.title('T1 vs. RHO (bijective part only)')
        plt.savefig('T1_vs_RHO_bij.pdf', format='PDF', transparent=True)
        # check that rho values are strictly increasing (np.interp requires it)
        if not np.all(np.diff(rho) > 0):
            raise ValueError('MP2RAGE look-up table was not properly prepared.')
        # BUG FIX: `rho_arr == None` raises ValueError for ndarray input
        # (elementwise comparison); use `is None`.
        if rho_arr is None:
            rho_arr = uniform_mp2rage(inv1m_arr, inv1p_arr, inv2m_arr, inv2p_arr, regularization, values_interval=None)
        else:
            # rescale a DICOM-range rho image back to the standard (-0.5, 0.5)
            rho_arr = pmu.scale(rho_arr, (-0.5, 0.5), DICOM_INTERVAL)
        t1_arr = np.interp(rho_arr, rho, t1)
    return t1_arr, rho_arr
# ======================================================================
def fit_monoexp_decay_leasq(
        images,
        affines,
        params,
        ti_label,
        img_types):
    """
    Fit monoexponential decay to images using the least-squares method.
    """
    norm_factor = 1e4
    # use the modulus only, normalized to a fixed dynamic range
    signal = np.stack(images, -1).astype(float)[..., 0]
    signal = signal / np.max(signal) * norm_factor
    echo_times = np.array(params[ti_label]).astype(float)
    # per-voxel fit of s(t) = s_0 * exp(-t / tau)
    fitted = voxel_curve_fit(
        signal, echo_times, func_exp_decay,
        (np.mean(echo_times), np.mean(signal)), method='curve_fit')
    maps = np.split(fitted, 2, -1)
    out_types = ('tau', 's_0')
    return (
        maps,
        _simple_affines(affines),
        tuple(img_types[key] for key in out_types),
        ({},) * len(maps),
    )
# ======================================================================
def fit_monoexp_decay_loglin(
        images,
        affines,
        params,
        ti_label,
        img_types):
    """
    Fit monoexponential decay to images using the log-linear method.

    The model s(t) = s_0 * exp(-t / tau) is linearized as
    log(s) = log(s_0) - t / tau and fitted with a polynomial fit;
    `prepare` maps data to log space and `fix` maps the polynomial
    coefficients back to (tau, s_0).
    """
    def prepare(arr, factor=0):
        # Log-transform; `factor` shifts values in log space (rescaled
        # back out in `fix`). Non-positive voxels are left at 0.
        log_arr = np.zeros_like(arr)
        # calculate logarithm only of strictly positive values
        log_arr[arr > 0.0] = np.log(arr[arr > 0.0] * np.e ** factor)
        return log_arr

    def fix(arr, factor=0):
        # Map polynomial coefficients back to model parameters:
        # slope -> tau = -1 / slope; intercept -> s_0 = exp(intercept - factor)
        # tau = p_arr[..., 0]
        # s_0 = p_arr[..., 1]
        mask = arr[..., 0] != 0.0
        arr[..., 0][mask] = - 1.0 / arr[..., 0][mask]
        arr[..., 1] = np.exp(arr[..., 1] - factor)
        return arr

    exp_factor = 12  # 0: untouched, other values might improve results
    y_arr = np.stack(images, -1).astype(float)
    y_arr = y_arr[..., 0]  # use only the modulus
    x_arr = np.array(params[ti_label]).astype(float)
    p_arr = voxel_curve_fit(
        y_arr, x_arr,
        None, (np.mean(x_arr), np.mean(y_arr)),
        prepare, [exp_factor], {},
        fix, [exp_factor], {},
        method='poly')
    # split the fitted parameters into separate tau and s_0 maps
    img_list = np.split(p_arr, 2, -1)
    aff_list = _simple_affines(affines)
    type_list = ('tau', 's_0')
    img_type_list = tuple(img_types[key] for key in type_list)
    params_list = ({},) * len(img_list)
    return img_list, aff_list, img_type_list, params_list
# ======================================================================
def fit_monoexp_decay_loglin2(
        images,
        affines,
        params,
        ti_label,
        img_types):
    """
    Fit monoexponential decay to squared images using the log-linear method.

    Like `fit_monoexp_decay_loglin`, but fits log(s^2) after subtracting a
    noise floor estimate, so the slope maps back via tau = -2 / slope.
    """
    def prepare(arr, factor=0, noise=0):
        # Noise-floor subtraction, then log of the SQUARED signal; only
        # strictly positive voxels are transformed, others stay 0.
        log_arr = np.zeros_like(arr)
        # calculate logarithm only of strictly positive values
        arr -= noise
        mask = arr > 0.0
        log_arr[mask] = np.log(arr[mask] ** 2.0 * np.e ** factor)
        return log_arr

    def fix(arr, factor=0):
        # Map polynomial coefficients back to model parameters: the signal
        # was squared, hence tau = -2 / slope (instead of -1 / slope).
        # tau = p_arr[..., 0]
        # s_0 = p_arr[..., 1]
        mask = arr[..., 0] != 0.0
        arr[..., 0][mask] = - 2.0 / arr[..., 0][mask]
        arr[..., 1] = np.exp(arr[..., 1] - factor)
        return arr

    exp_factor = 12  # 0: untouched, other values might improve results
    y_arr = np.stack(images, -1).astype(float)
    y_arr = y_arr[..., 0]  # use only the modulus
    x_arr = np.array(params[ti_label]).astype(float)
    # crude noise-floor estimate: 3rd percentile of the signal
    noise_level = np.percentile(y_arr, 3)
    p_arr = voxel_curve_fit(
        y_arr, x_arr,
        None, (np.mean(x_arr), np.mean(y_arr)),
        prepare, [exp_factor, noise_level], {},
        fix, [exp_factor], {},
        method='poly')
    # split the fitted parameters into separate tau and s_0 maps
    img_list = np.split(p_arr, 2, -1)
    aff_list = _simple_affines(affines)
    type_list = ('tau', 's_0')
    img_type_list = tuple(img_types[key] for key in type_list)
    params_list = ({},) * len(img_list)
    return img_list, aff_list, img_type_list, params_list
# ======================================================================
def voxel_curve_fit(
        y_arr,
        x_arr,
        fit_func=None,
        fit_params=None,
        pre_func=None,
        pre_args=None,
        pre_kwargs=None,
        post_func=None,
        post_args=None,
        post_kwargs=None,
        method='curve_fit'):
    """
    Curve fitting for y = F(x, p)
    Args:
        y_arr (np.ndarray): Dependent variable with x dependence in the n-th dim
        x_arr (np.ndarray): Independent variable with same size as n-th dim of y
        fit_func (func): Fitting function (used by 'curve_fit' and by the
            generic fallback, where it is called as fit_func(y, x, params)).
        fit_params (list[float]): Initial guess of the fit parameters; its
            length determines the number of fitted parameters per voxel.
        pre_func (func): Preprocessing applied to y_arr before fitting.
        pre_args (list): Positional arguments for pre_func.
        pre_kwargs (dict): Keyword arguments for pre_func.
        post_func (func): Postprocessing applied to the fitted parameters.
        post_args (list): Positional arguments for post_func.
        post_kwargs (dict): Keyword arguments for post_func.
        method (str): Method to use for the curve fitting procedure.
    Returns:
        p_arr (np.ndarray): Fitted parameters, len(fit_params) values per
            voxel, reshaped to match the leading dimensions of `y_arr`.
    """
    # reshape to linearize the independent dimensions of the array
    support_axis = -1
    shape = y_arr.shape
    support_size = shape[support_axis]
    y_arr = y_arr.reshape((-1, support_size))
    num_voxels = y_arr.shape[0]
    p_arr = np.zeros((num_voxels, len(fit_params)))
    # preprocessing
    if pre_func is not None:
        if pre_args is None:
            pre_args = []
        if pre_kwargs is None:
            pre_kwargs = {}
        y_arr = pre_func(y_arr, *pre_args, **pre_kwargs)
    if method == 'curve_fit':
        # One fitting task per voxel: iterate the rows of `y_arr` so the
        # number of results matches the rows of `p_arr`.
        # BUG FIX: this used `np.split(y_arr, support_size, 0)`, which
        # produced `support_size` 2D chunks instead of `num_voxels` 1D
        # profiles, so `p_arr[i]` received the wrong (and too few) results.
        iter_param_list = [
            (fit_func, x_arr, y_i_arr, fit_params)
            for y_i_arr in y_arr]
        # use the pool as a context manager so workers are always released
        with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
            for i, (par_opt, par_cov) in \
                    enumerate(pool.imap(pmu.curve_fit, iter_param_list)):
                p_arr[i] = par_opt
    elif method == 'poly':
        # polyfit requires to change matrix orientation using transpose
        p_arr = np.polyfit(x_arr, y_arr.transpose(), len(fit_params) - 1)
        # transpose the results back
        p_arr = p_arr.transpose()
    else:
        try:
            p_arr = fit_func(y_arr, x_arr, fit_params)
        except Exception as ex:
            # fixed message: this function is voxel_curve_fit, not ndarray_fit
            print('WW: Exception "{}" in voxel_curve_fit() method "{}"'.format(
                ex, method))
    # revert to original shape
    p_arr = p_arr.reshape(list(shape[:support_axis]) + [len(fit_params)])
    # post process
    if post_func is not None:
        if post_args is None:
            post_args = []
        if post_kwargs is None:
            post_kwargs = {}
        p_arr = post_func(p_arr, *post_args, **post_kwargs)
    return p_arr
# ======================================================================
def match_series(images, affines, params, matches):
    """
    Select images whose series name matches a regular expression.

    For each ``(pattern, img_type)`` pair in `matches`, the first entry of
    ``params['_series']`` that matches `pattern` (via `re.match`) contributes
    its image and affine to the output, tagged with `img_type`.
    """
    selected = []
    for pattern, img_type in matches:
        index = next(
            (i for i, series in enumerate(params['_series'])
             if re.match(pattern, series)),
            None)
        if index is not None:
            selected.append((images[index], affines[index], img_type))
    img_list = [img for img, _, _ in selected]
    aff_list = [aff for _, aff, _ in selected]
    img_type_list = [img_type for _, _, img_type in selected]
    return img_list, aff_list, img_type_list, ({},) * len(img_list)
# ======================================================================
def sources_generic(
        data_dirpath,
        meta_dirpath=None,
        opts=None,
        force=False,
        verbose=D_VERB_LVL):
    """
    Get source files (both data and metadata) from specified directories
    Args:
        data_dirpath (str): Directory containing data files
        meta_dirpath (str|None): Directory containing metadata files
        opts (dict):
            Accepted options:
                - data_ext (str): File extension of the data files
                - meta_ext (str): File extension of the metadata files
                - multi_acq (bool): Use multiple acquisitions for computation
                - use_meta (bool): Use metadata, instead of filenames, to get
                  parameters
                - param_select (list[str]): Parameters to select from metadata
                - match (str): regular expression used to select data filenames
                - pattern (tuple[int]): Slicing applied to data list
                - groups (list[int]|None): Split results into groups
                  (cyclically)
        force (bool): Force calculation of output
        verbose (int): Set level of verbosity.
    Returns:
        sources_list (list[list[str]]): List of lists of filenames to be used
            for computation
        params_list : (list[list[str|float|int]]): List of lists of parameters
            associated with the specified sources
    See Also:
        pymrt.computation.compute_generic,
        pymrt.computation.compute,
        pymrt.computation.D_OPTS
    """
    sources_list = []
    params_list = []
    # user-supplied options override the module defaults
    opts = pmu.merge_dicts(D_OPTS, opts)
    if verbose >= VERB_LVL['medium']:
        print('Opts:\t{}'.format(json.dumps(opts)))
    if os.path.isdir(data_dirpath):
        pattern = slice(*opts['pattern'])
        sources, params = [], {}
        # `new_acq` flags the start of a new acquisition, which (unless
        # `multi_acq` is set) closes the current group of sources
        last_acq, new_acq = None, None
        data_filepath_list = pmu.listdir(
            data_dirpath, opts['data_ext'], pattern)
        for data_filepath in data_filepath_list:
            info = pmn.parse_filename(
                pmu.change_ext(pmu.os.path.basename(data_filepath), '',
                               pmu.EXT['niz']))
            if opts['use_meta']:
                # import parameters from metadata
                info['seq'] = None
                series_meta_filepath = os.path.join(
                    meta_dirpath,
                    pmn.to_filename(info, ext=opts['meta_ext']))
                if os.path.isfile(series_meta_filepath):
                    with open(series_meta_filepath, 'r') as meta_file:
                        series_meta = json.load(meta_file)
                    # the series metadata points at its acquisition metadata
                    acq_meta_filepath = os.path.join(
                        meta_dirpath, series_meta['_acquisition'] +
                        pmu.add_extsep(opts['meta_ext']))
                    if os.path.isfile(acq_meta_filepath):
                        with open(acq_meta_filepath, 'r') as meta_file:
                            acq_meta = json.load(meta_file)
                        data_params = {}
                        if opts['param_select']:
                            # keep only the requested parameters
                            # (missing keys are recorded as None)
                            for item in opts['param_select']:
                                data_params[item] = acq_meta[item] \
                                    if item in acq_meta else None
                        else:
                            data_params = acq_meta
                        new_acq = (last_acq and acq_meta['_series'] != last_acq)
                        last_acq = acq_meta['_series']
            else:
                # import parameters from filename
                base, data_params = pmn.parse_series_name(info['name'])
                new_acq = (last_acq and base != last_acq)
                last_acq = base
            if not opts['multi_acq'] and new_acq and sources:
                # acquisition changed: flush the current group of sources
                sources_list.append(sources)
                params_list.append(params)
                sources, params = [], {}
            if not opts['match'] or \
                    re.match(opts['match'], os.path.basename(data_filepath)):
                sources.append(data_filepath)
                if opts['use_meta']:
                    params.update(data_params)
                else:
                    # accumulate per-file values into per-key lists
                    for key, val in data_params.items():
                        params[key] = (params[key] if key in params else []) \
                            + [val]
        if sources:
            # flush the last (possibly only) group
            sources_list.append(sources)
            params_list.append(params)
        if opts['groups']:
            # re-split the collected groups cyclically by the `groups` sizes
            grouped_sources_list, grouped_params_list = [], []
            grouped_sources, grouped_params = [], []
            for sources, params in zip(sources_list, params_list):
                grouping = list(opts['groups']) * \
                    int((len(sources) / sum(opts['groups'])) + 1)
                seps = pmu.accumulate(grouping) if grouping else []
                for i, source in enumerate(sources):
                    grouped_sources.append(source)
                    grouped_params.append(params)
                    if i + 1 in seps or i + 1 == len(sources):
                        grouped_sources_list.append(grouped_sources)
                        grouped_params_list.append(grouped_params)
                        grouped_sources, grouped_params = [], []
            sources_list = grouped_sources_list
            params_list = grouped_params_list
        if verbose >= VERB_LVL['debug']:
            for sources, params in zip(sources_list, params_list):
                print(pmu.tty_colorify('DEBUG', 'r'))
                print(sources, params)
    elif verbose >= VERB_LVL['medium']:
        print("WW: no data directory '{}'. Skipping.".format(data_dirpath))
    return sources_list, params_list
# ======================================================================
def compute_generic(
        sources,
        out_dirpath,
        params=None,
        opts=None,
        force=False,
        verbose=D_VERB_LVL):
    """
    Perform the specified computation on source files.
    Args:
        sources (list[str]): Directory containing data files.
        out_dirpath (str): Directory containing metadata files.
        params (dict): Parameters associated with the sources.
        opts (dict):
            Accepted options:
                - types (list[str]): List of image types to use for results.
                - mask: (tuple[tuple[int]): Slicing for each dimension.
                - adapt_mask (bool): adapt over- or under-sized mask.
                - dtype (str): data type to be used for the target images.
                - compute_func (str): function used for the computation.
                  compute_func(images, params, compute_args, compute_kwargs)
                  -> img_list, img_type_list
                - compute_args (list): additional positional parameters for
                  compute_func
                - compute_kwargs (dict): additional keyword parameters for
                  compute_func
                - affine_func (str): name of the function for affine
                  computation: affine_func(affines, affine_args...) -> affine
                - affine_args (list): additional parameters for affine_func
        force (bool): Force calculation of output
        verbose (int): Set level of verbosity.
    Returns:
        targets (list[str]): Output file paths, one per requested image type.
    See Also:
        pymrt.computation.sources_generic,
        pymrt.computation.compute,
        pymrt.computation.D_OPTS
    """
    # TODO: implement affine_func, affine_args, affine_kwargs?
    # get the num, name and seq from first source file
    opts = pmu.merge_dicts(D_OPTS, opts)
    if params is None:
        params = {}
    if opts is None:
        opts = {}
    targets = []
    info = pmn.parse_filename(sources[0])
    if 'ProtocolName' in params:
        info['name'] = params['ProtocolName']
    # one target filepath per requested output image type
    for image_type in opts['types']:
        info['type'] = image_type
        targets.append(os.path.join(out_dirpath, pmn.to_filename(info)))
    # perform the calculation (only if outputs are missing/stale or forced)
    if pmu.check_redo(sources, targets, force):
        if verbose > VERB_LVL['none']:
            print('{}:\t{}'.format('Object', os.path.basename(info['name'])))
        if verbose >= VERB_LVL['medium']:
            print('Opts:\t{}'.format(json.dumps(opts)))
        images, affines = [], []
        mask = [
            (slice(*dim) if dim is not None else slice(None))
            for dim in opts['mask']]
        for source in sources:
            if verbose > VERB_LVL['none']:
                print('Source:\t{}'.format(os.path.basename(source)))
            if verbose > VERB_LVL['none']:
                print('Params:\t{}'.format(params))
            image, affine, header = pmio.load(source, full=True)
            # fix mask if shapes are different
            if opts['adapt_mask']:
                mask = [
                    (mask[i] if i < len(mask) else slice(None))
                    for i in range(len(image.shape))]
            images.append(image[mask])
            affines.append(affine)
        if 'compute_func' in opts:
            # SECURITY NOTE: the compute function is resolved via eval() on a
            # string taken from the options; only use trusted option sources.
            compute_func = eval(opts['compute_func'])
            if 'compute_args' not in opts:
                opts['compute_args'] = []
            if 'compute_kwargs' not in opts:
                opts['compute_kwargs'] = {}
            img_list, aff_list, img_type_list, params_list = compute_func(
                images, affines, params,
                *opts['compute_args'], **opts['compute_kwargs'])
        else:
            # no computation requested: pass sources through, cycling the
            # requested types over the loaded images
            img_list, aff_list, img_type_list = zip(
                *[(img, aff, img_type) for img, aff, img_type
                  in zip(images, affines, itertools.cycle(opts['types']))])
            params_list = ({},) * len(img_list)
        for target, target_type in zip(targets, opts['types']):
            # NOTE(review): the loop variable `params` shadows the `params`
            # argument from here on — looks unintended; confirm.
            for img, aff, img_type, params in \
                    zip(img_list, aff_list, img_type_list, params_list):
                if img_type == target_type:
                    if 'dtype' in opts:
                        img = img.astype(opts['dtype'])
                    if params:
                        # encode result-specific parameters in the filename
                        for key, val in params.items():
                            target = pmn.change_param_val(target, key, val)
                    if verbose > VERB_LVL['none']:
                        print('Target:\t{}'.format(os.path.basename(target)))
                    pmio.save(target, img, aff)
                    break
    return targets
# ======================================================================
def compute(
        sources_func,
        sources_args,
        sources_kwargs,
        compute_func,
        compute_args,
        compute_kwargs,
        in_dirpath,
        out_dirpath,
        recursive=False,
        meta_subpath=None,
        data_subpath=None,
        verbose=D_VERB_LVL):
    """
    Interface to perform calculation from all input files within a path.
    If recursive flag is set or if input directory contains no suitable file,
    it tries to descend into subdirectories.
    If meta_subpath is set, it will look there for metadata files.
    If data_subpath is set, it will look there for data files.
    Args:
        sources_func (func): Returns a list of list of filepaths used as input.
            Each list of filepaths should contain the exhaustive input for the
            computation to be performed. Function expected signature:
            sources_func(data_path, meta_path, sources_args...) ->
            ((string, dict) list) list.
        sources_args (list): Positional parameters passed to get_sources_func.
        sources_kwargs (dict): Keyword parameters passed to get_sources_func.
        compute_func (func): Calculation to perform on each list of filepaths.
            Function expected signature:
            compute_func(source_list, out_dirpath, compute_args...) ->
            out_filepath.
        compute_args (list): Positional parameters passed to compute_func.
        compute_kwargs (dict): Keyword parameters passed to compute_func.
        in_dirpath (str): Path to input directory path.
        out_dirpath (str): Path to output directory path.
            The input directory structure is preserved during the recursion.
        recursive (bool): Process subdirectories recursively.
        meta_subpath (str): Subpath appended when searching for metadata.
            Appending is performed (non-cumulatively) at each iteration
            recursion.
        data_subpath (str): Subpath appended when searching for data.
            Appending is performed (non-cumulatively) at each iteration
            recursion.
        verbose (int): Set level of verbosity.
    Returns:
        None
    See Also:
        pymrt.computation.compute_generic,
        pymrt.computation.source_generic,
        pymrt.computation.D_OPTS
    """
    # handle extra subdirectories in input path
    data_dirpath = os.path.join(in_dirpath, data_subpath) \
        if data_subpath is not None else in_dirpath
    meta_dirpath = os.path.join(in_dirpath, meta_subpath) \
        if meta_subpath is not None else None
    # extract input files from directory
    sources_list, params_list = sources_func(
        data_dirpath, meta_dirpath, *sources_args, **sources_kwargs)
    if sources_list and params_list:
        if not out_dirpath:
            out_dirpath = in_dirpath
        elif not os.path.exists(out_dirpath):
            os.makedirs(out_dirpath)
        if verbose > VERB_LVL['none']:
            print('Input:\t{}'.format(in_dirpath))
            print('Output:\t{}'.format(out_dirpath))
        if verbose >= VERB_LVL['medium']:
            print('Data subpath:\t{}'.format(data_subpath))
        if meta_dirpath and verbose >= VERB_LVL['medium']:
            print('Meta subpath:\t{}'.format(meta_subpath))
        # run the computation once per group of sources
        for sources, params in zip(sources_list, params_list):
            compute_func(
                sources, out_dirpath, params,
                *compute_args, **compute_kwargs)
            pmu.elapsed('Time: ')
            if verbose >= VERB_LVL['medium']:
                pmu.print_elapsed(only_last=True)
    else:
        # nothing usable here: force descent into subdirectories
        recursive = True
    # descend into subdirectories
    if recursive:
        # NOTE(review): `recursive` is always True inside this branch, so
        # this reassignment is a no-op — confirm the original intent.
        recursive = recursive or bool(sources_list)
        subdirs = [subdir for subdir in os.listdir(in_dirpath)
                   if os.path.isdir(os.path.join(in_dirpath, subdir))]
        for subdir in subdirs:
            # mirror the input directory structure in the output
            new_in_dirpath = os.path.join(in_dirpath, subdir)
            new_out_dirpath = os.path.join(out_dirpath, subdir)
            compute(
                sources_func, sources_args, sources_kwargs,
                compute_func, compute_args, compute_kwargs,
                new_in_dirpath, new_out_dirpath, recursive,
                meta_subpath, data_subpath, verbose)
# ======================================================================
# Run as a script: print the module docstring and report elapsed time.
if __name__ == '__main__':
    msg(__doc__.strip())
    pmu.elapsed('pymrt.computation')
|
from pydantic import BaseModel
from pathlib import Path
class Config(BaseModel):
    """Package-level configuration: directory layout and environment helpers."""

    # NOTE(review): these attributes have no type annotations — how pydantic
    # treats un-annotated class attributes is version-dependent; confirm they
    # are intended as class-level constants rather than model fields.
    PACKAGE_DIR = Path(__file__).parent.resolve()  # directory of this package
    DATASET_DIR = PACKAGE_DIR / "data"
    TRAINED_MODELS_DIR = PACKAGE_DIR / "trained_models"
    # Create directories
    # NOTE: this runs once as an import-time side effect of defining the class.
    TRAINED_MODELS_DIR.mkdir(exist_ok=True)
    DATASET_DIR.mkdir(exist_ok=True)

    def set_tensorflow_seeds(self, seed=0):
        """Seed numpy, the stdlib RNG and tensorflow for reproducible runs."""
        import tensorflow as tf
        import numpy as np
        import random as python_random
        np.random.seed(seed)
        python_random.seed(seed)
        tf.random.set_seed(seed)

    def set_matplotlib(self, format="svg"):
        """Set the inline matplotlib output format (e.g. 'svg', 'png')."""
        from matplotlib_inline import backend_inline
        backend_inline.set_matplotlib_formats(format)

    def set_ignore_warnings(self):
        """Silence all Python warnings globally (affects the whole process)."""
        import warnings
        warnings.simplefilter(action='ignore')

    def list_tensorflow_devices(self):
        """Return the physical devices visible to TensorFlow."""
        import tensorflow as tf
        return tf.config.list_physical_devices()


# Module-level singleton used by the rest of the package.
config = Config()
class Solution:
def findContentChildren(self, g: List[int], s: List[int]) -> int:
|
import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from openai_ros.openai_ros_common import ROSLauncher
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
    """Superclass for all CubeSingleDisk environments.

    Wraps a Gazebo simulation of a cube driven by a single inertia wheel:
    spawns the robot, wires up the joint-state/odometry subscribers and the
    wheel-velocity publisher, and provides helpers for the training envs.
    """

    def __init__(self, ros_ws_abspath):
        """Initializes a new CubeSingleDisk environment.

        Args:
            ros_ws_abspath (str): Absolute path of the catkin workspace
                containing the `moving_cube_description` package.
        """
        # We launch the ROSlaunch that spawns the robot into the world
        ROSLauncher(rospackage_name="moving_cube_description",
                    launch_file_name="put_cube_in_world.launch",
                    ros_ws_abspath=ros_ws_abspath)
        # Variables that we give through the constructor.
        # None in this case
        # Internal Vars
        self.controllers_list = ['joint_state_controller',
                                 'inertia_wheel_roll_joint_velocity_controller'
                                 ]
        self.robot_name_space = "moving_cube"
        # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
        super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
                                                robot_name_space=self.robot_name_space,
                                                reset_controls=True)
        """
        To check any topic we need to have the simulations running, we need to do two things:
        1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
        that are pause for whatever the reason
        2) If the simulation was running already for some reason, we need to reset the controlers.
        This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
        and need to be reseted to work properly.
        """
        self.gazebo.unpauseSim()
        self.controllers_object.reset_controllers()
        self._check_all_sensors_ready()
        # We Start all the ROS related Subscribers and publishers
        rospy.Subscriber("/moving_cube/joint_states",
                         JointState, self._joints_callback)
        rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
        self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
                                             Float64, queue_size=1)
        self._check_publishers_connection()
        self.gazebo.pauseSim()

    # Methods needed by the RobotGazeboEnv
    # ----------------------------
    def _check_all_systems_ready(self):
        """
        Checks that all the sensors, publishers and other simulation systems are
        operational.
        """
        self._check_all_sensors_ready()
        return True

    # CubeSingleDiskEnv virtual methods
    # ----------------------------
    def _check_all_sensors_ready(self):
        """Blocks until both joint states and odometry have been received."""
        self._check_joint_states_ready()
        self._check_odom_ready()
        rospy.logdebug("ALL SENSORS READY")

    def _check_joint_states_ready(self):
        """Waits for the first /moving_cube/joint_states message.

        Returns:
            JointState: The last received joint states message.
        """
        self.joints = None
        while self.joints is None and not rospy.is_shutdown():
            try:
                self.joints = rospy.wait_for_message(
                    "/moving_cube/joint_states", JointState, timeout=1.0)
                rospy.logdebug(
                    "Current moving_cube/joint_states READY=>" + str(self.joints))
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. wait_for_message raises
            # rospy.ROSException on timeout (and ROSInterruptException, a
            # subclass of it, on shutdown), so this catches only the
            # retryable cases.
            except rospy.ROSException:
                rospy.logerr(
                    "Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
        return self.joints

    def _check_odom_ready(self):
        """Waits for the first /moving_cube/odom message.

        Returns:
            Odometry: The last received odometry message.
        """
        self.odom = None
        while self.odom is None and not rospy.is_shutdown():
            try:
                self.odom = rospy.wait_for_message(
                    "/moving_cube/odom", Odometry, timeout=1.0)
                rospy.logdebug(
                    "Current /moving_cube/odom READY=>" + str(self.odom))
            # BUG FIX: narrowed from a bare `except:` (see
            # _check_joint_states_ready for the rationale).
            except rospy.ROSException:
                rospy.logerr(
                    "Current /moving_cube/odom not ready yet, retrying for getting odom")
        return self.odom

    def _joints_callback(self, data):
        # Cache the latest joint states delivered by the subscriber.
        self.joints = data

    def _odom_callback(self, data):
        # Cache the latest odometry delivered by the subscriber.
        self.odom = data

    def _check_publishers_connection(self):
        """
        Checks that all the publishers are working
        :return:
        """
        rate = rospy.Rate(10)  # 10hz
        while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
            rospy.logdebug(
                "No susbribers to _roll_vel_pub yet so we wait and try again")
            try:
                rate.sleep()
            except rospy.ROSInterruptException:
                # This is to avoid error when world is rested, time when backwards.
                pass
        rospy.logdebug("_roll_vel_pub Publisher Connected")
        rospy.logdebug("All Publishers READY")

    # Methods that the TrainingEnvironment will need to define here as virtual
    # because they will be used in RobotGazeboEnv GrandParentClass and defined in the
    # TrainingEnvironment.
    # ----------------------------
    def _set_init_pose(self):
        """Sets the Robot in its init pose
        """
        raise NotImplementedError()

    def _init_env_variables(self):
        """Inits variables needed to be initialised each time we reset at the start
        of an episode.
        """
        raise NotImplementedError()

    def _compute_reward(self, observations, done):
        """Calculates the reward to give based on the observations given.
        """
        raise NotImplementedError()

    def _set_action(self, action):
        """Applies the given action to the simulation.
        """
        raise NotImplementedError()

    def _get_obs(self):
        raise NotImplementedError()

    def _is_done(self, observations):
        """Checks if episode done based on observations given.
        """
        raise NotImplementedError()

    # Methods that the TrainingEnvironment will need.
    # ----------------------------
    def move_joints(self, roll_speed):
        """Publishes `roll_speed` to the wheel controller and waits until the
        measured velocity reaches it."""
        joint_speed_value = Float64()
        joint_speed_value.data = roll_speed
        rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
        self._roll_vel_pub.publish(joint_speed_value)
        self.wait_until_roll_is_in_vel(joint_speed_value.data)

    def wait_until_roll_is_in_vel(self, velocity):
        """Blocks until the measured wheel velocity is within epsilon of
        `velocity`.

        Returns:
            float: Seconds spent waiting.
        """
        rate = rospy.Rate(10)
        start_wait_time = rospy.get_rostime().to_sec()
        end_wait_time = 0.0
        epsilon = 0.1
        v_plus = velocity + epsilon
        v_minus = velocity - epsilon
        while not rospy.is_shutdown():
            joint_data = self._check_joint_states_ready()
            roll_vel = joint_data.velocity[0]
            rospy.logdebug("VEL=" + str(roll_vel) +
                           ", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
            # NOTE(review): the interval is half-open (<= v_plus, > v_minus),
            # which looks asymmetric — confirm whether this is intentional.
            are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
            if are_close:
                rospy.logdebug("Reached Velocity!")
                end_wait_time = rospy.get_rostime().to_sec()
                break
            rospy.logdebug("Not there yet, keep waiting...")
            rate.sleep()
        delta_time = end_wait_time - start_wait_time
        rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
        return delta_time

    def get_joints(self):
        """Returns the last cached JointState message."""
        return self.joints

    def get_odom(self):
        """Returns the last cached Odometry message."""
        return self.odom
|
# ingreso = input("ingrese palabras")
# def totalElementos(ingreso) :
# count = 0
# for element in ingreso:
# count += 1
# return count
# print("The total number of elements in the list: ", totalElementos(ingreso))
# listaPalabras = ingreso.split()
# frecuenciaPalab = []
# for i in listaPalabras:
# frecuenciaPalab.append(listaPalabras.count(i))
# print("Cadena\n" + ingreso +"\n")
# print("Lista\n" + str(listaPalabras) + "\n")
# print("Pares\n" + str(list(zip(listaPalabras, frecuenciaPalab))))
#ingreso = "barco casa barco perro lote lote perro perro naranja tomate "
|
from typing import Generic, Iterable, Optional, Tuple, TypeVar, cast
from jstools.screeps import *
# Transcrypt compiler directives: do not alias these identifiers when
# transpiling to JavaScript (they clash with native JS names/methods).
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
__pragma__('noalias', 'values')
# Everything until __pragma__('noskip') is for type checkers only and is
# excluded from transpilation.
__pragma__('skip')
K = TypeVar('K')
V = TypeVar('V')
class JSMap(Generic[K, V]):
    """Typing stub mirroring the native JavaScript ``Map`` API.

    This class sits in a ``__pragma__('skip')`` region, so Transcrypt never
    transpiles it; it only lets Python type checkers understand the objects
    returned by :func:`new_map`.
    """

    def has(self, key: K) -> bool:
        """Return whether `key` is present."""
        pass

    def get(self, key: K) -> V:
        """Return the value stored under `key`."""
        pass

    def set(self, key: K, value: V) -> None:
        """Store `value` under `key`."""
        pass

    def delete(self, key: K) -> None:
        """Remove `key` (and its value)."""
        pass

    def entries(self) -> Iterable[Tuple[K, V]]:
        """Iterate over ``(key, value)`` pairs."""
        pass

    def keys(self) -> Iterable[K]:
        """Iterate over the keys."""
        pass

    def values(self) -> Iterable[V]:
        """Iterate over the values."""
        pass

    def js_clear(self) -> None:
        """Remove all entries (presumably maps to JS ``clear``)."""
        pass

    @property
    def size(self) -> int:
        # Stub value only; at runtime the native Map provides `size`.
        return 0
class JSSet(Generic[K]):
    """Typing stub mirroring the native JavaScript ``Set`` API.

    Like :class:`JSMap`, this exists only for type checkers; Transcrypt
    skips it during transpilation.
    """

    def has(self, key: K) -> bool:
        """Return whether `key` is present."""
        pass

    def add(self, key: K) -> None:
        """Insert `key` into the set."""
        pass

    def delete(self, key: K) -> None:
        """Remove `key` from the set."""
        pass

    def keys(self) -> Iterable[K]:
        """Iterate over the elements."""
        pass

    def values(self) -> Iterable[K]:
        """Iterate over the elements."""
        pass

    def js_clear(self) -> None:
        """Remove all elements (presumably maps to JS ``clear``)."""
        pass

    @property
    def size(self) -> int:
        # Stub value only; at runtime the native Set provides `size`.
        return 0


# End of the type-checker-only region; code below is transpiled normally.
__pragma__('noskip')
def new_map(iterable = undefined):
    # type: (Optional[Iterable[Tuple[K, V]]]) -> JSMap[K, V]
    """
    Create a native JavaScript ``Map``, optionally initialized from an
    iterable of ``(key, value)`` pairs.

    :rtype: JSMap
    """
    return cast(JSMap, __new__(__pragma__('js', 'Map')(iterable)))
def new_set(iterable = undefined):
    # type: (Optional[Iterable[K]]) -> JSSet[K]
    """
    Create a native JavaScript ``Set``, optionally initialized from an
    iterable of elements.

    :rtype: JSSet
    """
    return cast(JSSet, __new__(__pragma__('js', 'Set')(iterable)))
|
from typing import Any, Callable, Iterable, List, Mapping, Optional, Set, TypeVar

from . import Input, BufferedInput
InputType = TypeVar('InputType', Input, BufferedInput)
class InputSynchronizer:
    """Invokes `handler` once every tracked input has reported a change.

    Each registered input is expected to implement ``push`` and ``actualize``
    and expose a ``value`` attribute (see ``Input`` / ``BufferedInput``).
    """
    input_names: List[str]
    inputs: List[InputType]
    ready: Set[InputType]
    handler: Callable[..., Any]

    def __init__(self, handler: Callable,
                 inputs: Optional[Mapping[str, InputType]] = None):
        # Avoid the mutable-default-argument pitfall: `{}` as a default is
        # shared across calls, so default to None and create a fresh dict.
        inputs = {} if inputs is None else inputs
        # BUG FIX: `handler` was never stored, so handle_input_change()
        # raised AttributeError the first time all inputs became ready.
        self.handler = handler
        self.ready = set()
        # BUG FIX: names and inputs were kept in two *sets*, so the zip()
        # below paired them in arbitrary order. Parallel ordered lists keep
        # each name bound to its own input (dict order is insertion order).
        self.input_names = list(inputs.keys())
        self.inputs = [i.push(None) for i in inputs.values()]

    def handle_input_change(self, input: InputType) -> None:
        """Record a changed input; fire the handler when all have changed."""
        self.ready.add(input)
        if self.ready == set(self.inputs):
            self.ready.clear()
            self.inputs = [i.actualize() for i in self.inputs]
            args = zip(self.input_names, self.inputs)
            # The handler receives one keyword argument per input name.
            self.handler(**{k: i.value for k, i in args})
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# CC Vary Header is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Default configuration."""
from __future__ import absolute_import, print_function
from invenio_indexer.api import RecordIndexer
from invenio_records_files.api import Record
from invenio_records_rest.facets import terms_filter
from invenio_records_rest.utils import allow_all, check_elasticsearch
from invenio_search import RecordsSearch
def _(x):
    """Identity function for string extraction."""
    # Marks literals for the i18n message extractor without translating
    # them at import time (the conventional gettext alias).
    return x
# REST endpoint configuration consumed by invenio-records-rest.
RECORDS_REST_ENDPOINTS = {
    'recid': dict(
        pid_type='recid',
        pid_minter='recid',
        pid_fetcher='recid',
        default_endpoint_prefix=True,
        record_class=Record,
        search_class=RecordsSearch,
        indexer_class=RecordIndexer,
        search_index='records',
        search_type=None,
        record_serializers={
            'application/json': ('cc_vary_header.records.serializers'
                                 ':json_v1_response'),
            'application/x-custom': ('cc_vary_header.records.serializers'
                                     ':custom_v1_response'),
        },
        search_serializers={
            'application/json': ('cc_vary_header.records.serializers'
                                 ':json_v1_search'),
        },
        record_loaders={
            'application/json': ('cc_vary_header.records.loaders'
                                 ':json_v1'),
        },
        list_route='/records/',
        item_route='/records/<pid(recid,'
                   'record_class="invenio_records_files.api.Record")'
                   ':pid_value>',
        default_media_type='application/json',
        max_result_window=10000,
        error_handlers=dict(),
        create_permission_factory_imp=allow_all,
        read_permission_factory_imp=check_elasticsearch,
        update_permission_factory_imp=allow_all,
        delete_permission_factory_imp=allow_all,
        list_permission_factory_imp=allow_all
    ),
}
"""REST API for cc-vary-header."""

RECORDS_UI_ENDPOINTS = {
    'recid': {
        'pid_type': 'recid',
        'route': '/records/<pid_value>',
        'template': 'records/record.html',
    },
}
"""Records UI for cc-vary-header."""

SEARCH_UI_JSTEMPLATE_RESULTS = 'templates/records/results.html'
"""Result list template."""

PIDSTORE_RECID_FIELD = 'id'

CC_VARY_HEADER_ENDPOINTS_ENABLED = True
"""Enable/disable automatic endpoint registration."""

# Faceted-search aggregations and the corresponding post filters.
RECORDS_REST_FACETS = dict(
    records=dict(
        aggs=dict(
            type=dict(terms=dict(field='type')),
            keywords=dict(terms=dict(field='keywords'))
        ),
        post_filters=dict(
            type=terms_filter('type'),
            keywords=terms_filter('keywords'),
        )
    )
)
"""Introduce searching facets."""

RECORDS_REST_SORT_OPTIONS = dict(
    records=dict(
        bestmatch=dict(
            title=_('Best match'),
            fields=['_score'],
            default_order='desc',
            order=1,
        ),
        mostrecent=dict(
            title=_('Most recent'),
            fields=['-_created'],
            default_order='asc',
            order=2,
        ),
    )
)
"""Setup sorting options."""

RECORDS_REST_DEFAULT_SORT = dict(
    records=dict(
        query='bestmatch',
        noquery='mostrecent',
    )
)
"""Set default sorting options."""

RECORDS_FILES_REST_ENDPOINTS = {
    'RECORDS_REST_ENDPOINTS': {
        'recid': '/files'
    },
}
"""Records files integration."""

FILES_REST_PERMISSION_FACTORY = \
    'cc_vary_header.records.permissions:files_permission_factory'
"""Files-REST permissions factory."""
|
def first_derivative(xs, ys, mode='centered'):
    """
    Estimate dy/dx from samples of y(x) on a possibly non-uniform grid.

    Args:
        xs (np.ndarray): Sample positions; need not be equally spaced.
        ys (np.ndarray): Sample values, same length as `xs`.
        mode (str): 'forward' for two-point forward differences (yields
            len(xs) - 1 values, located at the left points), or 'centered'
            for the derivative at the middle point of the local three-point
            quadratic interpolant (yields len(xs) - 2 values, at the
            interior points; exact for polynomials up to degree 2).

    Returns:
        derivs (np.ndarray): The estimated first-derivative values.

    Raises:
        ValueError: If `mode` is not 'forward' or 'centered'.
    """
    if mode == 'forward':
        x = xs[:-1]
        y = ys[:-1]
        x_next = xs[1:]
        y_next = ys[1:]
        derivs = (y_next - y) / (x_next - x)
    elif mode == 'centered':
        x1 = xs[:-2]
        y1 = ys[:-2]
        x2 = xs[1:-1]
        y2 = ys[1:-1]
        x3 = xs[2:]
        y3 = ys[2:]
        # Derivative at x2 of the parabola through (x1, y1), (x2, y2), (x3, y3).
        derivs = (
            x1 ** 2 * y2 + x2 ** 2 * y1 - x1 ** 2 * y3 + x3 ** 2 * y1
            - x2 ** 2 * y3 - x3 ** 2 * y2 - 2 * x1 * x2 * y2 + 2 * x1 * x2 * y3
            - 2 * x2 * x3 * y1 + 2 * x2 * x3 * y2
        ) / (
            (x1 - x2) * (x1 - x3) * (x2 - x3)
        )
    else:
        # ValueError instead of the overly generic Exception (still caught by
        # any caller catching Exception); also fixes the garbled ''mode''.
        raise ValueError("Bad input for the 'mode' argument: {!r}".format(mode))
    return derivs
def second_derivative(xs, ys, mode='centered'):
    """
    Estimate d2y/dx2 from samples of y(x) on a possibly non-uniform grid.

    Args:
        xs (np.ndarray): Sample positions; need not be equally spaced.
        ys (np.ndarray): Sample values, same length as `xs`.
        mode (str): Only 'centered' is supported: the (constant) second
            derivative of the local three-point quadratic interpolant
            (yields len(xs) - 2 values, at the interior points; exact for
            polynomials up to degree 2).

    Returns:
        derivs (np.ndarray): The estimated second-derivative values.

    Raises:
        ValueError: If `mode` is not 'centered'.
    """
    if mode == 'centered':
        x1 = xs[:-2]
        y1 = ys[:-2]
        x2 = xs[1:-1]
        y2 = ys[1:-1]
        x3 = xs[2:]
        y3 = ys[2:]
        # Second derivative of the parabola through the three points.
        derivs = -2 * (
            x1 * y2 - x2 * y1 - x1 * y3 + x3 * y1 + x2 * y3 - x3 * y2
        ) / (
            (x1 - x2) * (x1 - x3) * (x2 - x3)
        )
    else:
        # ValueError instead of the overly generic Exception (still caught by
        # any caller catching Exception); also fixes the garbled ''mode''.
        raise ValueError("Bad input for the 'mode' argument: {!r}".format(mode))
    return derivs
if __name__ == "__main__":
    # Quick visual sanity check of the derivative helpers on a
    # non-uniform grid (squared linspace), with y = x ** 2.
    import numpy as np
    x = np.linspace(0, 1, 11)**2
    y = x ** 2
    dydx = first_derivative(x, y, mode='forward')
    d2ydx2 = second_derivative(x, y)
    import matplotlib.pyplot as plt
    plt.plot(x, y, '.-')
|
# flake8: noqa
from .base import *
# Local test settings: extends the base settings via `from .base import *`,
# which also provides os, BASE_DIR and the *_APPS tuples used below.
ALLOWED_HOSTS = ['127.0.0.1']
INTERNAL_IPS = ['127.0.0.1']

# Application definition
THIRD_PARTY_APPS += ()
LOCAL_APPS += ()
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

# Database
# NOTE(review): hard-coded credentials are acceptable only because these are
# local test settings; never reuse them for production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'test_django_db',
        'USER': 'test_django',
        'PASSWORD': '123456',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}

# Media
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/test')

# Emails
DEFAULT_FROM_EMAIL = 'snicoper@snicoper.local'

# Admins
ADMINS = (
    ('snicoper', 'snicoper@snicoper.local'),
)

# Email groups.
GROUP_EMAILS = {
    "NO-REPLY": 'no-responder@snicoper.local <snicoper@snicoper.local>',
    'CONTACTS': (
        'Salvador Nicolas <snicoper@snicoper.local>',
    ),
}
|
from rest_framework import serializers
from .models import Images, Post, Comment
class PostSerializer(serializers.ModelSerializer):
    """Serializes Post instances with all content and ownership fields."""

    class Meta:
        model = Post
        fields = ('id', 'title', 'content', 'create_date', 'update_date', 'image', 'user')
class PostCreateSerializer(serializers.ModelSerializer):
    """Serializer used for Post creation.

    NOTE(review): currently identical to PostSerializer — presumably meant
    to diverge (e.g. read-only dates); confirm.
    """

    class Meta:
        model = Post
        fields = ('id', 'title', 'content', 'create_date', 'update_date', 'image', 'user')
class PostPutSerializer(serializers.ModelSerializer):
    """Serializer used for Post updates (PUT).

    NOTE(review): currently identical to PostSerializer — presumably meant
    to diverge (e.g. immutable user field); confirm.
    """

    class Meta:
        model = Post
        fields = ('id', 'title', 'content', 'create_date', 'update_date', 'image', 'user')
class CommentSerializer(serializers.ModelSerializer):
    """Serializes Comment instances with content, post and ownership fields."""

    class Meta:
        model = Comment
        fields = ('id', 'content', 'create_date', 'update_date', 'post', 'user')
class CommentCreateSerializer(serializers.ModelSerializer):
    """Serializer used for Comment creation.

    NOTE(review): currently identical to CommentSerializer; confirm intent.
    """

    class Meta:
        model = Comment
        fields = ('id', 'content', 'create_date', 'update_date', 'post', 'user')
class CommentPutSerializer(serializers.ModelSerializer):
    """Serializer used for Comment updates (PUT).

    NOTE(review): currently identical to CommentSerializer; confirm intent.
    """

    class Meta:
        model = Comment
        fields = ('id', 'content', 'create_date', 'update_date', 'post', 'user')
|
# Copyright 2021, Peter Birch, mailto:peter@lightlogic.co.uk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from drivers.stream.common import StreamTransaction
from nxconstants import (ControlReqType, ControlRespType, ControlRequest,
ControlResponse, HW_DEV_ID, HW_VER_MAJOR, HW_VER_MINOR,
TIMER_WIDTH)
from ..testbench import testcase
@testcase()
async def read_params(dut):
    """ Read the device parameters.

    Drives a READ_PARAMS control request into the DUT and queues the
    expected PARAMS response (hardware ID/version constants plus the mesh
    geometry taken from the DUT's build parameters) for checking.
    """
    dut.info("Resetting the DUT")
    await dut.reset()
    # Queue up a parameter request
    req = ControlRequest()
    req.raw.command = ControlReqType.READ_PARAMS
    dut.ctrl_in.append(StreamTransaction(req.raw.pack()))
    # Queue up a parameter response
    resp = ControlResponse()
    resp.params.format = ControlRespType.PARAMS
    resp.params.id = HW_DEV_ID
    resp.params.ver_major = HW_VER_MAJOR
    resp.params.ver_minor = HW_VER_MINOR
    resp.params.timer_width = TIMER_WIDTH
    # Geometry/capacity values are read back from the DUT's parameters so
    # the expectation tracks the build configuration.
    resp.params.rows = int(dut.ROWS)
    resp.params.columns = int(dut.COLUMNS)
    resp.params.node_ins = int(dut.INPUTS)
    resp.params.node_outs = int(dut.OUTPUTS)
    resp.params.node_regs = int(dut.REGISTERS)
    dut.exp_ctrl.append(StreamTransaction(resp.params.pack()))
|
from datetime import datetime
from typing import Generator
import pytest
from sqlalchemy.orm import Session
from app.database.models import Event, Invitation, User
from app.internal.utils import create_model, delete_instance
@pytest.fixture
def invitation(
    event: Event, user: User, session: Session
) -> Generator[Invitation, None, None]:
    """Yield an Invitation persisted in the test database.

    The row is created before the test runs and removed afterwards so
    every test sees a fresh invitation.

    Args:
        event: An Event instance.
        user: A user instance.
        session: A database connection.
    """
    instance = create_model(
        session,
        Invitation,
        creation=datetime.now(),
        recipient=user,
        event=event,
        event_id=event.id,
        recipient_id=user.id,
    )
    yield instance
    delete_instance(session, instance)
|
from django.contrib import admin

from .models import SkillTag, TopicTag, UserProfile

# Make the profile-related models editable in the Django admin site.
for model in (TopicTag, SkillTag, UserProfile):
    admin.site.register(model)
|
#!/usr/bin/env python3
import io
import os
import sys
import pyperclip
from PIL import Image
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QSystemTrayIcon,qApp,QMenu,QAction
from system_hotkey import SystemHotkey
from PyQt5.QtCore import QObject,pyqtSignal
# try:
# from pynotifier import Notification
# except ImportError:
# pass
#捐助
from donate import MyQrWidget as Qr
#切换baidu paddleocr
import numpy as np
from paddleocr import PaddleOCR
PADDLE_OCR = None
class Snipper(QtWidgets.QWidget):
    """Full-screen region-capture widget driving the OCR pipeline.

    Global hotkeys (registered in ``__init__``): Alt+C starts a capture,
    Alt+Q quits.  A drag on the frozen screenshot selects the region that
    is handed to ``processImage_pdocr()``.
    """

    # Qt signal used to hop from the SystemHotkey callback thread back
    # onto the Qt event loop before touching any widgets.
    sig_keyhot = pyqtSignal(str)

    def __init__(self, parent=None, flags=Qt.WindowFlags()):
        super().__init__(parent=parent, flags=flags)
        self.hk_capture, self.hk_exit = SystemHotkey(), SystemHotkey()
        self.hk_capture.register(('alt', 'c'), callback=lambda x: self.send_key_event("capture"))
        self.hk_exit.register(('alt', 'q'), callback=lambda x: self.send_key_event("exit"))
        self.sig_keyhot.connect(self.hotkey_process)

    def send_key_event(self, i_str):
        """Convert an external hotkey event into a Qt signal."""
        self.sig_keyhot.emit(i_str)

    def hotkey_process(self, i_str):
        """Dispatch a hotkey name to the matching action."""
        if i_str == "capture":
            self.capture()
        elif i_str == "exit":
            self.quit()
        elif i_str == "donate":
            self.donate()
        else:
            pass

    def donate(self):
        """Show the donation QR-code window and re-show the tray icon."""
        self.donateWin = Qr()
        self.donateWin.show()
        global tp
        tp.show()

    def quit(self):
        """Terminate the application."""
        print(f"INFO: quit capture")
        QtWidgets.QApplication.quit()

    def capture(self):
        """Freeze the current screen and enter region-selection mode."""
        print(f"INFO: start capture!")
        self.setWindowFlags(
            Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint | Qt.Dialog
        )
        self.setWindowState(self.windowState() | Qt.WindowFullScreen)
        self.show()
        self.start, self.end = QtCore.QPoint(), QtCore.QPoint()
        # Grab the screen under the cursor and paint it as our background,
        # so the desktop appears frozen while the user drags a selection.
        self.screen = QtWidgets.QApplication.screenAt(QtGui.QCursor.pos()).grabWindow(0)
        palette = QtGui.QPalette()
        palette.setBrush(self.backgroundRole(), QtGui.QBrush(self.screen))
        self.setPalette(palette)
        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))

    def keyPressEvent(self, event):
        # Escape aborts the capture (quits the whole application).
        if event.key() == Qt.Key_Escape:
            QtWidgets.QApplication.quit()
        return super().keyPressEvent(event)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.setPen(Qt.NoPen)
        # Dim the whole screen, then draw the current selection rectangle.
        painter.setBrush(QtGui.QColor(0, 0, 0, 100))
        painter.drawRect(0, 0, self.width(), self.height())
        if self.start == self.end:
            return super().paintEvent(event)
        painter.setPen(QtGui.QPen(QtGui.QColor(255, 255, 255), 3))
        painter.setBrush(painter.background())
        painter.drawRect(QtCore.QRect(self.start, self.end))
        return super().paintEvent(event)

    def mousePressEvent(self, event):
        self.start = self.end = event.pos()
        self.update()
        return super().mousePressEvent(event)

    def mouseMoveEvent(self, event):
        self.end = event.pos()
        self.update()
        # BUG FIX: the original returned super().mousePressEvent(event) here,
        # delegating the wrong handler for move events.
        return super().mouseMoveEvent(event)

    def mouseReleaseEvent(self, event):
        if self.start == self.end:
            return super().mouseReleaseEvent(event)
        self.hide()
        QtWidgets.QApplication.processEvents()
        # Crop the frozen screenshot to the selection and run OCR on it.
        shot = self.screen.copy(QtCore.QRect(self.start, self.end))
        processImage_pdocr(shot)
        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
# def processImage(img):
# buffer = QtCore.QBuffer()
# buffer.open(QtCore.QBuffer.ReadWrite)
# img.save(buffer, "PNG")
# pil_img = Image.open(io.BytesIO(buffer.data()))
# buffer.close()
# try:
# #result = pytesseract.image_to_string(
# # pil_img, timeout=5, lang=(sys.argv[1] if len(sys.argv) > 1 else None)
# #)
# result = pytesseract.image_to_string(
# pil_img, timeout=8, lang="eng+chi_sim"
# )
# except RuntimeError as error:
# print(f"ERROR: An error occurred when trying to process the image: {error}")
# notify(f"An error occurred when trying to process the image: {error}")
# return
# if result:
# #result = ''.join(result.split(' '))
# pyperclip.copy(result)
# print(f'INFO: Copied "{result}" to the clipboard')
# #notify(f'Copied "{result}" to the clipboard')
# notify(f'识别结果已保存到剪贴板\n "{result}" ')
# else:
# print(f"INFO: Unable to read text from image, did not copy")
# notify(f"Unable to read text from image, did not copy")
def processImage_pdocr(img):
    """Run PaddleOCR on a Qt image and copy the recognized text to the clipboard.

    Args:
        img: A Qt image/pixmap (anything exposing ``save(buffer, "PNG")``)
            holding the captured screen region.
    """
    # Round-trip through an in-memory PNG to convert the Qt image to PIL.
    buffer = QtCore.QBuffer()
    buffer.open(QtCore.QBuffer.ReadWrite)
    img.save(buffer, "PNG")
    pil_img = Image.open(io.BytesIO(buffer.data()))
    buffer.close()
    try:
        global PADDLE_OCR
        ocr_result = PADDLE_OCR.ocr(np.array(pil_img))
        # Each entry is (box, (text, confidence)); keep only the text.
        text_lines = [line[1][0] for line in ocr_result]
        result = '\n'.join(text_lines)
    except RuntimeError as error:
        print(f"ERROR: An paddleocr error occurred when trying to process the image: {error}")
        notify(f"An error paddleocr occurred when trying to process the image: {error}")
        return
    if result:
        pyperclip.copy(result)
        print(f'INFO: Copied "{result}" to the clipboard')
        # BUG FIX: the original showed result[20:] (everything AFTER the
        # first 20 chars) in the notification; a head preview was clearly
        # intended, so truncate to the first 20 characters instead.
        preview = result[:20] if len(result) > 20 else result
        notify(f'识别结果已保存到剪贴板\n "{preview}" ')
    else:
        print(f"INFO: Unable to read text from image, did not copy")
        notify(f"Unable to read text from image, did not copy")
def notify(msg):
    """Pop up *msg* as a balloon notification on the system tray icon."""
    # The tray icon must be visible for showMessage() to display anything.
    global tp
    tp.show()
    tp.showMessage("scr2txt", msg, QtWidgets.QSystemTrayIcon.NoIcon)
if __name__ == "__main__":
    # Legacy pytesseract bootstrap, kept for reference:
    # try:
    #     pytesseract.get_tesseract_version()
    # except EnvironmentError:
    #     notify(
    #         "Tesseract is either not installed or cannot be reached.\n"
    #         "Have you installed it and added the install directory to your system path?"
    #     )
    #     print(
    #         "ERROR: Tesseract is either not installed or cannot be reached.\n"
    #         "Have you installed it and added the install directory to your system path?"
    #     )
    #     sys.exit()
    #notify(u"\nAlt+C:开始识别\nAlt+Q:退出")
    # PaddleOCR model files live next to this script under .\model\.
    # NOTE(review): r'\\model\\' yields literal doubled backslashes; Windows
    # tolerates repeated separators, but a single backslash would be cleaner.
    model_path = os.path.dirname(os.path.abspath(__file__)) + r'\\model\\'
    PADDLE_OCR = PaddleOCR(use_gpu=False,det_model_dir=model_path+'det', cls_model_dir='', rec_model_dir=model_path+'rec', rec_char_dict_path=model_path+'ppocr_keys_v1.txt')
    #QtWidgets.QApplication.setQuitOnLastWindowClosed(False)
    QtCore.QCoreApplication.setAttribute(Qt.AA_DisableHighDpiScaling)
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    snipper = Snipper(window)
    snipper.show()
    # Show an icon in the system tray with capture/quit/donate actions.
    tp = QSystemTrayIcon(window)
    tp.setIcon(QIcon('scr2txt.ico'))
    tp.setToolTip(u'Alt+C截屏,Alt+Q退出')
    capAct = QAction('截屏(&Caputure)',triggered = lambda x:snipper.send_key_event('capture'))
    extAct = QAction('退出(&Quit)',triggered = lambda x: tp.setVisible(False) or snipper.send_key_event('exit'))
    donAct = QAction('捐助(&Donate)',triggered = lambda x: tp.setVisible(False) or snipper.send_key_event('donate'))
    tpMenu = QMenu()
    tpMenu.addAction(capAct)
    tpMenu.addAction(extAct)
    tpMenu.addAction(donAct)
    tp.setContextMenu(tpMenu)
    tp.show()
    notify(u"\nAlt+C:开始识别\nAlt+Q:退出")
    sys.exit(app.exec_())
|
import torch
import numpy as np
from typing import Dict, Any, List
import math
import gym
from core.envs import BaseDriveEnv
from ding.torch_utils.data_helper import to_ndarray
# Discrete (throttle, brake) pairs the discrete wrappers can select.
DEFAULT_ACC_LIST = [
    (0, 1),
    (0.25, 0),
    (0.75, 0),
]
# Discrete steering angles (negative = left, positive = right).
DEFAULT_STEER_LIST = [
    -0.8,
    -0.5,
    -0.2,
    0,
    0.2,
    0.5,
    0.8,
]
class DiscreteEnvWrapper(gym.Wrapper):
    """Wrapper mapping one flat discrete action id to (steer, throttle, brake).

    The id encodes an (acceleration, steering) pair:
    ``acc = acc_list[id % len(acc_list)]``, ``steer = steer_list[id // len(acc_list)]``.
    """

    def __init__(self, env: BaseDriveEnv, acc_list: List = None, steer_list: List = None) -> None:
        super().__init__(env)
        self._acc_list = DEFAULT_ACC_LIST if acc_list is None else acc_list
        self._steer_list = DEFAULT_STEER_LIST if steer_list is None else steer_list

    @staticmethod
    def _convert_obs(obs):
        # Keep birdview channels [0, 1, 5, 6, 8]; normalise speed by 25.
        return {
            'birdview': obs['birdview'][..., [0, 1, 5, 6, 8]],
            'speed': (obs['speed'] / 25).astype(np.float32),
        }

    def reset(self, *args, **kwargs) -> Any:
        return self._convert_obs(super().reset(*args, **kwargs))

    def step(self, id):
        if isinstance(id, torch.Tensor):
            id = id.item()
        id = np.squeeze(id)
        assert id < len(self._acc_list) * len(self._steer_list), (id, len(self._acc_list) * len(self._steer_list))
        n_acc = len(self._acc_list)
        throttle, brake = self._acc_list[id % n_acc]
        action = {
            'steer': self._steer_list[id // n_acc],
            'throttle': throttle,
            'brake': brake,
        }
        obs, reward, done, info = super().step(action)
        return self._convert_obs(obs), reward, done, info

    def __repr__(self) -> str:
        return repr(self.env)
class MultiDiscreteEnvWrapper(gym.Wrapper):
    """Wrapper taking a pair of discrete ids: (acceleration index, steering index)."""

    def __init__(self, env: BaseDriveEnv, acc_list: List = None, steer_list: List = None) -> None:
        super().__init__(env)
        self._acc_list = DEFAULT_ACC_LIST if acc_list is None else acc_list
        self._steer_list = DEFAULT_STEER_LIST if steer_list is None else steer_list

    @staticmethod
    def _convert_obs(obs):
        # Keep birdview channels [0, 1, 5, 6, 8]; normalise speed by 25.
        return {
            'birdview': obs['birdview'][..., [0, 1, 5, 6, 8]],
            'speed': (obs['speed'] / 25).astype(np.float32),
        }

    def reset(self, *args, **kwargs) -> Any:
        return self._convert_obs(super().reset(*args, **kwargs))

    def step(self, action_ids):
        ids = np.squeeze(to_ndarray(action_ids, dtype=int))
        acc_id, steer_id = ids[0], ids[1]
        assert acc_id < len(self._acc_list), (acc_id, len(self._acc_list))
        assert steer_id < len(self._steer_list), (steer_id, len(self._steer_list))
        throttle, brake = self._acc_list[acc_id]
        action = {
            'steer': self._steer_list[steer_id],
            'throttle': throttle,
            'brake': brake,
        }
        obs, reward, done, info = super().step(action)
        return self._convert_obs(obs), reward, done, info

    def __repr__(self) -> str:
        return repr(self.env)
class ContinuousEnvWrapper(gym.Wrapper):
    """Wrapper taking a continuous [steer, acceleration] action vector."""

    @staticmethod
    def _convert_obs(obs):
        # Keep birdview channels [0, 1, 5, 6, 8]; normalise speed by 25.
        return {
            'birdview': obs['birdview'][..., [0, 1, 5, 6, 8]],
            'speed': (obs['speed'] / 25).astype(np.float32),
        }

    def reset(self, *args, **kwargs) -> Any:
        return self._convert_obs(super().reset(*args, **kwargs))

    def step(self, action):
        raw = np.squeeze(to_ndarray(action))
        steer, acc = raw[0], raw[1]
        # Positive acceleration drives the throttle, negative the brake.
        if acc > 0:
            throttle, brake = acc, 0
        else:
            throttle, brake = 0, -acc
        command = {
            'steer': steer,
            'throttle': throttle,
            'brake': brake,
        }
        obs, reward, done, info = super().step(command)
        return self._convert_obs(obs), reward, done, info

    def __repr__(self) -> str:
        return repr(self.env)
|
import sys
import serial
from time import sleep

# Serial device settings.
path = '/dev/ttyUSB0'
baud = 115200

# OPTION A
# To disable /RESET after hangup
#
# import termios
# with open(path) as f:
#     attrs = termios.tcgetattr(f)
#     attrs[2] = attrs[2] & ~termios.HUPCL
#     termios.tcsetattr(f, termios.TCSAFLUSH, attrs)

# OPTION B
# To disable /RESET after hangup
#
# import os
# os.system("stty -F /dev/ttyUSB0 -hupcl")

# OPTION C
# Disconnect the GRN pin from the FTDI interface

# Open serial port (dsrdtr=False avoids toggling flow-control lines on open)
ser = serial.Serial(path, baud, dsrdtr=False)
# Optionally check characteristics of serial port
# print(ser)

# Forward stdin to the serial port character by character, pacing the
# device with a short sleep after each write.
for line in sys.stdin:
    for ch in line:
        # BUG FIX: pyserial 3.x requires bytes; the original passed str
        # to write(), which raises TypeError at runtime.
        ser.write(ch.encode())
        ser.write(b'\r')
        ser.flush()
        sleep(0.01)
ser.close()
sys.exit()
|
from cstraining.web.rest.main import App
from cstraining.web.rest.model import StateColors
@App.path(path='state_colors/{obj_class}', model=StateColors)
def get_state_color_model(app, obj_class):
    """Morepath path directive: resolve /state_colors/<obj_class> to a StateColors model."""
    return StateColors(obj_class)
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The view layer of logic for our Bonjour Meal sample. The logic here
defines the behavior of the webhook when messages are received from
users messaging through Business Messages.
'''
import json
import uuid
from oauth2client.service_account import ServiceAccountCredentials
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from businessmessages import businessmessages_v1_client as bm_client
from businessmessages.businessmessages_v1_messages import (
BusinessMessagesCarouselCard, BusinessMessagesCardContent, BusinessMessagesContentInfo,
BusinessMessagesDialAction, BusinessmessagesConversationsMessagesCreateRequest,
BusinessMessagesOpenUrlAction, BusinessMessagesMedia, BusinessMessagesMessage,
BusinessMessagesRepresentative, BusinessMessagesRichCard, BusinessMessagesStandaloneCard,
BusinessMessagesSuggestion, BusinessMessagesSuggestedAction, BusinessMessagesSuggestedReply)
# The location of the service account credentials
SERVICE_ACCOUNT_LOCATION = 'resources/bm-agent-service-account-credentials.json'
# Set of commands the bot understands (matched case-insensitively
# in route_message, which lowercases incoming text first)
CMD_RICH_CARD = 'card'
CMD_CAROUSEL_CARD = 'carousel'
CMD_SUGGESTIONS = 'chips'
# Images used in cards and carousel examples (one carousel card per image)
SAMPLE_IMAGES = [
    'https://storage.googleapis.com/kitchen-sink-sample-images/cute-dog.jpg',
    'https://storage.googleapis.com/kitchen-sink-sample-images/elephant.jpg',
    'https://storage.googleapis.com/kitchen-sink-sample-images/adventure-cliff.jpg',
    'https://storage.googleapis.com/kitchen-sink-sample-images/sheep.jpg',
    'https://storage.googleapis.com/kitchen-sink-sample-images/golden-gate-bridge.jpg',
]
# The representative type that all messages are sent as
BOT_REPRESENTATIVE = BusinessMessagesRepresentative(
    representativeType=BusinessMessagesRepresentative.RepresentativeTypeValueValuesEnum.BOT,
    displayName='Echo Bot',
    avatarImage='https://storage.googleapis.com/sample-avatars-for-bm/bot-avatar.jpg')
@csrf_exempt
def callback(request):
    '''
    Callback URL. Processes messages sent from user.
    Args:
        request (HttpRequest): The request object that django passes to the function
    Returns:
        An :HttpResponse: containing browser renderable HTML.
    '''
    if request.method == 'POST':
        request_data = request.body.decode('utf8').replace("'", '"')
        request_body = json.loads(request_data)
        # BUG FIX: print() does not interpolate '%s' the way logging does;
        # the original passed the value as a second argument and printed a
        # literal '%s'. Format explicitly instead.
        print('request_body: %s' % request_body)
        # Extract the conversation id and message text
        conversation_id = request_body.get('conversationId')
        print('conversation_id: %s' % conversation_id)
        # Check that the message and text body exist
        if 'message' in request_body and 'text' in request_body['message']:
            message = request_body['message']['text']
            print('message: %s' % message)
            route_message(message, conversation_id)
        elif 'suggestionResponse' in request_body:
            # Suggestion chips come back as postback data, not free text.
            message = request_body['suggestionResponse']['postbackData']
            print('message: %s' % message)
            route_message(message, conversation_id)
        elif 'userStatus' in request_body:
            if 'isTyping' in request_body['userStatus']:
                print('User is typing')
            elif 'requestedLiveAgent' in request_body['userStatus']:
                print('User requested transfer to live agent')
        return HttpResponse('Response.')
    return HttpResponse('This webhook expects a POST request.')
def route_message(message, conversation_id):
    '''
    Routes the message received from the user to create a response.
    Args:
        message (str): The message text received from the user.
        conversation_id (str): The unique id for this user and agent.
    '''
    # Dispatch known commands; anything else is echoed back verbatim.
    handlers = {
        CMD_RICH_CARD: send_rich_card,
        CMD_CAROUSEL_CARD: send_carousel,
        CMD_SUGGESTIONS: send_message_with_suggestions,
    }
    handler = handlers.get(message.lower())
    if handler is not None:
        handler(conversation_id)
    else:
        echo_message(message, conversation_id)
def send_rich_card(conversation_id):
    '''
    Sends a sample rich card to the user.
    Args:
        conversation_id (str): The unique id for this user and agent.
    '''
    title = 'Business Messages!!!'
    description = 'This is an example rich card'
    # Plain-text fallback for devices that cannot render rich cards.
    fallback_text = title + '\n\n' + description + '\n\n' + SAMPLE_IMAGES[0]
    media = BusinessMessagesMedia(
        height=BusinessMessagesMedia.HeightValueValuesEnum.MEDIUM,
        contentInfo=BusinessMessagesContentInfo(
            fileUrl=SAMPLE_IMAGES[0],
            forceRefresh=False))
    card = BusinessMessagesCardContent(
        title=title,
        description=description,
        suggestions=get_sample_suggestions(),
        media=media)
    rich_card = BusinessMessagesRichCard(
        standaloneCard=BusinessMessagesStandaloneCard(cardContent=card))
    message_obj = BusinessMessagesMessage(
        messageId=str(uuid.uuid4().int),
        representative=BOT_REPRESENTATIVE,
        richCard=rich_card,
        fallback=fallback_text)
    send_message(message_obj, conversation_id)
def send_carousel(conversation_id):
    '''
    Sends a sample carousel rich card to the user.
    Args:
        conversation_id (str): The unique id for this user and agent.
    '''
    rich_card = BusinessMessagesRichCard(carouselCard=get_sample_carousel())
    # Build a plain-text fallback for devices that do not support carousels:
    # one section per card, separated by a horizontal rule.
    sections = [
        card.title + '\n\n' + card.description + '\n\n'
        + card.media.contentInfo.fileUrl
        + '\n---------------------------------------------\n\n'
        for card in rich_card.carouselCard.cardContents
    ]
    fallback_text = ''.join(sections)
    message_obj = BusinessMessagesMessage(
        messageId=str(uuid.uuid4().int),
        representative=BOT_REPRESENTATIVE,
        richCard=rich_card,
        fallback=fallback_text)
    send_message(message_obj, conversation_id)
def send_message_with_suggestions(conversation_id):
    '''
    Sends a message with a suggested replies.
    Args:
        conversation_id (str): The unique id for this user and agent.
    '''
    suggestions = get_sample_suggestions()
    message_obj = BusinessMessagesMessage(
        messageId=str(uuid.uuid4().int),
        representative=BOT_REPRESENTATIVE,
        text='Message with suggestions',
        fallback='Your device does not support suggestions',
        suggestions=suggestions)
    send_message(message_obj, conversation_id)
def echo_message(message, conversation_id):
    '''
    Sends the message received from the user back to the user.
    Args:
        message (str): The message text received from the user.
        conversation_id (str): The unique id for this user and agent.
    '''
    echo = BusinessMessagesMessage(
        messageId=str(uuid.uuid4().int),
        representative=BOT_REPRESENTATIVE,
        text=message)
    send_message(echo, conversation_id)
def send_message(message, conversation_id):
    '''
    Posts a message to the Business Messages API on behalf of the agent.
    Args:
        message (obj): The message object payload to send to the user.
        conversation_id (str): The unique id for this user and agent.
    '''
    # Credentials are loaded from disk on every call; fine for a sample,
    # but a production bot would cache the client.
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        SERVICE_ACCOUNT_LOCATION,
        scopes=['https://www.googleapis.com/auth/businessmessages'])
    client = bm_client.BusinessmessagesV1(credentials=creds)
    create_request = BusinessmessagesConversationsMessagesCreateRequest(
        businessMessagesMessage=message,
        parent='conversations/' + conversation_id)
    service = bm_client.BusinessmessagesV1.ConversationsMessagesService(client=client)
    service.Create(request=create_request)
def get_sample_carousel():
    '''
    Creates a sample carousel rich card.
    Returns:
        A :obj: A BusinessMessagesCarouselCard object with one card per
        image in SAMPLE_IMAGES (currently five).
    '''
    card_content = []
    for i, sample_image in enumerate(SAMPLE_IMAGES):
        card_content.append(BusinessMessagesCardContent(
            title='Card #' + str(i),
            description='This is a sample card',
            suggestions=get_sample_suggestions(),
            media=BusinessMessagesMedia(
                height=BusinessMessagesMedia.HeightValueValuesEnum.MEDIUM,
                contentInfo=BusinessMessagesContentInfo(
                    fileUrl=sample_image,
                    forceRefresh=False))))
    return BusinessMessagesCarouselCard(
        cardContents=card_content,
        cardWidth=BusinessMessagesCarouselCard.CardWidthValueValuesEnum.MEDIUM)
def get_sample_suggestions():
    '''
    Creates a list of sample suggestions that includes a
    suggested reply and two actions.
    Returns:
        A :list: A list of sample BusinessMessagesSuggestions.
    '''
    chip = BusinessMessagesSuggestion(
        reply=BusinessMessagesSuggestedReply(
            text='Sample Chip',
            postbackData='sample_chip'))
    url_suggestion = BusinessMessagesSuggestion(
        action=BusinessMessagesSuggestedAction(
            text='URL Action',
            postbackData='url_action',
            openUrlAction=BusinessMessagesOpenUrlAction(
                url='https://www.google.com')))
    dial_suggestion = BusinessMessagesSuggestion(
        action=BusinessMessagesSuggestedAction(
            text='Dial Action',
            postbackData='dial_action',
            dialAction=BusinessMessagesDialAction(
                phoneNumber='+12223334444')))
    return [chip, url_suggestion, dial_suggestion]
def landing_placeholder(request):
    '''
    Creates an HttpResponse for a user browsing to the root of the deployed project.
    Args:
        request (HttpRequest): The request object that django passes to the function
    Returns:
        An :HttpResponse: containing browser renderable HTML.
    '''
    # Static HTML only; the webhook itself lives at the callback() view.
    return HttpResponse('''
        <h1>Welcome to the Bonjour Meal Codelab</h1>
        <br/><br/>
        To message your Bonjour Meal agent, go to the Developer Console and retrieve
        the Test URLs for the agent you have created as described in the codelab
        <a href='#'>here</a>.
        ''')
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Tests for DNNSampledSoftmaxClassifier estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import dnn_sampled_softmax_classifier
from tensorflow.python.ops import math_ops
class DNNSampledSoftmaxClassifierTest(tf.test.TestCase):
  def testMultiClass(self):
    """Tests the following.
    1. Tests fit() and evaluate() calls.
    2. Tests the use of a non default optimizer.
    3. Tests the output of get_variable_names().
    Note that the training output is not verified because it is flaky with the
    Iris dataset.
    """
    # NOTE(review): the docstring mentions a non-default optimizer, but no
    # optimizer argument is actually passed below — confirm intent.
    def _iris_input_fn():
      # Iris: 150 examples, 4 continuous features, 3 classes.
      iris = tf.contrib.learn.datasets.load_iris()
      return {
          'feature': tf.constant(iris.data, dtype=tf.float32)
      }, tf.constant(iris.target, shape=[150, 1], dtype=tf.int64)
    cont_features = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=1,
        n_labels=1,
        feature_columns=cont_features,
        hidden_units=[3, 3])
    # Only verifies that training and evaluation run without error.
    classifier.fit(input_fn=_iris_input_fn, steps=5)
    classifier.evaluate(input_fn=_iris_input_fn, steps=1)
    var_names = classifier.get_variable_names()
    # A two-hidden-layer model should expose more than 6 variables.
    self.assertGreater(len(var_names), 6)
def testNonDictFeatures(self):
"""Tests non-dictionary features runs without error."""
def _iris_input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return (tf.constant(
iris.data, dtype=tf.float32), tf.constant(
iris.target, shape=[150, 1], dtype=tf.int64))
cont_features = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
n_classes=3,
n_samples=1,
n_labels=1,
feature_columns=cont_features,
hidden_units=[3, 3])
classifier.fit(input_fn=_iris_input_fn, steps=5)
classifier.evaluate(input_fn=_iris_input_fn, steps=1)
def testOneDimensionTargets(self):
"""Tests one dimensional targets runs without error."""
def _input_fn():
return {
'feature': tf.constant(
[1, 1, 1], dtype=tf.float32)
}, tf.constant(
[3, 5, 7], dtype=tf.int64)
cont_features = [
tf.contrib.layers.real_valued_column(
'feature', dimension=1)
]
classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
n_classes=10,
n_samples=1,
n_labels=1,
feature_columns=cont_features,
hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=5)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testWrongDimensionTargets(self):
"""Tests one dimensional targets runs without error."""
def _input_fn():
return {
'feature': tf.constant(
[1, 1, 1], dtype=tf.float32)
}, tf.constant(
[[[3, 5, 7]]], dtype=tf.int64)
cont_features = [
tf.contrib.layers.real_valued_column(
'feature', dimension=1)
]
classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
n_classes=10,
n_samples=1,
n_labels=1,
feature_columns=cont_features,
hidden_units=[3, 3])
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'target'):
classifier.fit(input_fn=_input_fn, steps=5)
# TODO(dnivara): will analyze the flakiness.
# def testTrainWithPartitionedVariables(self):
# """Tests the following.
#
# 1. Tests training with partitioned variables.
# 2. Test that the model actually trains.
# 3. Tests the output of evaluate() and predict().
# "" #
# def _input_fn():
# features = {
# 'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
# indices=[[0, 0], [0, 1], [2, 0]],
# shape=[3, 2])
# }
# target = tf.constant([[1], [0], [0]], dtype=tf.int64)
# return features, target
#
# # The given hash_bucket_size results in variables larger than the
# # default min_slice_size attribute, so the variables are partitioned.
# sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
# 'language', hash_bucket_size=2e7)
  #   embedding_features = [
# tf.contrib.layers.embedding_column(sparse_column, dimension=1)
# ]
#
# classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier
#(
# n_classes=3,
# n_samples=2,
# n_labels=1,
# feature_columns=embedding_features,
  #       hidden_units=[4, 4],
# # Because we did not start a distributed cluster, we need to pass an
# # empty ClusterSpec, otherwise the device_setter will look for
# # distributed jobs, such as "/job:ps" which are not present.
# config=tf.contrib.learn.RunConfig(
# num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
# tf_random_seed=5))
#
# # Test that the model actually trains.
# classifier.fit(input_fn=_input_fn, steps=50)
# evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
# self.assertGreater(evaluate_output['precision_at_1'], 0.9)
# self.assertGreater(evaluate_output['recall_at_1'], 0.9)
#
# # Test the output of predict()
# predict_output = classifier.predict(input_fn=_input_fn)
# self.assertListEqual([1, 0, 0], list(predict_output))
#
  def testTrainSaveLoad(self):
    """Tests that ensure that you can save and reload a trained model."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=10)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    model_dir = tempfile.mkdtemp()
    # Train briefly and record the first estimator's predictions.
    classifier1 = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        model_dir=model_dir,
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4])
    classifier1.fit(input_fn=_input_fn, steps=1)
    predict_output1 = classifier1.predict(input_fn=_input_fn)
    del classifier1
    # A second estimator pointed at the same model_dir must reload the
    # checkpoint and reproduce identical predictions.
    classifier2 = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        model_dir=model_dir,
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4])
    predict_output2 = classifier2.predict(input_fn=_input_fn)
    self.assertEqual(list(predict_output1), list(predict_output2))
  def testCustomOptimizerByObject(self):
    """Tests the use of custom optimizer."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    # Optimizer supplied as an instantiated object; tf_random_seed makes
    # training deterministic so the metric thresholds below are stable.
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    # Test that the model actually trains.
    classifier.fit(input_fn=_input_fn, steps=50)
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.9)
    self.assertGreater(evaluate_output['recall_at_1'], 0.9)
    # Test the output of predict()
    predict_output = classifier.predict(input_fn=_input_fn)
    self.assertListEqual([1, 0, 0], list(predict_output))
  def testCustomOptimizerByFunction(self):
    """Tests the use of custom optimizer."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    def _optimizer_exp_decay():
      # Optimizer supplied as a zero-argument callable, constructed lazily
      # inside the estimator's graph (required for exponential_decay).
      global_step = tf.contrib.framework.get_global_step()
      learning_rate = tf.train.exponential_decay(learning_rate=0.01,
                                                 global_step=global_step,
                                                 decay_steps=100,
                                                 decay_rate=0.001)
      return tf.train.AdagradOptimizer(learning_rate=learning_rate)
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=_optimizer_exp_decay,
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    # Test that the model actually trains (looser thresholds than the
    # object-optimizer test because of the decaying learning rate).
    classifier.fit(input_fn=_input_fn, steps=50)
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.6)
    self.assertGreater(evaluate_output['recall_at_1'], 0.6)
  def testExport(self):
    """Tests that export model for servo works."""
    def _input_fn():
      features = {
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=100)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4])
    # Only checks that export() completes; the exported model itself is
    # not inspected.
    export_dir = tempfile.mkdtemp()
    classifier.fit(input_fn=_input_fn, steps=50)
    classifier.export(export_dir)
  def testPredictAsIterable(self):
    """Tests predict() and predict_proba() call with as_iterable set to True."""
    def _input_fn(num_epochs=None):
      features = {
          # limit_epochs lets the iterable prediction terminate after one
          # pass instead of looping forever.
          'age': tf.train.limit_epochs(tf.constant([[.9], [.1], [.1]]),
                                       num_epochs=num_epochs),
          'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                      indices=[[0, 0], [0, 1], [2, 0]],
                                      shape=[3, 2])
      }
      target = tf.constant([[1], [0], [0]], dtype=tf.int64)
      return features, target
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1),
        tf.contrib.layers.real_valued_column('age')
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=feature_columns,
        hidden_units=[4, 4])
    classifier.fit(input_fn=_input_fn, steps=1)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    # Test the output of predict() and predict_proba() with as_iterable=True
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    predictions_proba = list(
        classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
    # Predicted classes must equal the argmax of the predicted probabilities.
    self.assertTrue(np.array_equal(predictions,
                                   np.argmax(predictions_proba, 1)))
def testCustomMetrics(self):
    """Tests the use of custom metric."""
    def _input_fn():
        features = {
            'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                        indices=[[0, 0], [0, 1], [2, 0]],
                                        shape=[3, 2])
        }
        target = tf.constant([[1], [0], [0]], dtype=tf.int64)
        return features, target
    def _my_metric_op(predictions, targets):
        """Simply multiplies predictions and targets to return [1, 0 , 0]."""
        # NOTE(review): tf.mul was renamed tf.multiply in TF 1.0 — this code
        # targets a pre-1.0 TF; confirm the pinned version.
        prediction_classes = math_ops.argmax(predictions, 1)
        return tf.mul(prediction_classes, tf.reshape(targets, [-1]))
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=1,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    # Test that the model actually trains.
    classifier.fit(input_fn=_input_fn, steps=50)
    # Attach the custom op to the 'probabilities' prediction key.
    metrics = {('my_metric', 'probabilities'): _my_metric_op}
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1,
                                          metrics=metrics)
    self.assertListEqual([1, 0, 0], list(evaluate_output['my_metric']))
def testMultiLabelTopKWithCustomMetrics(self):
    """Tests the cases where n_labels>1 top_k>1 and custom metrics on top_k."""
    def _input_fn():
        features = {
            'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                        indices=[[0, 0], [0, 1], [2, 0]],
                                        shape=[3, 2])
        }
        # Two labels per example (multi-label setting).
        target = tf.constant([[0, 1], [0, 1], [0, 1]], dtype=tf.int64)
        return features, target
    def _my_metric_op(predictions, targets):
        """Simply adds the predictions and targets."""
        return tf.add(math_ops.to_int64(predictions), targets)
    sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    embedding_features = [
        tf.contrib.layers.embedding_column(sparse_column, dimension=1)
    ]
    classifier = dnn_sampled_softmax_classifier._DNNSampledSoftmaxClassifier(
        n_classes=3,
        n_samples=2,
        n_labels=2,
        top_k=2,
        feature_columns=embedding_features,
        hidden_units=[4, 4],
        optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
        config=tf.contrib.learn.RunConfig(tf_random_seed=5))
    classifier.fit(input_fn=_input_fn, steps=50)
    # evaluate() without custom metrics.
    # Loose 0.4 thresholds because training is short and seeded but noisy.
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1)
    self.assertGreater(evaluate_output['precision_at_1'], 0.4)
    self.assertGreater(evaluate_output['recall_at_1'], 0.4)
    self.assertGreater(evaluate_output['precision_at_2'], 0.4)
    self.assertGreater(evaluate_output['recall_at_2'], 0.4)
    self.assertGreater(evaluate_output['average_precision_at_2'], 0.4)
    # evaluate() with custom metrics.
    metrics = {('my_metric', 'top_k'): _my_metric_op}
    evaluate_output = classifier.evaluate(input_fn=_input_fn, steps=1,
                                          metrics=metrics)
    # This test's output is flaky so just testing that 'my_metric' is indeed
    # part of the evaluate_output.
    self.assertTrue('my_metric' in evaluate_output)
    # predict() with top_k.
    predict_output = classifier.predict(input_fn=_input_fn, get_top_k=True)
    self.assertListEqual([3, 2], list(predict_output.shape))
    # TODO(dnivara): Setup this test such that it is not flaky and predict() and
    # evaluate() outputs can be tested.
if __name__ == '__main__':
    # Run every test in this module under the TensorFlow test runner.
    tf.test.main()
|
import pytest
import taichi as ti
@ti.test(arch=ti.cpu)
def test_binary_op():
    # Bitwise AND with a float operand is not a valid Taichi operation; the
    # kernel must fail to compile. Taichi reports the compile failure via
    # SystemExit, which pytest.raises captures below.
    @ti.kernel
    def bitwise_float():
        a = 1
        b = 3.1
        c = a & b
    with pytest.raises(SystemExit):
        bitwise_float()
|
# ObjectID: 1000013
# Character field ID when accessed: 744000000
# ParentID: 9330193
# Object Position X: 623
# Object Position Y: 245
|
"""
This class is used to establish the db connection, retrieve the data required to populate the initial values in the
dropdowns of the aggregation levels and the changes in the data entities based on the choice of the aggregation levels.
"""
import sys
import os
import imp
import configparser as cp
sys.path.append('.')
class BaseDbUtil:
    """Loads DB/app settings from config.txt and builds the SQL used to
    populate the aggregation-level dropdowns and the vertex/edge JSON."""

    def __init__(self):
        # All settings come from an INI file located next to the app.
        self.config = cp.ConfigParser()
        self.config.read('config.txt')
        self.app_home_path = self.config.get('app-home', 'app_home_path')
        self.path_to_creds = self.config.get('app-home', 'path_to_creds')
        self.app_home_path = "".join([self.app_home_path, self.path_to_creds])
        # NOTE(review): `imp` is deprecated and removed in Python 3.12;
        # consider importlib. Credentials are loaded from a compiled module.
        self.db_creds_module = imp.load_compiled("db_creds_module", self.app_home_path)
        self.filter = self.config.get('db-settings', 'filter')
        self.levels = self.config.get('db-settings', 'levels')
        self.hierarchy_table_name = self.config.get('db-settings', 'hierarchy_table_name')
        self.date_column_name = self.config.get('db-settings', 'date_column_name')
        self.entity_list = self.config.get('entity-settings', 'entity_list')
        self.number_of_entities = self.config.get('entity-settings', 'number_of_entities')

    # Create a method to get the drop down data and call this method in the app.py, then pass on the list in the html
    # This method should be callable for all the levels which the user can choose from
    # However, the data of the lower levels should be based on the data of the higher levels
    def preparesqlquery(self, tablename=None, fieldname=None, datafor=None, entity=None, formdata=None, mode=None):
        """Build a SQL string for one of three consumers.

        datafor='dropdown'   -> distinct values of *fieldname* in *tablename*
        datafor='vertexjson' -> the per-entity query configured in the INI file
        datafor='edgejson'   -> an aggregate over the selected levels/date
        Returns None (after printing a message) when required inputs are missing.

        WARNING(review): identifiers and form values are interpolated straight
        into the SQL text — switch to parameterized queries / identifier
        quoting if formdata can ever carry untrusted input.
        """
        # Bug fix: `formdata={}` was a mutable default argument shared across
        # calls; use None and create a fresh dict per invocation instead.
        if formdata is None:
            formdata = {}
        query = None
        if datafor == 'dropdown':
            if tablename is None or fieldname is None:
                print('No table name or column name provided, cannot prepare SQL, will exit!')
            else:
                query = """ select distinct {0} from {1} order by {0} """.format(fieldname, tablename)
        elif datafor == 'vertexjson':
            query = self.config.get('entity-settings', entity)
            if query is None:
                print("No query provided, can't build vertices, will exit")
        elif datafor == 'edgejson':
            levels = self.levels.split(',')
            whereclause = ''
            groupby = ''
            for i in levels:
                print('level_' + i)
                print('mode is ', mode)
                # Grow the GROUP BY: the date column is always grouped; other
                # levels only when the form selected a concrete value.
                if groupby != '':
                    if i == self.date_column_name:
                        groupby = groupby + """ , {0} """.format(i)
                    elif formdata['level_' + i] != 'N/A':
                        groupby = groupby + """ , {0} """.format(i)
                    else:
                        groupby = groupby
                else:
                    if i == self.date_column_name:
                        groupby = groupby + """ {0} """.format(i)
                    elif formdata['level_' + i] != 'N/A':
                        groupby = groupby + """ {0} """.format(i)
                    else:
                        groupby = groupby
                # Grow the WHERE clause; the date filter reads the "future" or
                # "past" form field depending on *mode*.
                if whereclause != '':
                    if i == self.date_column_name:
                        if mode == 'Future':
                            whereclause = whereclause + """and {0} = '{1}'::date """.format(
                                i, formdata['level_future_' + i])
                        else:
                            whereclause = whereclause + """and {0} = '{1}'::date """.format(
                                i, formdata['level_past_' + i])
                    elif formdata['level_' + i] != 'N/A':
                        whereclause = whereclause + """and {0} = '{1}' """.format(i, formdata['level_' + i])
                    else:
                        whereclause = whereclause
                else:
                    if i == self.date_column_name:
                        if mode == 'Future':
                            whereclause = whereclause + """{0} = '{1}'::date """.format(
                                i, formdata['level_future_' + i])
                        else:
                            whereclause = whereclause + """{0} = '{1}'::date """.format(i, formdata['level_past_' + i])
                    elif formdata['level_' + i] != 'N/A':
                        whereclause = whereclause + """{0} = '{1}' """.format(i, formdata['level_' + i])
                    else:
                        whereclause = whereclause
            query = """ select sum({0}) as {0} from {1} where {2} group by {3}""".format(fieldname, tablename,
                                                                                         whereclause, groupby)
        return query

    ## This method won't be needed in an actual production scenario as loading data is not the focus of this application
    ## This method is present only to help by loading test data
    def loadindb(self):
        """Truncate and reload the two test tables from CSVs via dbUpload.sh."""
        # NOTE(review): self.getdbconnection / self.username / self.password are
        # not defined in this class — presumably provided by a subclass or the
        # creds module; confirm. Passing credentials on a shell command line
        # also exposes them in the process list.
        dbconn = self.getdbconnection()
        if dbconn:
            print('successfully connected to database')
            ## These table creations were for one time run only
            #covidCasesDf.head(0).to_sql('t_covidCases', engine, if_exists='replace',index=False, schema='s_data')
            #covidActivityDf.head(0).to_sql('t_covidActivity', engine, if_exists='replace', index=False, schema='s_data')
            #print('successfully created tables')
            print("truncating the table1 before loading")
            dbconn.execute('truncate table s_data.t_covidCases')
            os.system(f"sh dbUpload.sh 'localhost' 5432 'trackdatalineage' {self.username} {self.password} \
                's_data.t_covidCases' 'COVID19Cases.csv'")
            print('table1 uploaded')
            print("truncating the table2 before loading")
            dbconn.execute('truncate table s_data.t_covidActivity')
            os.system(f"sh dbUpload.sh 'localhost' 5432 'trackdatalineage' {self.username} {self.password} \
                's_data.t_covidActivity' 'COVID19Activity.csv'")
            print('table2 uploaded')
        else:
            print('retry')
|
from textabstractor.about import __project_name__, __version__  # noqa F401

import pluggy

# Marker that plugin modules use to declare hook implementations for this
# project's pluggy plugin system.
hookimpl = pluggy.HookimplMarker(__project_name__)

# Imported after the marker exists (hence the E402 suppression) so the
# submodules can use it; re-exported as part of the package API.
from textabstractor import app  # noqa: E402, F401
from textabstractor import nlp  # noqa: E402, F401
|
class Element:
    """A minimal tagged element with an identifier and a type code."""

    # Type codes for the supported element kinds.
    E_LABEL = 1
    E_GOTO = 2
    E_SCALAR = 3

    def __init__(self):
        # A default-constructed element is "unset": empty id, sentinel type.
        self.id = ""
        self.type = -1

    def constructor1(self, id, type) -> None:
        """Initialize this element in place with an explicit id and type code."""
        self.id = id
        self.type = type

    def copy(self, copy):
        """Copy id/type from another Element; no-op when given None."""
        # Idiom fix: identity comparison with None (`is not`), not `!=`.
        if copy is not None:
            self.id = copy.id
            self.type = copy.type

    def __str__(self):
        # An element whose id was explicitly set to None still prints as "".
        if self.id is not None:
            return self.id
        return ""
import os
from functools import wraps
from logger.get_logger import get_logger
logger = get_logger(__name__)
def save_request_id(lambda_handler):
    """Decorator that records the AWS Lambda request id in os.environ
    before delegating to the wrapped handler."""

    @wraps(lambda_handler)
    def _wrapper(event, context):
        # Best-effort bookkeeping: a missing/odd context must never break
        # the real handler.
        try:
            os.environ['LAMBDA_REQUEST_ID'] = context.aws_request_id
        except Exception as e:
            logger.warning(f'Exception occurred: {e}', exc_info=True)
        return lambda_handler(event, context)

    return _wrapper
|
from typing import Optional, Union
from libcst import CSTTransformer, Comment, RemovalSentinel, SimpleStatementLine, BaseStatement, FlattenSentinel, \
MaybeSentinel, ClassDef, Name, FunctionDef, CSTNode, BaseSmallStatement, Assign, Attribute, AnnAssign, Import, \
Tuple, List, ImportFrom, ImportStar
from libcst.metadata import FunctionScope, ClassScope, ComprehensionScope, GlobalScope
from snakepack.analyzers.python.imports import ImportGraphAnalyzer
from snakepack.analyzers.python.scope import ScopeAnalyzer
from snakepack.transformers.python._base import PythonModuleTransformer
class RemoveUnreferencedCodeTransformer(PythonModuleTransformer):
    """Dead-code elimination pass: removes functions, classes, assignments and
    import aliases whose bound name is never accessed, based on the scope and
    import-graph analyses.

    NOTE(review): _is_referenced is called with its default ``assignment=True``
    from the Assign/AnnAssign/Import handlers, and that path always returns
    True — so in practice only functions and classes (assignment=False) are
    ever removed. Confirm whether that is intentional.
    """

    REQUIRED_ANALYZERS = PythonModuleTransformer.REQUIRED_ANALYZERS + [
        ScopeAnalyzer,
        ImportGraphAnalyzer
    ]

    class _CstTransformer(PythonModuleTransformer._CstTransformer):
        def leave_FunctionDef(
                self, original_node: FunctionDef, updated_node: FunctionDef
        ) -> Union[BaseStatement, FlattenSentinel[BaseStatement], RemovalSentinel]:
            """Remove a function definition whose name is never referenced."""
            if not self._is_referenced(original_node, updated_node.name.value, assignment=False):
                return RemovalSentinel.REMOVE
            return updated_node

        def leave_ClassDef(
                self, original_node: ClassDef, updated_node: ClassDef
        ) -> Union[BaseStatement, FlattenSentinel[BaseStatement], RemovalSentinel]:
            """Remove a class definition whose name is never referenced."""
            if not self._is_referenced(original_node, updated_node.name.value, assignment=False):
                return RemovalSentinel.REMOVE
            return updated_node

        def leave_Assign(
                self, original_node: Assign, updated_node: Assign
        ) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
            """Remove a single-target, unreferenced Name assignment."""
            if len(updated_node.targets) > 1:
                # don't touch multi-assignments (need type inference for reliably remove)
                return updated_node
            scope = self._analyses[ScopeAnalyzer].get_scope_for_node(original_node)
            if not isinstance(updated_node.targets[0].target, Name) or isinstance(scope, ClassScope):
                # don't touch attributes (references not reliably detected)
                return updated_node
            if not self._is_referenced(original_node.targets[0].target, updated_node.targets[0].target.value):
                return RemovalSentinel.REMOVE
            return updated_node

        def leave_AnnAssign(
                self, original_node: AnnAssign, updated_node: AnnAssign
        ) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
            """Remove an unreferenced annotated Name assignment."""
            scope = self._analyses[ScopeAnalyzer].get_scope_for_node(original_node)
            if not isinstance(updated_node.target, Name) or isinstance(scope, ClassScope):
                # don't touch attributes (references not reliably detected)
                return updated_node
            if not self._is_referenced(original_node.target, updated_node.target.value):
                return RemovalSentinel.REMOVE
            return updated_node

        def leave_Import(
                self, original_node: Import, updated_node: Import
        ) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
            """Drop unreferenced aliases from an `import` statement; remove
            the whole statement when no alias survives."""
            updated_imports = []
            for import_ in original_node.names:
                if import_.asname is None:
                    # For a dotted `import a.b`, use the last component's name.
                    imported_name = import_.name.value if isinstance(import_.name, Name) else import_.name.attr.value
                else:
                    assert isinstance(import_.asname.name, Name)
                    imported_name = import_.asname.name.value
                if self._is_referenced(import_.name, imported_name):
                    updated_imports.append(import_)
            if len(updated_imports) > 0:
                # The last surviving alias must not carry a trailing comma.
                updated_imports[-1] = updated_imports[-1].with_changes(comma=MaybeSentinel.DEFAULT)
                return updated_node.with_changes(names=updated_imports)
            return RemovalSentinel.REMOVE

        def leave_ImportFrom(
                self, original_node: ImportFrom, updated_node: ImportFrom
        ) -> Union[BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel]:
            """Same as leave_Import but for `from ... import ...` statements."""
            if isinstance(updated_node.names, ImportStar):
                # don't remove star imports
                return updated_node
            updated_imports = []
            for import_ in original_node.names:
                if import_.asname is None:
                    imported_name = import_.name.value if isinstance(import_.name, Name) else import_.name.attr.value
                else:
                    assert isinstance(import_.asname.name, Name)
                    imported_name = import_.asname.name.value
                if self._is_referenced(import_.name, imported_name):
                    updated_imports.append(import_)
            if len(updated_imports) > 0:
                updated_imports[-1] = updated_imports[-1].with_changes(comma=MaybeSentinel.DEFAULT)
                return updated_node.with_changes(names=updated_imports)
            return RemovalSentinel.REMOVE

        def _is_referenced(self, node: CSTNode, identifier: str, assignment=True) -> bool:
            """Conservatively decide whether *identifier* (bound at *node*) is used.

            Only names bound in a local scope are ever reported unreferenced;
            module-level bindings may be part of the public API, so they are
            always treated as referenced.
            """
            if not assignment and self._analyses[ScopeAnalyzer].is_in_local_scope(node):
                scope = self._analyses[ScopeAnalyzer].get_scope_for_node(node)
                if identifier in scope.accesses:
                    # only remove unreferenced code in local scope
                    return True
                return False
            # fallback to assuming the code is referenced
            return True

    __config_name__ = 'remove_unreferenced_code'
|
from krita import DockWidgetFactory, DockWidgetFactoryBase
from .docker_template import DockerTemplate
# Register the template docker with Krita so it appears as a dockable panel
# (docked on the right by default).
DOCKER_ID = 'template_docker'
# NOTE(review): `Krita` is not imported in this module — it is presumably
# injected into the plugin namespace by the Krita runtime; confirm, or import
# it explicitly from `krita`.
instance = Krita.instance()
dock_widget_factory = DockWidgetFactory(DOCKER_ID,
                                        DockWidgetFactoryBase.DockRight,
                                        DockerTemplate)
instance.addDockWidgetFactory(dock_widget_factory)
from unetschema.schema import B58_CHARS, ADDRESS_CHECKSUM_LENGTH
from unetschema.hashing import double_sha256
from unetschema.error import InvalidAddress
def b58decode(v):
    """Decode a base-58 string into a byte-string.

    Leading B58_CHARS[0] ('1') characters decode to leading zero bytes.
    NOTE(review): this uses Python-2 style chr()/ord() byte handling — on
    Python 3 the result is a str of code points, not bytes; confirm the
    intended runtime before reuse.
    """
    # Bug fix: `0L` is a Python-2-only long literal (SyntaxError on Python 3);
    # plain ints are arbitrary precision in both 2 and 3.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += B58_CHARS.find(c) * (58 ** i)
    result = ''
    # Peel off base-256 digits, most significant first.
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    # Leading '1's in base-58 represent leading zero bytes.
    nPad = 0
    for c in v:
        if c == B58_CHARS[0]:
            nPad += 1
        else:
            break
    return chr(0) * nPad + result
def b58encode(v):
    """Encode a byte-string in base 58.

    NOTE(review): Python-2 style str/ord handling — see b58decode.
    """
    # Bug fix: `0L` is a Python-2-only long literal (SyntaxError on Python 3).
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256 ** i) * ord(c)
    result = ''
    # Peel off base-58 digits, most significant first.
    while long_value >= 58:
        div, mod = divmod(long_value, 58)
        result = B58_CHARS[mod] + result
        long_value = div
    result = B58_CHARS[long_value] + result
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        if c == '\0':
            nPad += 1
        else:
            break
    return (B58_CHARS[0] * nPad) + result
def validate_b58_checksum(addr_bytes):
    """Raise InvalidAddress unless the trailing checksum matches the
    double-SHA256 of the rest of the address."""
    payload = addr_bytes[:-ADDRESS_CHECKSUM_LENGTH]
    checksum = addr_bytes[-ADDRESS_CHECKSUM_LENGTH:]
    expected = double_sha256(payload)[:ADDRESS_CHECKSUM_LENGTH]
    if expected != checksum:
        raise InvalidAddress("Invalid address checksum")
def b58decode_strip_checksum(v):
    """Decode base-58, verify the checksum, and return the payload without it."""
    decoded = b58decode(v)
    validate_b58_checksum(decoded)
    return decoded[:-ADDRESS_CHECKSUM_LENGTH]
def b58encode_with_checksum(addr_bytes):
    """Append the double-SHA256 checksum and base-58 encode the result."""
    checksum = double_sha256(addr_bytes)[:ADDRESS_CHECKSUM_LENGTH]
    return b58encode(addr_bytes + checksum)
|
# -*- coding:utf-8 -*-
from src.Update.Conf.config import *
class ReadConfigFile():
    """Reads values out of an INI-style configuration file (worker class)."""

    def __init__(self, fileName='./conf/main.ini'):
        # NOTE(review): `ConfigParser.ConfigParser()` is the Python-2 module
        # spelling; everything here comes from the wildcard import of
        # src.Update.Conf.config — confirm which parser is actually bound.
        self.config = ConfigParser.ConfigParser()
        self.fileName = fileName

    def readConfigFile(self, configMainName, configSubName):
        """Return option *configSubName* from section *configMainName* as a
        string, or None (after appending a record to data/wrongMessage.dat)
        on any failure.
        """
        try:
            # Load the configuration file.
            # NOTE(review): readfp() is deprecated (removed in 3.12) in favor
            # of read_file(); also the file handle opened here is never closed.
            self.config.readfp(open(self.fileName))
            message = self.config.get(configMainName, configSubName)
            # Emit a debug trace when the debug flags are on.
            # NOTE(review): this prints the braces literally — it looks like a
            # format()/f-string call was intended; confirm.
            if DEBUG and SYSTEM_TOOLS_DEBUG:
                print('{SYS}{MISSION_DEBUG} config has been load from file successfully')
            return str(message)
        except Exception as e:
            # Append a structured error record to the error-log file.
            wrongFile = open('data/wrongMessage.dat', 'a+')
            # Current timestamp (strftime/strptime round-trip of localtime).
            currentTime = str(datetime.datetime.strptime(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()),
                                                         '%Y-%m-%d-%H-%M-%S'))
            # Build the error payload.
            wrongMessage = {
                '|currentTime': currentTime,
                '|configMainName': configMainName,
                '|configSubName': configSubName,
                '|file': 'SystemTools-ConfFileRead-readConfigFile',
                '|wrongMessage': str(e)
            }
            # Persist it, one record per line.
            wrongFile.write(str(wrongMessage))
            wrongFile.write('\n')
            wrongFile.close()
            return None
# Ad-hoc smoke test for the config reader.
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute Windows path, and the backslashes are
    # not raw-string escaped — use a raw string or forward slashes if kept.
    l = ReadConfigFile(fileName='F:\python17\pythonPro\MemortAssit\conf\main.ini')
    print(l.readConfigFile('VERSION', 'version'))
|
#!/usr/local/bin/python3
import sys, re, json
import pathlib
import argparse
from ruamel.yaml import YAML
from os import path as path
from os import scandir as scandir
from os import remove as deleteFile
from collections import OrderedDict
# Resolve paths relative to this script and load its YAML configuration,
# expected at ./config/<scriptname>.yaml next to the script.
here_path = path.dirname(path.abspath(__file__))
# Bug fix: the previous pattern ".py" was unescaped and unanchored, so it
# could rewrite a ".py"-like sequence anywhere in the name; anchor to the
# literal suffix instead.
this_script = re.sub(r"\.py$", ".yaml", path.basename(__file__))
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
with open(path.join(here_path, "config", this_script), 'r') as c_f:
    config = yaml.load(c_f)
"""podmd
The script converts files in directories between yaml <-> json
It requires the specification of input and output directories and file formats.
podmd"""
################################################################################
################################################################################
################################################################################
def _get_args():
    """Define and parse the command-line interface for the converter."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inpath", help="File or directory to be converted.", required=True)
    parser.add_argument("-o", "--outpath", help="Directory to be saved to.", required=True)
    parser.add_argument("-t", "--filetype", help="File type to convert from; for directory parsing.", required=True)
    parser.add_argument("-x", "--exporttype", help="File type to convert to.", required=True)
    parser.add_argument("-v", "--verbose", help="Show conversion details.", default=False)
    args = parser.parse_args()
    return args
################################################################################
def main():
    """CLI driver: validate paths and file types, then convert the tree."""
    args = _get_args()
    in_path = pathlib.Path(args.inpath)
    out_path = pathlib.Path(args.outpath)
    from_type = args.filetype
    to_type = args.exporttype
    config.update({"args": args, "from_type": from_type, "to_type": to_type})
    # Both endpoints must be existing directories.
    for p in [in_path, out_path]:
        if not p.is_dir():
            print("¡¡¡ WARNING: No directory in {}!!!".format(p))
            exit()
    for t in [from_type, to_type]:
        if t not in config["supported_types"]:
            # Bug fix: `join(...)` was an undefined name (NameError); use
            # str.join so the supported types are actually printed.
            print("¡¡¡ WARNING: Only {} are supported!!!".format(", ".join(config["supported_types"])))
            exit()
    convert_dir(in_path, out_path, from_type, to_type, config)
################################################################################
def convert_dir(in_path, out_path, from_type, to_type, config):
    """Recursively convert every *from_type* file under in_path into out_path.

    Mirrors the directory structure, optionally clearing pre-existing output
    files of the target type first (config["delete_existing_files"]).
    """
    pathlib.Path(out_path).mkdir(exist_ok=True)
    if config["delete_existing_files"] is True:
        for f in scandir(out_path):
            if to_type in pathlib.Path(f).suffix and f.is_file():
                deleteFile(f)
    fs = [f.name for f in scandir(in_path) if f.is_file()]
    fs = [f for f in fs if f.endswith(from_type)]
    # Security/idiom fix: dispatch via globals() instead of eval() — same name
    # lookup, but it cannot execute arbitrary expressions if the type strings
    # ever come from untrusted input; also resolved once instead of per file.
    conversion_method = globals()["_{}2{}".format(from_type, to_type)]
    for f_n in fs:
        conversion_method(f_n, in_path, out_path, config)
    # Recurse into subdirectories, mirroring them on the output side.
    d_s = [d.name for d in scandir(in_path) if d.is_dir()]
    for d in d_s:
        in_d = path.join(in_path, d)
        out_d = path.join(out_path, d)
        convert_dir(in_d, out_d, from_type, to_type, config)
################################################################################
def _yaml2json(f_n, in_path, out_path, config):
    """Convert one YAML file to JSON in out_path (same basename, .json)."""
    in_file = path.join(in_path, f_n)
    o_n = re.sub(r"\.yaml", ".json", f_n)
    out_file = path.join(out_path, o_n)
    _file_conversion_message(config, in_file, out_file)
    i_d = _file_read_and_clean(in_file, config)
    try:
        s = yaml.load(i_d)
    except Exception as e:
        # Consistency fix: use the shared error reporter (and a plain
        # `return`, not `return()`) like the other converters do.
        _file_conversion_error(e, in_file)
        return
    _par_replace(s, config)
    with open(out_file, 'w') as out_f:
        out_f.write(json.dumps(OrderedDict(s), indent=4, sort_keys=True, default=str))
################################################################################
def _yaml2yaml(f_n, in_path, out_path, config):
    """Re-emit one YAML file through the cleaner/replacer (name unchanged)."""
    in_file = path.join(in_path, f_n)
    # (Removed the unused `o_n` alias the original assigned but never read.)
    out_file = path.join(out_path, f_n)
    _file_conversion_message(config, in_file, out_file)
    i_d = _file_read_and_clean(in_file, config)
    try:
        s = yaml.load(i_d)
    except Exception as e:
        _file_conversion_error(e, in_file)
        return
    _par_replace(s, config)
    with open(out_file, 'w') as out_f:
        yaml.dump(s, out_f)
################################################################################
def _json2yaml(f_n, in_path, out_path, config):
    """Convert one JSON file to YAML in out_path (same basename, .yaml)."""
    in_file = path.join(in_path, f_n)
    out_name = re.sub(r"\.json", ".yaml", f_n)
    out_file = path.join(out_path, out_name)
    _file_conversion_message(config, in_file, out_file)
    raw = _file_read_and_clean(in_file, config)
    try:
        data = json.loads(raw)
    except Exception as e:
        _file_conversion_error(e, in_file)
        return
    _par_replace(data, config)
    with open(out_file, 'w') as out_f:
        yaml.dump(data, out_f)
################################################################################
def _json2json(f_n, in_path, out_path, config):
    """Re-emit one JSON file through the cleaner/replacer (name unchanged).

    Bug fix: the previous version serialized with yaml.dump, writing YAML
    text into a .json output file; a JSON-to-JSON conversion must emit JSON
    (matching what _yaml2json writes).
    """
    in_file = path.join(in_path, f_n)
    out_file = path.join(out_path, f_n)
    _file_conversion_message(config, in_file, out_file)
    i_d = _file_read_and_clean(in_file, config)
    try:
        s = json.loads(i_d)
    except Exception as e:
        _file_conversion_error(e, in_file)
        return
    _par_replace(s, config)
    with open(out_file, 'w') as out_f:
        out_f.write(json.dumps(s, indent=4, sort_keys=True, default=str))
################################################################################
def _file_read_and_clean(in_file, config):
    """Read *in_file* and apply the configured text replacements."""
    # The `with` statement already closes the handle; the old explicit
    # in_f.close() inside the block was redundant.
    with open(in_file, 'r') as in_f:
        f_t = in_f.read()
    f_t = _text_replace(f_t, config)
    return f_t
################################################################################
def _text_replace(text, config):
for r in config["replace_text"]:
text = text.replace(r["from"], r["to"])
return text
################################################################################
def _par_replace(schema, config):
for r, rv in config["replace_vals"].items():
if r in schema:
schema.update({r: rv["replaceValue"]})
else:
if "add" in r:
if rv["add"] is True:
schema.update({r: rv["replaceValue"]})
return schema
################################################################################
def _file_conversion_error(e, f):
print("\n¡¡¡¡¡ ######################################################\n{}".format(f))
print(e)
print("###################################################### !!!!!\n")
return
################################################################################
def _file_conversion_message(config, in_file, out_file):
if config["args"].verbose is False:
return
print("converting {}\n => {}".format(in_file, out_file))
return
################################################################################
################################################################################
################################################################################
if __name__ == '__main__':
    # Script entry point.
    main()
|
from calendar import monthrange, month_name
from datetime import date
from django import template
register = template.Library()
@register.simple_tag
def format_timespan(month, year):
    """Render "Month 1 - Month <end>, <year>": the current month ends today,
    any other month ends on its last calendar day."""
    month = int(month)
    year = int(year)
    today = date.today()
    if today.month == month and today.year == year:
        end_day = today.day
    else:
        end_day = monthrange(year, month)[1]
    return '{} 1 - {} {}, {}'.format(
        month_name[month], month_name[month], end_day, year
    )
@register.filter
def modulo_10(value):
    """Return the last decimal digit of *value*."""
    number = int(value)
    return number % 10
@register.filter
def div_10(value):
    # NOTE(review): true division — under Python 3 this returns a float
    # (e.g. 45 -> 5.5). If a 1-based row/bucket index was intended, this
    # should probably be integer division `// 10 + 1`; confirm against the
    # templates that use this filter.
    return (int(value) / 10) + 1
@register.filter
def is_current_month(month, year):
    """True for the current month, and also for the immediately previous
    month during the first 7 days of the new month (grace period)."""
    month = int(month)
    year = int(year)
    today = date.today()
    if today.month == month and today.year == year:
        return True
    if today.year == year and today.month == month + 1 and today.day < 8:
        return True
    return False
|
from django.contrib import admin
from .models import ExerciseTemplate, Exercise, WorkloadTemplate, Workload, SetTemplate, Set
class ExerciseTemplateAdmin(admin.ModelAdmin):
    """Default admin for ExerciseTemplate (no customization yet)."""
    pass
class ExerciseAdmin(admin.ModelAdmin):
    """Default admin for Exercise (no customization yet)."""
    pass
class WorkloadTemplateAdmin(admin.ModelAdmin):
    """Default admin for WorkloadTemplate (no customization yet)."""
    pass
class WorkloadAdmin(admin.ModelAdmin):
    """Default admin for Workload (no customization yet)."""
    pass
class SetTemplateAdmin(admin.ModelAdmin):
    """Default admin for SetTemplate (no customization yet)."""
    pass
class SetAdmin(admin.ModelAdmin):
    """Default admin for Set (no customization yet)."""
    pass
# Expose every workout model in the Django admin with its (currently default)
# ModelAdmin class.
admin.site.register(ExerciseTemplate, ExerciseTemplateAdmin)
admin.site.register(Exercise, ExerciseAdmin)
admin.site.register(WorkloadTemplate, WorkloadTemplateAdmin)
admin.site.register(Workload, WorkloadAdmin)
admin.site.register(SetTemplate, SetTemplateAdmin)
admin.site.register(Set, SetAdmin)
|
import abc
import random
# Python 3.5
class Tombola(abc.ABC):
    """Bingo cage ABC: load items, inspect them, test emptiness, draw, mix.

    Concrete subclasses must store their items in an attribute named
    ``itens``.
    """

    @abc.abstractmethod
    def carrega(self, itens):
        """Add items from an iterable."""

    @abc.abstractmethod
    def sorteia(self):
        """Remove and return the last item.

        Must raise ``LookupError`` when the instance is empty.
        """

    def vazia(self):
        """Return True when there are no items left.

        Bug fix: the previous implementation returned
        ``bool(self.inspeciona())`` — i.e. True when the cage was NOT empty,
        the exact opposite of what "vazia" (empty) means.
        """
        return not self.inspeciona()

    def inspeciona(self):
        """Return a tuple of the current items without losing any of them."""
        itens = []
        # Drain via the subclass's sorteia() until it signals emptiness...
        while True:
            try:
                itens.append(self.sorteia())
            except LookupError:
                break
        # ...then put everything back so inspection is non-destructive.
        self.carrega(itens)
        return tuple(itens)
class GaiolaBingo(Tombola):
    """Concrete Tombola backed by a plain list."""

    def __init__(self, itens):
        self.itens = list()
        self.carrega(itens)

    def carrega(self, itens):
        """Append all items from *itens*."""
        self.itens.extend(itens)

    def mistura(self, itens=None):
        """Shuffle the stored items in place.

        *itens* was never used by the original implementation; it is kept
        as an optional parameter for backward compatibility with callers.
        """
        random.shuffle(self.itens)

    def sorteia(self):
        """Pop the last item; raises IndexError (a LookupError subclass)
        when empty, satisfying the Tombola contract."""
        return self.itens.pop()
|
from src.core.functions.shell import *
class Generator(object):
    """Generates project scaffolding.

    Currently supports one artifact kind: a directory structure (with empty
    files) described by parsed YAML.
    """

    def __init__(self):
        super(Generator, self).__init__()

    def generate(self, what="structure", src="", name=""):
        """Dispatch to a generation routine.

        what: kind of artifact to generate (only "structure" is supported).
        src:  parsed YAML describing the layout.
        name: root directory to create and populate.
        No-op when any required argument is missing.
        """
        if (src != "") and (name != "") and (what == "structure"):
            pwd = os.path.abspath(os.getcwd())
            take(name)           # create and enter the root directory
            self.structure(src)
            cd(pwd)              # always return to the original cwd

    # <---------------------------->
    def structure(self, yaml):
        """Recursively materialize a directory tree from parsed YAML.

        Mapping keys are directories (a None value means an empty directory),
        list entries are recursed into, and bare strings become empty files.
        """
        for i in yaml:
            # Idiom/bug fix: isinstance() instead of `type(i) is ...` — YAML
            # loaders (e.g. ruamel) return dict/list SUBCLASSES, which the
            # exact-type checks silently skipped.
            if isinstance(i, dict):
                for key, value in i.items():
                    if value is not None:
                        pwd = os.path.abspath(os.getcwd())
                        take(key)
                        self.structure(value)
                        cd(pwd)
                    else:
                        mkdir(key)
            elif isinstance(i, list):
                for item in i:
                    pwd = os.path.abspath(os.getcwd())
                    self.structure(item)
                    cd(pwd)
            elif isinstance(i, str):
                touch(i)
|
#!/usr/bin/env python
import sys
import numpy as np
import time
from optparse import OptionParser
import logging
def normalize(A):
    """Column-normalize A so every column sums to 1 (column-stochastic)."""
    return A / A.sum(axis=0)[np.newaxis, :]
def inflate(A, inflate_factor):
    """MCL inflation: elementwise power followed by column normalization."""
    powered = np.power(A, inflate_factor)
    return powered / powered.sum(axis=0)[np.newaxis, :]
def expand(A, expand_factor):
    """MCL expansion: raise the matrix to an integer power."""
    result = np.linalg.matrix_power(A, expand_factor)
    return result
def add_diag(A, mult_factor):
    """Add mult_factor * I to A (self-loops for MCL)."""
    n = A.shape[0]
    return A + mult_factor * np.identity(n)
def get_clusters(A):
    """Extract clusters from a converged MCL matrix.

    A row whose diagonal entry is positive marks an attractor; the positive
    columns of that row are the members of its cluster. Returns a mapping
    {cluster_number: [member_index, ...]}.
    """
    attractors = []
    positive = (A > 0).tolist()
    for i, row in enumerate(positive):
        if row[i]:
            attractors.append(A[i, :] > 0)
    clust_map = {}
    for cn, members in enumerate(attractors):
        for idx, flag in enumerate(members):
            if flag:
                clust_map[cn] = clust_map.get(cn, []) + [idx]
    return clust_map
def draw(G, A, cluster_map):
    """Visualize the clustered graph plus the raw matrix.

    Imports are local so matplotlib/networkx are only required when drawing.
    """
    import networkx as nx
    import matplotlib.pyplot as plt
    # Invert {cluster: [nodes]} into {node: cluster} for per-node coloring.
    clust_map = {}
    for k, vals in cluster_map.items():
        for v in vals:
            clust_map[v] = k
    colors = []
    for i in range(len(G.nodes())):
        # Nodes in no cluster get the sentinel color value 100.
        colors.append(clust_map.get(i, 100))
    pos = nx.spring_layout(G)
    from matplotlib.pylab import matshow, show, cm
    # Figure 2: the graph, nodes colored by cluster.
    plt.figure(2)
    nx.draw_networkx_nodes(G, pos, node_size=200, node_color=colors, cmap=plt.cm.Blues)
    nx.draw_networkx_edges(G, pos, alpha=0.5)
    # Figure 1: the matrix rendered as a grayscale image.
    matshow(A, fignum=1, cmap=cm.gray)
    plt.show()
    show()
def stop(M, i):
    """Convergence check, evaluated only every 5th iteration (i % 5 == 4):
    stop when M is elementwise idempotent, i.e. M*M == M everywhere."""
    if i % 5 != 4:
        return False
    diff = M ** 2 - M
    if np.max(diff) - np.min(diff) == 0:
        logging.info("Stop at iteration %s" % i)
        return True
    return False
def mcl(M, expand_factor=2, inflate_factor=2, max_loop=10, mult_factor=1):
    """Run Markov Clustering on adjacency matrix M.

    Adds self-loops, normalizes, then alternates inflation and expansion
    until convergence or max_loop. Returns (converged_matrix, cluster_map).
    """
    M = normalize(add_diag(M, mult_factor))
    for iteration in range(max_loop):
        logging.info("loop %s" % iteration)
        M = expand(inflate(M, inflate_factor), expand_factor)
        if stop(M, iteration):
            break
    return M, get_clusters(M)
def networkx_mcl(G, expand_factor=2, inflate_factor=2, max_loop=10, mult_factor=1):
    """Run MCL on a networkx graph via its dense adjacency matrix."""
    import networkx as nx
    adjacency = nx.adjacency_matrix(G)
    return mcl(np.array(adjacency.todense()),
               expand_factor, inflate_factor, max_loop, mult_factor)
def print_info(options):
    """Print the MCL parameter banner."""
    separator = "-" * 60
    print(separator)
    print("MARKOV CLUSTERING:")
    print(separator)
    print(" expand_factor: %s" % options.expand_factor)
    print(" inflate_factor: %s" % options.inflate_factor)
    print(" mult factor: %s" % options.mult_factor)
    print(" max loops: %s\n" % options.max_loop)
def get_options():
    """Parse CLI options; the single positional argument is the input matrix.

    NOTE: optparse is deprecated in favor of argparse; kept for interface
    stability. Raises Exception when the input filename is missing.
    """
    usage = "usage: %prog [options] <input_matrix>"
    parser = OptionParser(usage)
    parser.add_option("-e", "--expand_factor",
                      dest="expand_factor",
                      default=2,
                      type=int,
                      help="expand factor (default: %default)")
    parser.add_option("-i", "--inflate_factor",
                      dest="inflate_factor",
                      default=2,
                      type=float,
                      help="inflate factor (default: %default)")
    parser.add_option("-m", "--mult_factor",
                      dest="mult_factor",
                      default=2,
                      type=float,
                      help="multiply factor (default: %default)")
    parser.add_option("-l", "--max_loops",
                      dest="max_loop",
                      default=60,
                      type=int,
                      help="max loops (default: %default)")
    parser.add_option("-o", "--output", metavar="FILE",
                      help="output (default: stdout)")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=True,
                      help="verbose (default: %default)")
    parser.add_option("-d", "--draw-graph",
                      action="store_true", dest="draw", default=False,
                      help="show graph with networkx (default: %default)")
    (options, args) = parser.parse_args()
    try:
        filename = args[0]
    except IndexError:
        # Bug fix: narrowed from a bare `except:` which also caught
        # KeyboardInterrupt/SystemExit.
        raise Exception('input', 'missing input filename')
    return options, filename
def get_graph(csv_filename):
    """Load a comma-separated adjacency matrix; return (M, G).

    M is the matrix as a numpy array, G the corresponding networkx graph.
    NOTE(review): nx.from_numpy_matrix was removed in networkx 3.0
    (from_numpy_array is its successor) — confirm the pinned version.
    The file handle from open() is never closed; a `with` block would fix it.
    """
    import networkx as nx
    M = []
    for r in open(csv_filename):
        r = r.strip().split(",")
        M.append(list(map(lambda x: float(x.strip()), r)))
    G = nx.from_numpy_matrix(np.matrix(M))
    return np.array(M), G
def clusters_to_output(clusters, options):
    """Emit clusters to `options.output` ("node|members" lines) or to stdout.

    Parameters
    ----------
    clusters : dict
        Mapping of cluster key to an iterable of member node ids.
    options : object
        Needs an `output` attribute: a path, or falsy for stdout.
    """
    if options.output and len(options.output) > 0:
        # `with` replaces the unclosed open()/close() pair, so the handle is
        # released even if a write fails.
        with open(options.output, 'w') as f:
            for k, v in clusters.items():
                f.write("%s|%s\n" % (k, ", ".join(map(str, v)) ))
    else:
        print("Clusters:")
        for k, v in clusters.items():
            print('{}, {}'.format(k, v))
if __name__ == '__main__':
    # Script entry point: parse CLI args, run MCL on the adjacency matrix
    # read from the input CSV, then emit clusters (and optionally draw).
    options, filename = get_options()
    print_info(options)
    M, G = get_graph(filename)
    print(" number of nodes: %s\n" % M.shape[0])
    print("{}: {}".format(time.time(), "evaluating clusters..."))
    M, clusters = networkx_mcl(G, expand_factor = options.expand_factor,
                               inflate_factor = options.inflate_factor,
                               max_loop = options.max_loop,
                               mult_factor = options.mult_factor)
    print("{}: {}".format(time.time(), "done\n"))
    clusters_to_output(clusters, options)
    if options.draw:
        # `draw` is defined earlier in this file (outside this chunk).
        print("{}: {}".format(time.time(), "drawing..."))
        draw(G, M, clusters)
        print("{}: {}".format(time.time(), "done"))
|
# *-* coding: utf-8 *-
import requests
import re
from bs4 import BeautifulSoup
import traceback
def crawl():
    """Fetch the stock code list and the detail info of the first stock.

    NOTE(review): only the first code is processed; the commented-out loop
    below suggests iterating over all codes was the eventual intent.
    """
    stock_index_url = 'http://quote.eastmoney.com/center/gridlist.html#hs_a_board'
    stock_info_base_url = 'https://gupiao.baidu.com/stock/'
    stock_code_list = get_stock_list(stock_index_url)
    # Removed leftover debug print('hello'); empty/None list means nothing to do.
    if not stock_code_list:
        return
    stock_code = stock_code_list[0]
    stock_url = stock_info_base_url + stock_code + '.html'
    stock_info = get_stock_info(stock_url)
    # for stock_code in stock_code_list:
    #     stock_url = stock_info_base_url + stock_code + '.html'
    #     stock_info = get_stock_info(stock_url)
# 获取股票页面信息
def get_html_content_text(url, encode='utf-8'):
try:
res = requests.get(url)
res.raise_for_status()
res.encoding = encode
return res.text
except:
return ''
# Fetch the list of stock codes
def get_stock_list(stock_index_url):
    """Extract Shanghai/Shenzhen stock codes ("sh"/"sz" + 6 digits) from hrefs."""
    index_html = get_html_content_text(stock_index_url, 'utf-8')
    soup = BeautifulSoup(index_html, 'html.parser')
    stock_list = []
    for tag_a in soup.find_all('a'):
        try:
            href = tag_a.attrs['href']
            # Bug fix: the pattern was r'[s][hz]\d\{6\}', which matches ONE
            # digit followed by the literal text "{6}". The intended pattern
            # repeats \d six times.
            stock_list.append(re.findall(r'[s][hz]\d{6}', href)[0])
        except (KeyError, IndexError):
            # <a> without an href, or an href without a stock code.
            continue
    return stock_list
# Fetch the detail info of one stock
def get_stock_info(stock_url):
    """Fetch a stock detail page; return {} on success, None on failure.

    NOTE(review): page parsing is not implemented yet — this is a stub that
    only distinguishes fetched vs. not fetched.
    """
    stock_html = get_html_content_text(stock_url)
    if stock_html == '':
        return None
    return {}
if __name__ == '__main__':
    # Script entry point: run the crawler once.
    crawl()
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior
# * Remove `` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from django.db import models
# from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
# Generate tokens for every user upon save
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save hook: create a DRF auth token for each newly created user."""
    if not created:
        return
    Token.objects.create(user=instance)
class Actor(models.Model):
    """Actor record; maps onto the legacy `actors` table."""
    first_name = models.CharField(max_length=45)
    last_name = models.CharField(max_length=45)
    last_update = models.DateTimeField()

    class Meta:
        db_table = 'actors'
class Album(models.Model):
    """Album belonging to an Artist; maps onto the legacy `albums` table."""
    title = models.CharField(max_length=160)
    artist = models.ForeignKey('Artist', on_delete=models.CASCADE)

    class Meta:
        db_table = 'albums'

    def __str__(self):
        return self.title
class Artist(models.Model):
    """Music artist; maps onto the legacy `artists` table."""
    name = models.CharField(max_length=120, blank=True, null=True)

    class Meta:
        db_table = 'artists'

    def __str__(self):
        # `name` is nullable; __str__ must return a str — returning None
        # raises TypeError in the admin and in templates.
        return self.name or ""
class Category(models.Model):
    """Category record; maps onto the legacy `categories` table."""
    name = models.CharField(max_length=25)
    last_update = models.DateTimeField()

    class Meta:
        db_table = 'categories'
class Customer(models.Model):
    """Customer contact record; maps onto the legacy `customers` table."""
    first_name = models.CharField(max_length=40)
    last_name = models.CharField(max_length=20)
    company = models.CharField(max_length=80, blank=True, null=True)
    address = models.CharField(max_length=70, blank=True, null=True)
    city = models.CharField(max_length=40, blank=True, null=True)
    state = models.CharField(max_length=40, blank=True, null=True)
    country = models.CharField(max_length=40, blank=True, null=True)
    postal_code = models.CharField(max_length=10, blank=True, null=True)
    phone = models.CharField(max_length=24, blank=True, null=True)
    fax = models.CharField(max_length=24, blank=True, null=True)
    email = models.CharField(max_length=60)
    # Optional link to the employee handling this customer's support.
    support_rep = models.ForeignKey('Employee', on_delete=models.CASCADE, blank=True, null=True)

    class Meta:
        db_table = 'customers'
class Employee(models.Model):
    """Employee record; maps onto the legacy `employees` table."""
    last_name = models.CharField(max_length=20)
    first_name = models.CharField(max_length=20)
    title = models.CharField(max_length=30, blank=True, null=True)
    # Self-referential FK modelling the management hierarchy; the legacy
    # column name `reports_to` is preserved via db_column.
    reports_to = models.ForeignKey('self', on_delete=models.CASCADE, db_column='reports_to', blank=True, null=True)
    birth_date = models.DateTimeField(blank=True, null=True)
    hire_date = models.DateTimeField(blank=True, null=True)
    address = models.CharField(max_length=70, blank=True, null=True)
    city = models.CharField(max_length=40, blank=True, null=True)
    state = models.CharField(max_length=40, blank=True, null=True)
    country = models.CharField(max_length=40, blank=True, null=True)
    postal_code = models.CharField(max_length=10, blank=True, null=True)
    phone = models.CharField(max_length=24, blank=True, null=True)
    fax = models.CharField(max_length=24, blank=True, null=True)
    email = models.CharField(max_length=60, blank=True, null=True)

    class Meta:
        db_table = 'employees'
class Film(models.Model):
    """Film record; maps onto the legacy `films` table."""
    title = models.CharField(max_length=255)
    description = models.TextField(blank=True, null=True)
    release_year = models.IntegerField(blank=True, null=True)
    # Plain integer, not a ForeignKey — presumably references a languages
    # table generated elsewhere; verify before adding a relation.
    language_id = models.SmallIntegerField()
    rental_duration = models.SmallIntegerField()
    rental_rate = models.DecimalField(max_digits=4, decimal_places=2)
    length = models.SmallIntegerField(blank=True, null=True)
    replacement_cost = models.DecimalField(max_digits=5, decimal_places=2)
    rating = models.TextField(blank=True, null=True)
    last_update = models.DateTimeField()
    special_features = models.TextField(blank=True, null=True)  # This field type is a guess.
    fulltext = models.TextField()  # This field type is a guess.

    class Meta:
        db_table = 'films'
class Genre(models.Model):
    """Music genre; maps onto the legacy `genres` table."""
    name = models.CharField(max_length=120, blank=True, null=True)

    class Meta:
        db_table = 'genres'

    def __str__(self):
        # `name` is nullable; __str__ must return a str — returning None
        # raises TypeError in the admin and in templates.
        return self.name or ""
class MediaType(models.Model):
    """Track media type; maps onto the legacy `media_types` table."""
    name = models.CharField(max_length=120, blank=True, null=True)

    class Meta:
        db_table = 'media_types'

    def __str__(self):
        # `name` is nullable; __str__ must return a str — returning None
        # raises TypeError in the admin and in templates.
        return self.name or ""
class PlaylistTrack(models.Model):
    """Join table between Playlist and Track (legacy `playlist_track`).

    Django lacks composite primary keys, so the generated model marks the
    playlist FK as primary_key and enforces uniqueness of the pair via
    unique_together.
    """
    playlist = models.OneToOneField('Playlist', models.DO_NOTHING, primary_key=True)
    track = models.ForeignKey('Track', on_delete=models.CASCADE)

    class Meta:
        db_table = 'playlist_track'
        unique_together = (('playlist', 'track'),)
class Playlist(models.Model):
    """Playlist record; maps onto the legacy `playlists` table."""
    name = models.CharField(max_length=120, blank=True, null=True)

    class Meta:
        db_table = 'playlists'
class Track(models.Model):
    """Track record; maps onto the legacy `tracks` table."""
    name = models.CharField(max_length=200)
    album = models.ForeignKey(Album, on_delete=models.CASCADE, blank=True, null=True)
    media_type = models.ForeignKey(MediaType, on_delete=models.CASCADE)
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE, blank=True, null=True)
    composer = models.CharField(max_length=220, blank=True, null=True)
    milliseconds = models.IntegerField()
    # NOTE(review): `bytes` shadows the builtin; renaming would change the
    # db column, so it is kept for legacy-schema compatibility.
    bytes = models.IntegerField(blank=True, null=True)
    unit_price = models.DecimalField(max_digits=10, decimal_places=2)

    class Meta:
        db_table = 'tracks'

    def __str__(self):
        return self.name
|
from .action_scheme import ActionScheme, DTypeString, TradeActionUnion
from .continuous_actions import ContinuousActions
from .discrete_actions import DiscreteActions
from .multi_discrete_actions import MultiDiscreteActions
# Registry mapping identifier strings to their ActionScheme classes.
_registry = {
    'continuous': ContinuousActions,
    'discrete': DiscreteActions,
    'multi-discrete': MultiDiscreteActions,
}
def get(identifier: str) -> ActionScheme:
    """Gets the `ActionScheme` that matches with the identifier.

    Arguments:
        identifier: The identifier for the `ActionScheme`

    Raises:
        KeyError: if identifier is not associated with any `ActionScheme`
    """
    # Membership test on the dict itself; `.keys()` was redundant.
    if identifier not in _registry:
        raise KeyError(f'Identifier {identifier} is not associated with any `ActionScheme`.')
    # Instantiate the registered scheme class and return the instance.
    return _registry[identifier]()
|
import tensorflow as tf
IDENTIFIER_OUTPUT_LAYER = "Output"
def get_out(output_layer: str, out_feature_dim, scale_node_size, name: str = 'decoder'):
    """Resolve an output-layer identifier to an instantiated decoder layer.

    Parameters
    ----------
    output_layer : str
        One of "gaussian", "nb", "nb_shared_disp", "nb_const_disp".
    out_feature_dim
        Output feature dimensionality passed as `original_dim`.
    scale_node_size
        Forwarded as `use_node_scale`.
    name : str
        Suffix appended to the generated layer name.

    Raises
    ------
    ValueError
        If `output_layer` is not a supported identifier.
    """
    dispatch = {
        "gaussian": (GaussianOutput, "Gaussian"),
        "nb": (NegBinOutput, "NegBin"),
        "nb_shared_disp": (NegBinSharedDispOutput, "NegBinSharedDisp"),
        "nb_const_disp": (NegBinConstDispOutput, "NegBinConstDisp"),
    }
    if output_layer not in dispatch:
        raise ValueError("tried to access a non-supported output layer %s" % output_layer)
    layer_cls, prefix = dispatch[output_layer]
    return layer_cls(
        original_dim=out_feature_dim,
        use_node_scale=scale_node_size,
        name=f"{prefix}{IDENTIFIER_OUTPUT_LAYER}_{name}",
    )
class LinearOutput(tf.keras.layers.Layer):
    """Linear output layer.

    Emits (eta_loc, eta_scale): clipped identity means plus a learned,
    feature-wise log-variance bias shared across observations.
    """

    def __init__(self, use_node_scale: bool = False, name: str = "linear_output", **kwargs):
        """Initialize LinearOutput.

        Parameters
        ----------
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.use_node_scale = use_node_scale
        self.var_bias = None  # created lazily in build()

    def get_config(self):
        """Get config LinearOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        # Bug fix: this class has no `original_dim` attribute, so including
        # it here raised AttributeError whenever the layer was serialized.
        config.update({"use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build LinearOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes.
        """
        genes_dim = input_shapes[0][-1]
        self.var_bias = self.add_weight("var_bias", shape=[1, genes_dim], initializer="zeros")

    def call(self, inputs, **kwargs):
        """Call LinearOutput layer.

        Parameters
        ----------
        inputs
            Tuple (mean, sf) of decoder means and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        eta_loc
        eta_scale
        """
        bound = 60.0
        mean, sf = inputs
        var = self.var_bias
        if self.use_node_scale:
            # Multiply the mean by the (safely clipped) size factors.
            mean = mean * tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip")
        var = tf.zeros_like(mean) + var  # broadcast
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -tf.exp(bound), tf.exp(bound), "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        eta_loc = mean_clip
        eta_scale = tf.exp(var_clip)
        return [eta_loc, eta_scale]
class LinearConstDispOutput(tf.keras.layers.Layer):
    """Linear output layer with constant dispersion.

    Like LinearOutput, but the log-variance is hard-coded to zero, so
    eta_scale is always exp(0) == 1.
    """

    def __init__(self, use_node_scale: bool = False, name: str = "linear_const_disp_output", **kwargs):
        """Initialize LinearConstDispOutput.

        Parameters
        ----------
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.use_node_scale = use_node_scale
        # NOTE(review): var_bias is never created or used by this layer
        # (the variance is fixed to zero in call()); kept for compatibility.
        self.var_bias = None

    def get_config(self):
        """Get config LinearConstDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        # Bug fix: this class has no `original_dim` attribute, so including
        # it here raised AttributeError whenever the layer was serialized.
        config.update({"use_node_scale": self.use_node_scale})
        return config

    def call(self, inputs, **kwargs):
        """Call LinearConstDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple (mean, sf) of decoder means and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        eta_loc
        eta_scale
        """
        bound = 60.0
        mean, sf = inputs
        var = tf.zeros_like(mean)  # fixed zero log-variance -> unit scale
        if self.use_node_scale:
            mean = mean * tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip")
        var = tf.zeros_like(mean) + var
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -tf.exp(bound), tf.exp(bound), "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        eta_loc = mean_clip
        eta_scale = tf.exp(var_clip)
        return [eta_loc, eta_scale]
class GaussianOutput(tf.keras.layers.Layer):
    """Log normal likelihood output layer.

    A dense head maps decoder activations to per-feature locations; the
    scale comes from a learned feature-wise log-variance bias.
    """

    def __init__(self, original_dim=None, use_node_scale: bool = False, name: str = "gaussian_output", **kwargs):
        """Initialize GaussianOutput.

        Parameters
        ----------
        original_dim
            Output feature dimensionality.
        use_node_scale : bool
            Whether to multiply the location by per-node size factors.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.use_node_scale = use_node_scale
        # All of these are created lazily in build().
        self.intermediate_dim = None
        self.means = None
        self.var_bias = None

    def get_config(self):
        """Return the layer configuration for serialization."""
        base = super().get_config().copy()
        base.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return base

    def build(self, input_shapes):
        """Create the dense location head and the shared log-variance bias."""
        self.intermediate_dim = input_shapes[0][2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")
        self.var_bias = self.add_weight("var_bias", shape=[1, self.original_dim], initializer="zeros")

    def call(self, inputs, **kwargs):
        """Map (activation, size_factor) inputs to (eta_loc, eta_scale)."""
        clip_bound = 60.0
        hidden, size_factors = inputs
        node_dim = hidden.shape[1]
        # Flatten node axis so the dense head acts per observation.
        hidden = tf.reshape(hidden, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        loc = self.means(hidden)
        loc = tf.reshape(loc, [-1, node_dim, self.original_dim], name="output_layer_reshape_mean")
        log_scale = tf.zeros_like(loc) + self.var_bias  # broadcast shared bias
        if self.use_node_scale:
            loc = loc * tf.clip_by_value(size_factors, tf.exp(-clip_bound), tf.exp(clip_bound), "decoder_sf_clip")
        # Clip to the range supported by the downstream log/exp operations.
        loc = tf.clip_by_value(loc, -tf.exp(clip_bound), tf.exp(clip_bound), "decoder_clip")
        log_scale = tf.clip_by_value(log_scale, -clip_bound, clip_bound, "decoder_clip")
        return [loc, tf.exp(log_scale)]
class GaussianConstDispOutput(tf.keras.layers.Layer):
    """Log normal likelihood output layer.

    Variant of GaussianOutput with a fixed scale: the log-variance is
    hard-coded to zero in call(), so eta_scale is always exp(0) == 1.
    NOTE(review): the default layer name "gaussian_output" collides with
    GaussianOutput's default — confirm this is intentional.
    """
    def __init__(self, original_dim=None, use_node_scale: bool = False, name: str = "gaussian_output", **kwargs):
        """Initialize GaussianConstDispOutput.

        Parameters
        ----------
        original_dim
            original dimension.
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None  # dense location head, created in build()

    def get_config(self):
        """Get config GaussianConstDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build GaussianConstDispOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")

    def call(self, inputs, **kwargs):
        """Call GaussianConstDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple (activation, sf) of decoder activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        eta_loc
        eta_scale
        """
        bound = 60.0
        activation, sf = inputs
        in_node_dim = activation.shape[1]
        # Flatten the node axis so the dense head acts per observation.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        mean = tf.reshape(mean, [-1, in_node_dim, self.original_dim], name="output_layer_reshape_mean")
        var = tf.zeros_like(mean)  # fixed zero log-variance -> unit scale
        if self.use_node_scale:
            mean = mean * tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip")
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -tf.exp(bound), tf.exp(bound), "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        eta_loc = mean_clip
        eta_scale = tf.exp(var_clip)
        return [eta_loc, eta_scale]
class NegBinOutput(tf.keras.layers.Layer):
    """Negative binomial output layer.

    Two dense heads map decoder activations to per-feature log-mean and
    log-dispersion; both are clipped and exponentiated for numerical safety.
    """

    def __init__(self, original_dim=None, use_node_scale: bool = False, name: str = "neg_bin_output", **kwargs):
        """Initialize NegBinOutput.

        Parameters
        ----------
        original_dim
            Output feature dimensionality.
        use_node_scale : bool
            Whether to scale the mean by per-node size factors.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.use_node_scale = use_node_scale
        # All of these are created lazily in build().
        self.intermediate_dim = None
        self.means = None
        self.var = None

    def get_config(self):
        """Return the layer configuration for serialization."""
        base = super().get_config().copy()
        base.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return base

    def build(self, input_shapes):
        """Create the mean and dispersion dense heads."""
        self.intermediate_dim = input_shapes[0][2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")
        self.var = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")

    def call(self, inputs, **kwargs):
        """Map (activation, size_factor) inputs to (exp_mean, exp_var)."""
        clip_bound = 60.0
        hidden, size_factors = inputs
        hidden = tf.reshape(hidden, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        log_mean = self.means(hidden)
        log_disp = self.var(hidden)
        if self.use_node_scale:
            # Size factors multiply the mean, i.e. are added in log space.
            clipped_sf = tf.clip_by_value(size_factors, tf.exp(-clip_bound), tf.exp(clip_bound), "decoder_sf_clip")
            log_mean = log_mean + tf.math.log(clipped_sf)
        # Clip to the range supported by exp before exponentiating.
        log_mean = tf.clip_by_value(log_mean, -clip_bound, clip_bound, "decoder_clip")
        log_disp = tf.clip_by_value(log_disp, -clip_bound, clip_bound, "decoder_clip")
        return [tf.exp(log_mean), tf.exp(log_disp)]
class NegBinSharedDispOutput(tf.keras.layers.Layer):
    """Negative binomial output layer with dispersion shared over features.

    The dispersion is a learned feature-wise bias (var_bias) broadcast over
    all observations, rather than a dense head on the activations.
    """
    def __init__(
        self, original_dim=None, use_node_scale: bool = False, name: str = "neg_bin_shared_disp_output", **kwargs
    ):
        """Initialize NegBinSharedDispOutput.

        Parameters
        ----------
        original_dim
            original dimension.
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None  # dense mean head, created in build()
        # NOTE(review): self.var is declared but never created or used; the
        # dispersion comes from var_bias below.
        self.var = None
        self.var_bias = None

    def get_config(self):
        """Get config NegBinSharedDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build NegBinSharedDispOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")
        self.var_bias = self.add_weight("var_bias", shape=[1, self.original_dim], initializer="zeros")

    def call(self, inputs, **kwargs):
        """Call NegBinSharedDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple (activation, sf) of decoder activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        exp_mean
        exp_var
        """
        bound = 60.0
        activation, sf = inputs
        in_node_dim = activation.shape[1]
        # Flatten the node axis so the dense head acts per observation.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        var = self.var_bias
        mean = tf.reshape(mean, [-1, in_node_dim, self.original_dim], name="output_layer_reshape_mean")
        if self.use_node_scale:
            # Size factors multiply the mean, i.e. are added in log space.
            mean = mean + tf.math.log(tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip"))
        var = tf.zeros_like(mean) + var  # broadcast
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        exp_mean = tf.exp(mean_clip)
        exp_var = tf.exp(var_clip)
        return [exp_mean, exp_var]
class NegBinConstDispOutput(tf.keras.layers.Layer):
    """Negative binomial output layer with constant dispersion.

    The log-dispersion is hard-coded to zero in call(), so exp_var is
    always exp(0) == 1.
    """
    def __init__(
        self, original_dim=None, use_node_scale: bool = False, name: str = "neg_bin_const_disp_output", **kwargs
    ):
        """Initialize NegBinConstDispOutput.

        Parameters
        ----------
        original_dim
            original dimension.
        use_node_scale : bool
            Use node scale.
        name : str
            Layer name.
        kwargs
            Arbitrary keyword arguments.
        """
        super().__init__(name=name, **kwargs)
        self.original_dim = original_dim
        self.intermediate_dim = None  # set in build() from the input shape
        self.use_node_scale = use_node_scale
        self.means = None  # dense mean head, created in build()
        # NOTE(review): self.var is declared but never created or used; the
        # dispersion is fixed to zero in call().
        self.var = None

    def get_config(self):
        """Get config NegBinConstDispOutput.

        Returns
        -------
        config
        """
        config = super().get_config().copy()
        config.update({"original_dim": self.original_dim, "use_node_scale": self.use_node_scale})
        return config

    def build(self, input_shapes):
        """Build NegBinConstDispOutput layer.

        Parameters
        ----------
        input_shapes
            Input shapes.
        """
        input_shape = input_shapes[0]
        self.intermediate_dim = input_shape[2]
        self.means = tf.keras.layers.Dense(units=self.original_dim, use_bias=True, activation="linear")

    def call(self, inputs, **kwargs):
        """Call NegBinConstDispOutput layer.

        Parameters
        ----------
        inputs
            Tuple (activation, sf) of decoder activations and size factors.
        kwargs
            Arbitrary keyword arguments.

        Returns
        -------
        exp_mean
        exp_var
        """
        bound = 60.0
        activation, sf = inputs
        in_node_dim = activation.shape[1]
        # Flatten the node axis so the dense head acts per observation.
        activation = tf.reshape(activation, [-1, self.intermediate_dim], name="output_layer_reshape_activation_fwdpass")
        mean = self.means(activation)
        var = tf.zeros_like(mean)  # fixed zero log-dispersion -> exp_var == 1
        mean = tf.reshape(mean, [-1, in_node_dim, self.original_dim], name="output_layer_reshape_mean")
        if self.use_node_scale:
            # Size factors multiply the mean, i.e. are added in log space.
            mean = mean + tf.math.log(tf.clip_by_value(sf, tf.exp(-bound), tf.exp(bound), "decoder_sf_clip"))
        var = tf.zeros_like(mean) + var  # broadcast
        # clip to log of largest values supported by log operation
        mean_clip = tf.clip_by_value(mean, -bound, bound, "decoder_clip")
        var_clip = tf.clip_by_value(var, -bound, bound, "decoder_clip")
        exp_mean = tf.exp(mean_clip)
        exp_var = tf.exp(var_clip)
        return [exp_mean, exp_var]
|
#!/usr/bin/env python
# The adjacent candidate segments were joined and the boundary is relocated by the outmost ORF if an ORF intersects with a segment.
# Note: the segment will only be extended
# For features: sum the numbers; recompute the percentages
#
# Author: Bingxin Lu
# Affiliation : National University of Singapore
# E-mail : bingxin@comp.nus.edu.sg
#
# Input:
# A set of intervals
# A list of gene positions (optional)
# The genome sequence file (required when the intervals are predicted from contigs)
#
# Output:
# Intervals with new start and end position
#
#
import os
import optparse
import sys
import os
parentdir = os.path.dirname(os.path.dirname(sys.argv[0]))
sys.path.insert(0, parentdir)
from util.quicksect import IntervalNode
from util.interval_operations import get_intervals, get_intervals_contigs, find, get_window_tree
from util.parse_sequence import get_contig_IDs, get_contig_gene_IDs
# Suppose the gene positions are at 2nd and 3rd columns
def get_genes(intervalfile):
    """Read gene coordinates from a tab-separated file.

    The start/end positions are assumed to be in columns 2 and 3.
    Returns a list of (start, end) int tuples, one per line.
    """
    coords = []
    with open(intervalfile, 'r') as fin:
        for line in fin:
            fields = line.strip().split('\t')
            coords.append((int(fields[1]), int(fields[2])))
    return coords
# Find genes in each sequence
# Find genes in each sequence
def get_genes_contig(intervalfile, gene_id_mapping, contig_id_mapping):
    """Group gene coordinates by the contig each gene belongs to.

    Each input line holds a gene id plus start/end columns; the gene id is
    mapped to a gene name, whose "<contig>_<index>" form yields the contig.
    Returns {contig_id: [(start, end), ...]}.
    """
    by_contig = {}
    with open(intervalfile, 'r') as fin:
        for line in fin:
            fields = line.strip().split('\t')
            gene_name = gene_id_mapping[int(fields[0])]
            # Gene names look like "<contig_name>_<gene_index>".
            contig_name = gene_name[0:gene_name.rfind('_')]
            cid = contig_id_mapping[contig_name]
            by_contig.setdefault(cid, []).append((int(fields[1]), int(fields[2])))
    return by_contig
def merge_intervals(intervals):
    """Yield merged (start, end) tuples for overlapping/adjacent intervals.

    Intervals with a zero gap (e.g. (1, 5) and (6, 9)) are joined because
    the coordinates are inclusive. Yields nothing for empty input.
    """
    # Bug fix: the original guard was `len(intervals) < 0`, which can never
    # be true, so an empty list crashed on `intervals[0]` with IndexError.
    if not intervals:
        return
    intervals = sorted(intervals, key=lambda x: (int(x[0]), int(x[1])))
    saved = list(intervals[0])
    for st, en in intervals:
        if st - 1 <= saved[1]:
            # Overlapping or directly adjacent: extend the saved interval.
            saved[1] = max(saved[1], en)
        else:
            yield tuple(saved)
            saved[0] = st
            saved[1] = en
    yield tuple(saved)
# Merge overlapped regions or regions with small gap
# Merge overlapped regions or regions with small gap
def merge_intervals_offset(intervals, allow_gap, gap_len):
    """Yield merged intervals, optionally bridging gaps up to `gap_len`.

    With allow_gap=False this behaves like merge_intervals (gap 0 merges);
    with allow_gap=True, intervals closer than gap_len are also joined.
    Yields nothing for empty input.
    """
    # Guard empty input: the original unconditionally read intervals[0]
    # and raised IndexError on an empty list.
    if not intervals:
        return
    intervals = sorted(intervals, key=lambda x: (int(x[0]), int(x[1])))
    saved = list(intervals[0])
    # offset is the largest allowed gap + 1 between mergeable intervals.
    offset = gap_len if allow_gap else 1
    for st, en in intervals:
        if st - offset <= saved[1]:
            saved[1] = max(saved[1], en)
        else:
            yield tuple(saved)
            saved[0] = st
            saved[1] = en
    yield tuple(saved)
# Merge overlapped regions or regions with small gap
def merge_intervals_contigs(intervals_dict, allow_gap, gap_len):
merged_intervals = []
for cid, intervals in intervals_dict.items():
m_intervals = list(merge_intervals_offset(intervals, allow_gap, gap_len))
for start, end in m_intervals:
ns = '_'.join([str(cid), str(start)])
ne = '_'.join([str(cid), str(end)])
merged_intervals.append((ns, ne))
return merged_intervals
def write2file(filename, records):
    """Write 2- or 3-field tuples to `filename`, tab-separated, one per line.

    The parameter was originally named `list`, shadowing the builtin; all
    in-file callers pass it positionally, so the rename is compatible.
    """
    # `with` guarantees the handle is closed even if a write fails.
    with open(filename, 'w') as outfile:
        for value in records:
            if len(value) == 3:
                line = '%s\t%s\t%s\n' % (
                    str(value[0]), str(value[1]), str(value[2]))
            else:
                line = '%s\t%s\n' % (str(value[0]), str(value[1]))
            outfile.write(line)
def extend_boundary(intervals, genes):
    """Extend each interval outward to the boundary of overlapping ORFs.

    An endpoint is moved only when more than half of the boundary gene lies
    inside the interval (the overhang is under half the gene length), so
    segments are only ever extended, never shrunk.
    """
    # Interval tree over the gene coordinates to make overlap queries fast.
    tree = get_window_tree(genes)
    extended = []
    for start, end in intervals:
        hits = find(start, end, tree)
        if not hits:
            extended.append((start, end))
            continue
        ordered = sorted(hits, key=lambda iv: (int(iv[0]), int(iv[1])))
        left_gene = ordered[0]
        right_gene = ordered[-1]
        left_len = left_gene[1] - left_gene[0] + 1
        left_overhang = start - left_gene[0] + 1
        right_len = right_gene[1] - right_gene[0] + 1
        right_overhang = right_gene[1] - end + 1
        new_start = left_gene[0] if (left_gene[0] < start and left_overhang < left_len / 2) else start
        new_end = right_gene[1] if (right_gene[1] > end and right_overhang < right_len / 2) else end
        extended.append((new_start, new_end))
    return extended
def extend_boundary_contig(intervals, genes_dict):
    """Per-contig version of extend_boundary.

    `intervals` holds ("<cid>_<start>", "<cid>_<end>") string pairs;
    `genes_dict` maps contig id to its gene coordinate list. Returns
    {contig_id: [(start, end), ...]} with boundaries relocated by the
    outermost overlapping ORFs (dict form eases later overlap merging).
    """
    # One interval tree per contig to make overlap queries fast.
    tree_dict = {}
    for cid, genes in genes_dict.items():
        tree_dict[cid] = get_window_tree(genes)
    new_intervals = {}
    for p1, p2 in intervals:
        # Endpoints are encoded as "<contig_id>_<coordinate>".
        mark = p1.index('_')
        start = int(p1[mark + 1:])
        end = int(p2[mark + 1:])
        contig_id = int(p1[0:mark])
        overlap = find(start, end, tree_dict[contig_id])
        # Removed leftover debug prints of each overlapping interval set.
        if len(overlap) > 0:
            sorted_overlap = sorted(
                overlap, key=lambda x: (int(x[0]), int(x[1])))
            ostart = sorted_overlap[0][0]
            oend = sorted_overlap[-1][1]
            intv_size = sorted_overlap[0][1] - sorted_overlap[0][0] + 1
            hang_size = start - ostart + 1
            intv_size1 = sorted_overlap[-1][1] - sorted_overlap[-1][0] + 1
            hang_size1 = oend - end + 1
            # Extend only when more than half of the gene is inside the region.
            nstart = ostart if (ostart < start and hang_size < intv_size / 2) else start
            nend = oend if (oend > end and hang_size1 < intv_size1 / 2) else end
            coord = (nstart, nend)
        else:
            coord = (start, end)
        # setdefault replaces the redundant "key present" if/else branches.
        new_intervals.setdefault(contig_id, []).append(coord)
    return new_intervals
if __name__ == '__main__':
    # Entry point: merge predicted GI intervals, optionally relocate their
    # boundaries by overlapping gene (ORF) positions, and write the result
    # next to the input file with a "merged_" prefix.
    parser = optparse.OptionParser()
    parser.add_option("-g", "--genefile", dest="genefile",
                      help="input file of genes and their locations")
    parser.add_option("-i", "--gifile", dest="gifile",
                      help="input file of predicted GIs")
    parser.add_option("-o", "--outfile", dest="outfile", help="output file")
    parser.add_option("-p", dest="allow_gap", default=False, action="store_true",
                      help="allow to combine adjacent intervals that are very close")
    parser.add_option("-l", "--gap_len", dest="gap_len", type='int',
                      default=2500, help="threshold to merge adjacent intervals")
    parser.add_option("-a", "--has_gene", dest="has_gene", action='store_true', default=False,
                      help="The gene predictions are available")
    parser.add_option("-c", "--is_contig", dest="is_contig", action='store_true',
                      default=False, help="Analyze contigs from unassembled genomes")
    parser.add_option("-m", "--genome_file", dest="genome_file",
                      help="input genome file in fasta format")
    parser.add_option("-d", "--gid_file", dest="gid_file",
                      help="input file of predicted gene IDs")
    (options, args) = parser.parse_args()
    directory = os.path.dirname(os.path.realpath(options.gifile))
    suffix = os.path.basename(options.gifile)
    if options.is_contig:
        # Contig mode: intervals are grouped per contig and endpoints are
        # re-encoded as "<contig_id>_<coordinate>" strings.
        orig_intervals = get_intervals_contigs(options.gifile)
        count_orig = 0
        count_merged = 0
        merged_intervals = []
        for id, intervals in orig_intervals.items():
            count_orig += len(intervals)
            m_intervals = list(merge_intervals(intervals))
            count_merged += len(m_intervals)
            for start, end in m_intervals:
                ns = '_'.join([str(id), str(start)])
                ne = '_'.join([str(id), str(end)])
                merged_intervals.append((ns, ne))
        print('The number of intevals before merging adjacent ones: %d' % count_orig)
        print('The number of intevals after merging adjacent ones: %d' % count_merged)
        if options.has_gene:
            # Relocate boundaries by overlapping ORFs, then merge again to
            # remove any overlaps introduced by the extension.
            contig_id_mapping = get_contig_IDs(options.genome_file)
            gene_id_mapping = get_contig_gene_IDs(options.gid_file)
            genes_dict = get_genes_contig(options.genefile, gene_id_mapping, contig_id_mapping)
            new_intervals_dict = extend_boundary_contig(merged_intervals, genes_dict)
            # Merge intervals again to avoid overlapping regions and combine close intervals
            merged_new_intervals = merge_intervals_contigs(
                new_intervals_dict, options.allow_gap, options.gap_len)
            print('The number of intevals after merging close ones with gap %d bp: %d' % (options.gap_len, len(merged_new_intervals)))
            write2file(os.sep.join([directory, 'merged_']
                                   ) + suffix, merged_new_intervals)
        else:
            write2file(os.sep.join(
                [directory, 'merged_']) + suffix, merged_intervals)
    else:
        # Complete-genome mode: plain (start, end) coordinate pairs.
        orig_intervals = get_intervals(options.gifile)
        merged_intervals = list(merge_intervals(orig_intervals))
        print('The number of intevals before merging adjacent ones: %d' % len(orig_intervals))
        print('The number of intevals after merging adjacent ones: %d' % len(merged_intervals))
        if options.has_gene:
            genes = get_genes(options.genefile)
            new_intervals = extend_boundary(merged_intervals, genes)
            merged_new_intervals = list(merge_intervals_offset(
                new_intervals, options.allow_gap, options.gap_len))
            print('The number of intevals after merging close ones with gap %d bp: %d' % (options.gap_len, len(merged_new_intervals)))
            write2file(os.sep.join([directory, 'merged_']
                                   ) + suffix, merged_new_intervals)
        else:
            write2file(os.sep.join(
                [directory, 'merged_']) + suffix, merged_intervals)
|
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.db.models.query import QuerySet
from django.urls import reverse
from django.utils.html import format_html
from .models import Post, Category, Tag, Comment, Link, SideBar
class BaseOwnerAdmin(admin.ModelAdmin):
    """Admin base that scopes records to the logged-in user.

    Hides the ``owner`` field from the form, stamps the requesting user
    as owner on every save, and restricts the changelist queryset to
    rows owned by that user.
    """

    exclude = ("owner",)

    def save_model(self, request, obj, form, change):
        # Force ownership to the current user regardless of form input.
        obj.owner = request.user
        return super().save_model(request, obj, form, change)

    def get_queryset(self, request):
        # Narrow the default queryset to the requesting user's own rows.
        base_qs: QuerySet = super().get_queryset(request)
        return base_qs.filter(owner=request.user)
class CategoryOwnerFilter(admin.SimpleListFilter):
    """Changelist filter that only offers the current user's categories."""

    title = "分类过滤"
    parameter_name = "owner_category"

    def lookups(self, request, model_admin):
        # Choices are (id, name) pairs of categories owned by the requester.
        return Category.objects.filter(owner=request.user).values_list("id", "name")

    def queryset(self, request, queryset: QuerySet):
        # self.value() is the selected category id, or None when unfiltered.
        # Reuse the local instead of calling self.value() a second time.
        category_id = self.value()
        if category_id:
            return queryset.filter(category_id=category_id)
        return queryset
@admin.register(Category)
class CategoryAdmin(BaseOwnerAdmin):
    """Category admin; ownership stamping is inherited from BaseOwnerAdmin.

    The previous ``save_model`` override duplicated
    ``BaseOwnerAdmin.save_model`` verbatim and has been removed —
    behavior is unchanged.
    """

    list_display = ("name", "status", "is_nav", "created_time", "post_count")
    fields = ("name", "status", "is_nav")

    def post_count(self, obj):
        """Changelist column: number of posts attached to this category."""
        return obj.post_set.count()

    post_count.short_description = "文章数量"
@admin.register(Tag)
class TagAdmin(BaseOwnerAdmin):
    """Tag admin; per-user scoping/ownership comes from BaseOwnerAdmin."""
    list_display = ("name", "status", "created_time")
    fields = ("name", "status")
@admin.register(Post)
class PostAdmin(BaseOwnerAdmin):
    """Post admin; per-user scoping/ownership comes from BaseOwnerAdmin."""
    list_display = [
        "title",
        "category",
        "status",
        "created_time",
        "operator",
    ]
    # No clickable changelist columns; editing goes through the custom
    # "operator" link column instead.
    list_display_links = ()
    # list_filter = ["category"]
    # Filter sidebar only offers categories owned by the requesting user.
    list_filter = [CategoryOwnerFilter]
    search_fields = ["title", "category__name"]
    actions_on_top = True
    actions_on_bottom = True
    save_on_top = True
    fields = [
        ("category", "title"),
        "desc",
        "status",
        "content",
        "tag",
    ]
    def operator(self, obj):
        """Changelist column: render an edit link for the given post."""
        return format_html(
            """<a href="{}">编辑</a>""", reverse("admin:blog_post_change", args=(obj.id,))
        )
    operator.short_description = "操作"
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Comment admin; plain changelist of comment metadata (no owner scoping)."""
    list_display = ("target", "author", "content", "home_site", "created_time")
@admin.register(Link)
class LinkAdmin(admin.ModelAdmin):
    """Link admin that stamps the current user as owner on save."""

    list_display = ("title", "href", "status", "rank", "created_time")
    fields = ("title", "href", "status", "rank")

    def save_model(self, request, obj, form, change):
        # BUG FIX: was ``request.owner`` — HttpRequest has no ``owner``
        # attribute, so saving raised AttributeError. The current user is
        # ``request.user`` (matches BaseOwnerAdmin.save_model above).
        obj.owner = request.user
        return super().save_model(request, obj, form, change)
@admin.register(SideBar)
class SideBarAdmin(admin.ModelAdmin):
    """SideBar admin that stamps the current user as owner on save."""

    list_display = ("title", "display_type", "content", "created_time")
    fields = ("title", "display_type", "content")

    def save_model(self, request, obj, form, change):
        # BUG FIX: was ``request.owner`` — HttpRequest has no ``owner``
        # attribute, so saving raised AttributeError. The current user is
        # ``request.user`` (matches BaseOwnerAdmin.save_model above).
        obj.owner = request.user
        return super().save_model(request, obj, form, change)
@admin.register(LogEntry,)
class LogEntryAdmin(admin.ModelAdmin):
    """Expose Django's built-in admin LogEntry records for auditing."""
    list_display = ["object_repr", "object_id", "action_flag", "user", "change_message"]
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.mqtt import MQTT # noqa: F401,E501
from swagger_server import util
class SourceOutput(Model):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, mqtt: MQTT=None, unit: str=None, horizon_values: bool=True): # noqa: E501
        """SourceOutput - a model defined in Swagger
        :param mqtt: The mqtt of this SourceOutput. # noqa: E501
        :type mqtt: MQTT
        :param unit: The unit of this SourceOutput. # noqa: E501
        :type unit: str
        :param horizon_values: The horizon_values of this SourceOutput. # noqa: E501
        :type horizon_values: bool
        """
        # Attribute name -> declared Python type; consumed by
        # util.deserialize_model when building instances from dicts.
        self.swagger_types = {
            'mqtt': MQTT,
            'unit': str,
            'horizon_values': bool
        }
        # Python attribute name -> JSON key name in the wire format.
        self.attribute_map = {
            'mqtt': 'mqtt',
            'unit': 'unit',
            'horizon_values': 'horizon_values'
        }
        # NOTE(review): direct assignment bypasses the property setters, so
        # the mqtt None-check in the setter below is NOT applied here and the
        # default mqtt=None construction is allowed.
        self._mqtt = mqtt
        self._unit = unit
        self._horizon_values = horizon_values
    @classmethod
    def from_dict(cls, dikt) -> 'SourceOutput':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The SourceOutput of this SourceOutput. # noqa: E501
        :rtype: SourceOutput
        """
        return util.deserialize_model(dikt, cls)
    @property
    def mqtt(self) -> MQTT:
        """Gets the mqtt of this SourceOutput.
        :return: The mqtt of this SourceOutput.
        :rtype: MQTT
        """
        return self._mqtt
    @mqtt.setter
    def mqtt(self, mqtt: MQTT):
        """Sets the mqtt of this SourceOutput.
        :param mqtt: The mqtt of this SourceOutput.
        :type mqtt: MQTT
        """
        # mqtt is a required field when assigned through the property.
        if mqtt is None:
            raise ValueError("Invalid value for `mqtt`, must not be `None`") # noqa: E501
        self._mqtt = mqtt
    @property
    def unit(self) -> str:
        """Gets the unit of this SourceOutput.
        :return: The unit of this SourceOutput.
        :rtype: str
        """
        return self._unit
    @unit.setter
    def unit(self, unit: str):
        """Sets the unit of this SourceOutput.
        :param unit: The unit of this SourceOutput.
        :type unit: str
        """
        self._unit = unit
    @property
    def horizon_values(self) -> bool:
        """Gets the horizon_values of this SourceOutput.
        :return: The horizon_values of this SourceOutput.
        :rtype: bool
        """
        return self._horizon_values
    @horizon_values.setter
    def horizon_values(self, horizon_values: bool):
        """Sets the horizon_values of this SourceOutput.
        :param horizon_values: The horizon_values of this SourceOutput.
        :type horizon_values: bool
        """
        self._horizon_values = horizon_values
|
import argparse
import os
import sys
from infraboxcli.push import push
from infraboxcli.run import run
from infraboxcli.graph import graph
from infraboxcli.validate import validate
from infraboxcli.list_jobs import list_jobs
from infraboxcli.log import logger
from infraboxcli.init import init
from infraboxcli.pull import pull
version = '0.6.5'  # CLI version reported by the `version` sub-command
def main():
    """Entry point of the infrabox CLI.

    Builds the argument parser with all sub-commands, locates the
    project's infrabox.json (from ``-f`` or by walking up from the
    current directory), validates environment prerequisites and
    dispatches to the selected sub-command handler.
    """
    # Determine the current user name for the default local-cache path.
    # ``pwd`` is POSIX-only, so fall back to 'unknown' on Windows.
    username = 'unknown'
    if os.name != 'nt':
        import pwd
        username = pwd.getpwuid(os.getuid()).pw_name
    parser = argparse.ArgumentParser(prog="infrabox")
    parser.add_argument("--url",
                        required=False,
                        default=os.environ.get('INFRABOX_URL', None),
                        help="Address of the API server")
    parser.add_argument("--ca-bundle",
                        required=False,
                        default=os.environ.get('INFRABOX_CA_BUNDLE', None),
                        help="Path to a CA_BUNDLE file or directory with certificates of trusted CAs")
    parser.add_argument("-f", dest='infrabox_json_file', required=False, type=str,
                        help="Path to an infrabox.json file")
    sub_parser = parser.add_subparsers(help='sub-command help')
    # version
    version_init = sub_parser.add_parser('version', help='Show the current version')
    version_init.set_defaults(version=version)
    # init
    parser_init = sub_parser.add_parser('init', help='Create a simple project')
    parser_init.set_defaults(is_init=True)
    parser_init.set_defaults(func=init)
    # push
    parser_push = sub_parser.add_parser('push', help='Push a local project to InfraBox')
    parser_push.add_argument("--show-console", action='store_true', required=False,
                             help="Show the console output of the jobs")
    parser_push.set_defaults(show_console=False)
    parser_push.set_defaults(validate_only=False)
    parser_push.set_defaults(func=push)
    # pull
    parser_pull = sub_parser.add_parser('pull', help='Pull a remote job')
    parser_pull.set_defaults(is_pull=True)
    parser_pull.add_argument("--job-id", required=True)
    parser_pull.add_argument("--no-container", required=False, dest='pull_container', action='store_false',
                             help="Only the inputs will be downloaded but not the actual container. Implies --no-run.")
    parser_pull.set_defaults(pull_container=True)
    parser_pull.add_argument("--no-run", required=False, dest='run_container', action='store_false',
                             help="The container will not be run.")
    parser_pull.set_defaults(run_container=True)
    parser_pull.set_defaults(func=pull)
    # graph
    parser_graph = sub_parser.add_parser('graph', help='Generate a graph of your local jobs')
    parser_graph.add_argument("--output", required=True, type=str,
                              help="Path to the output file")
    parser_graph.set_defaults(func=graph)
    # validate
    validate_graph = sub_parser.add_parser('validate', help='Validate infrabox.json')
    validate_graph.set_defaults(func=validate)
    # list
    list_job = sub_parser.add_parser('list', help='List all available jobs')
    list_job.set_defaults(func=list_jobs)
    # run
    parser_run = sub_parser.add_parser('run', help='Run your jobs locally')
    parser_run.add_argument("job_name", nargs="?", type=str,
                            help="Job name to execute")
    parser_run.add_argument("--no-rm", action='store_true', required=False,
                            help="Does not run 'docker-compose rm' before building")
    parser_run.add_argument("--build-arg", required=False, type=str, nargs='+',
                            help="Set docker build arguments")
    parser_run.add_argument("--env", required=False, type=str, nargs='+',
                            help="Override environment variables")
    parser_run.add_argument("--env-file", required=False, type=str, default=None,
                            help="Environment file to override environment values")
    parser_run.add_argument("-t", dest='tag', required=False, type=str,
                            help="Docker image tag")
    parser_run.add_argument("-c", "--children", action='store_true',
                            help="Also run children of a job")
    parser_run.add_argument("--local-cache", required=False, type=str,
                            default="/tmp/{}/infrabox/local-cache".format(username),
                            help="Path to the local cache")
    parser_run.set_defaults(no_rm=False)
    parser_run.set_defaults(func=run)
    # Parse args
    args = parser.parse_args()
    if 'version' in args:
        print('infraboxcli %s' % version)
        return
    if "DOCKER_HOST" in os.environ:
        logger.error("DOCKER_HOST is set")
        logger.error("infrabox can't be used to run jobs on a remote machine")
        sys.exit(1)
    if args.ca_bundle:
        # The literal string "false" disables certificate verification.
        if args.ca_bundle.lower() == "false":
            args.ca_bundle = False
        else:
            if not os.path.exists(args.ca_bundle):
                logger.error("INFRABOX_CA_BUNDLE: %s not found" % args.ca_bundle)
                sys.exit(1)
    if args.infrabox_json_file:
        if not os.path.exists(args.infrabox_json_file):
            logger.error('%s does not exist' % args.infrabox_json_file)
            sys.exit(1)
        p = os.path.abspath(args.infrabox_json_file)
        # BUG FIX: use os.path.dirname instead of slicing at rfind('/'),
        # which broke on Windows path separators.
        args.project_root = os.path.dirname(p)
        args.infrabox_json = p
        args.project_name = os.path.basename(p)
    else:
        # Find infrabox.json by walking up from the current directory.
        p = os.getcwd()
        while True:
            tb = os.path.join(p, 'infrabox.json')
            if os.path.exists(tb):
                args.project_root = p
                args.infrabox_json = tb
                args.project_name = os.path.basename(p)
                break
            parent = os.path.dirname(p)
            if parent == p:
                # Reached the filesystem root without finding infrabox.json.
                # BUG FIX: the old rfind('/') slicing skipped the root
                # directory and was not portable to Windows paths.
                break
            p = parent
    if 'job_name' not in args:
        args.children = True
    if 'project_root' not in args and 'is_init' not in args and 'is_pull' not in args:
        logger.error("infrabox.json not found in current or any parent directory")
        sys.exit(1)
    # BUG FIX: invoking the CLI without any sub-command used to crash with
    # AttributeError on args.func; print usage and exit instead.
    if not hasattr(args, 'func'):
        parser.print_help()
        sys.exit(1)
    # Run command
    args.func(args)
|
from django.contrib.auth import get_user_model
from phoenix.core.models import Outage, Solution, System
def get_outage(with_solution=False):
user = get_user_model().objects.create(username="unittest", password="unittest")
system = System.objects.create(name="Unittest-system")
outage = Outage(
summary="unittest outage",
created_by=user,
communication_assignee=user,
solution_assignee=user,
sales_affected_choice="Y",
b2b_partners_affected_choice="Y",
sales_affected="test",
systems_affected=system,
resolved=True,
)
outage.save()
if with_solution:
solution = Solution(outage=outage, created_by=user)
solution.save()
return outage
|
#
# PySNMP MIB module TIMETRA-LOG-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TIMETRA-LOG-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:09:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
SnmpSecurityLevel, SnmpMessageProcessingModel, SnmpAdminString = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpSecurityLevel", "SnmpMessageProcessingModel", "SnmpAdminString")
snmpNotifyEntry, = mibBuilder.importSymbols("SNMP-NOTIFICATION-MIB", "snmpNotifyEntry")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
sysDescr, sysObjectID = mibBuilder.importSymbols("SNMPv2-MIB", "sysDescr", "sysObjectID")
MibIdentifier, Counter32, iso, NotificationType, TimeTicks, Unsigned32, Counter64, Bits, Gauge32, ModuleIdentity, IpAddress, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter32", "iso", "NotificationType", "TimeTicks", "Unsigned32", "Counter64", "Bits", "Gauge32", "ModuleIdentity", "IpAddress", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
StorageType, TruthValue, DateAndTime, TextualConvention, DisplayString, TimeStamp, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "StorageType", "TruthValue", "DateAndTime", "TextualConvention", "DisplayString", "TimeStamp", "RowStatus")
TFilterAction, TFilterActionOrDefault = mibBuilder.importSymbols("TIMETRA-FILTER-MIB", "TFilterAction", "TFilterActionOrDefault")
tmnxSRConfs, timetraSRMIBModules, tmnxSRNotifyPrefix, tmnxSRObjs = mibBuilder.importSymbols("TIMETRA-GLOBAL-MIB", "tmnxSRConfs", "timetraSRMIBModules", "tmnxSRNotifyPrefix", "tmnxSRObjs")
TItemDescription, TQueueId, TQueueIdOrAll, TmnxOperState, TmnxActionType, TmnxAccPlcyQECounters, THsmdaCounterIdOrZeroOrAll, TmnxAdminState, TmnxAccPlcyOECounters, TmnxAccPlcyQICounters, TNamedItem, TmnxAccPlcyAACounters, TNamedItemOrEmpty, THsmdaCounterIdOrZero, TmnxAccPlcyOICounters = mibBuilder.importSymbols("TIMETRA-TC-MIB", "TItemDescription", "TQueueId", "TQueueIdOrAll", "TmnxOperState", "TmnxActionType", "TmnxAccPlcyQECounters", "THsmdaCounterIdOrZeroOrAll", "TmnxAdminState", "TmnxAccPlcyOECounters", "TmnxAccPlcyQICounters", "TNamedItem", "TmnxAccPlcyAACounters", "TNamedItemOrEmpty", "THsmdaCounterIdOrZero", "TmnxAccPlcyOICounters")
# Module identity and revision history for TIMETRA-LOG-MIB.
# NOTE(review): ``mibBuilder`` is supplied by the pysnmp/pysmi loader that
# executes this generated module; it is not defined in this file.
timetraLogMIBModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 6527, 1, 1, 3, 12))
timetraLogMIBModule.setRevisions(('2011-02-01 00:00', '2009-02-28 00:00', '2008-01-01 00:00', '2007-01-01 00:00', '2006-03-15 00:00', '2005-01-24 00:00', '2004-05-27 00:00', '2004-01-15 00:00', '2003-08-15 00:00', '2003-01-20 00:00', '2001-11-10 00:00',))
if mibBuilder.loadTexts: timetraLogMIBModule.setLastUpdated('201102010000Z')
if mibBuilder.loadTexts: timetraLogMIBModule.setOrganization('Alcatel-Lucent')
# Top-level OID anchors of the Timetra log subtree.
tmnxLogObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12))
tmnxLogNotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1))
tmnxLogNotifyPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12))
tmnxLogNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0))
tmnxLogConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12))
class TmnxPerceivedSeverity(TextualConvention, Integer32):
    """Enumerated perceived severity of a log event (0=none .. 6=warning)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("none", 0), ("cleared", 1), ("indeterminate", 2), ("critical", 3), ("major", 4), ("minor", 5), ("warning", 6))
class TmnxSyslogId(TextualConvention, Integer32):
    """Syslog target identifier, constrained to the range 1..10."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 10)
class TmnxSyslogIdOrEmpty(TextualConvention, Integer32):
    """Syslog target identifier (1..10) or 0 meaning 'none'."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 10), )
class TmnxSyslogFacility(TextualConvention, Integer32):
    """Enumerated syslog facility code (0=kernel .. 23=local7)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))
    namedValues = NamedValues(("kernel", 0), ("user", 1), ("mail", 2), ("systemd", 3), ("auth", 4), ("syslogd", 5), ("printer", 6), ("netnews", 7), ("uucp", 8), ("cron", 9), ("authpriv", 10), ("ftp", 11), ("ntp", 12), ("logaudit", 13), ("logalert", 14), ("cron2", 15), ("local0", 16), ("local1", 17), ("local2", 18), ("local3", 19), ("local4", 20), ("local5", 21), ("local6", 22), ("local7", 23))
class TmnxUdpPort(TextualConvention, Integer32):
    """UDP port number (0..65535)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
class TmnxSyslogSeverity(TextualConvention, Integer32):
    """Enumerated syslog severity level (0=emergency .. 7=debug)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))
    namedValues = NamedValues(("emergency", 0), ("alert", 1), ("critical", 2), ("error", 3), ("warning", 4), ("notice", 5), ("info", 6), ("debug", 7))
class TmnxLogFileId(TextualConvention, Integer32):
    """Log file policy identifier (0..99)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 99)
class TmnxLogFileType(TextualConvention, Integer32):
    """Kind of data a log file holds: none, event log, or accounting policy."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
    namedValues = NamedValues(("none", 0), ("eventLog", 1), ("accountingPolicy", 2))
class TmnxLogIdIndex(TextualConvention, Integer32):
    """Event log identifier (1..100)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 100)
class TmnxCFlash(TextualConvention, Unsigned32):
    """Compact-flash device identifier (unconstrained Unsigned32)."""
    status = 'current'
class TmnxLogFilterId(TextualConvention, Unsigned32):
    """Log filter identifier (0..1001)."""
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 1001)
class TmnxLogFilterEntryId(TextualConvention, Unsigned32):
    """Entry index within a log filter (1..999)."""
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 999)
class TmnxLogFilterOperator(TextualConvention, Integer32):
    """Comparison operator used by log filter match entries (off/eq/neq/lt/le/gt/ge)."""
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
    namedValues = NamedValues(("off", 1), ("equal", 2), ("notEqual", 3), ("lessThan", 4), ("lessThanOrEqual", 5), ("greaterThan", 6), ("greaterThanOrEqual", 7))
class TmnxEventNumber(TextualConvention, Unsigned32):
    """Textual convention: an application event number (unconstrained
    Unsigned32; used as the tmnxEventTable event id match value)."""
    status = 'current'
# Scalar (.12.2): maximum number of logs the system allows; read-write,
# defaults to 15.
tmnxLogMaxLogs = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 2), Unsigned32().clone(15)).setUnits('logs').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogMaxLogs.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxLogFileIdTable (.12.3): file-id policies, indexed by tmnxLogFileId.
# Read-create columns configure rollover/retain timers, admin cflash
# location and backup location; read-only columns report the operational
# location, log type/id, current path name and creation time.
# ---------------------------------------------------------------------------
tmnxLogFileIdTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3), )
if mibBuilder.loadTexts: tmnxLogFileIdTable.setStatus('current')
tmnxLogFileIdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogFileId"))
if mibBuilder.loadTexts: tmnxLogFileIdEntry.setStatus('current')
tmnxLogFileId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 1), TmnxLogFileId())
if mibBuilder.loadTexts: tmnxLogFileId.setStatus('current')
tmnxLogFileIdRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFileIdRowStatus.setStatus('current')
tmnxLogFileIdStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 3), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFileIdStorageType.setStatus('current')
# Rollover: 5..10080 minutes, default 1440 (one day).
tmnxLogFileIdRolloverTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 10080)).clone(1440)).setUnits('minutes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFileIdRolloverTime.setStatus('current')
# Retain: 1..500 hours, default 12.
tmnxLogFileIdRetainTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 500)).clone(12)).setUnits('hours').setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFileIdRetainTime.setStatus('current')
tmnxLogFileIdAdminLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 6), TmnxCFlash()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFileIdAdminLocation.setStatus('current')
tmnxLogFileIdOperLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 7), TmnxCFlash()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogFileIdOperLocation.setStatus('current')
tmnxLogFileIdDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 8), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFileIdDescription.setStatus('current')
tmnxLogFileIdLogType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 9), TmnxLogFileType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogFileIdLogType.setStatus('current')
tmnxLogFileIdLogId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogFileIdLogId.setStatus('current')
tmnxLogFileIdPathName = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogFileIdPathName.setStatus('current')
tmnxLogFileIdCreateTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 12), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogFileIdCreateTime.setStatus('current')
tmnxLogFileIdBackupLoc = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 3, 1, 13), TmnxCFlash()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFileIdBackupLoc.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxLogApTable (.12.4): accounting policies, indexed by tmnxLogApPolicyId
# (1..99). Configures admin state, collection interval, record type and the
# destination file-id; read-only columns report oper state, the port type the
# record applies to, and data-loss counters.
# ---------------------------------------------------------------------------
tmnxLogApTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4), )
if mibBuilder.loadTexts: tmnxLogApTable.setStatus('current')
tmnxLogApEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogApPolicyId"))
if mibBuilder.loadTexts: tmnxLogApEntry.setStatus('current')
tmnxLogApPolicyId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 99)))
if mibBuilder.loadTexts: tmnxLogApPolicyId.setStatus('current')
tmnxLogApRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApRowStatus.setStatus('current')
tmnxLogApStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 3), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApStorageType.setStatus('current')
# New policies default to administratively out of service.
tmnxLogApAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 4), TmnxAdminState().clone('outOfService')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApAdminStatus.setStatus('current')
tmnxLogApOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 5), TmnxOperState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogApOperStatus.setStatus('current')
tmnxLogApInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 120)).clone(5)).setUnits('minutes').setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApInterval.setStatus('current')
tmnxLogApDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 7), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApDescription.setStatus('current')
tmnxLogApDefault = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 8), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApDefault.setStatus('current')
# Accounting record type collected by this policy (none(0)..61).
# NOTE: this one statement is intentionally split across two physical lines.
tmnxLogApRecord = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61))).clone(namedValues=NamedValues(("none", 0), ("svcIngressOctet", 1), ("svcEgressOctet", 2), ("svcIngressPkt", 3), ("svcEgressPkt", 4), ("netIngressOctet", 5), ("netEgressOctet", 6), ("netIngressPkt", 7), ("netEgressPkt", 8), ("compactSvcInOctet", 9), ("combinedSvcIngress", 10), ("combinedNetInEgOctet", 11), ("combinedSvcInEgOctet", 12), ("completeSvcInEg", 13), ("combinedSvcSdpInEg", 14), ("completeSvcSdpInEg", 15), ("completeSubscrIngrEgr", 16), ("bsxProtocol", 17), ("bsxApplication", 18), ("bsxAppGroup", 19), ("bsxSubscriberProtocol", 20), ("bsxSubscriberApplication", 21), ("bsxSubscriberAppGroup", 22), ("customRecordSubscriber", 23), ("customRecordService", 24), ("customRecordAa", 25), ("queueGroupOctets", 26), ("queueGroupPackets", 27), ("combinedQueueGroup", 28), ("combinedMplsLspIngress", 29), ("combinedMplsLspEgress", 30), ("combinedLdpLspEgress", 31), ("saa", 32), ("video", 33), ("kpiSystem", 34), ("kpiBearerMgmt", 35), ("kpiBearerTraffic", 36), ("kpiRefPoint", 37), ("kpiPathMgmt", 38), ("kpiIom3", 39), ("kciSystem", 40), ("kciBearerMgmt", 41), ("kciPathMgmt", 42), ("completeKpi", 43), ("completeKci", 44), ("kpiBearerGroup", 45), ("kpiRefPathGroup", 46), ("kpiKciBearerMgmt", 47), ("kpiKciPathMgmt", 48), ("kpiKciSystem", 49), ("completeKpiKci", 50), ("aaPerformance", 51), ("netInfIngressOct", 52), ("netInfIngressPkt", 53), ("combinedNetInfIngress", 54), ("accessEgressPkt", 55), ("accessEgressOct", 56), ("combinedAccessEgress", 57), ("combinedNetEgress", 58), ("combinedSvcEgress", 59), ("combinedSvcInEgPkt", 60), ("combinedNetInEgPkt", 
61))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApRecord.setStatus('current')
tmnxLogApToFileId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 10), TmnxLogFileId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApToFileId.setStatus('current')
tmnxLogApPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))).clone(namedValues=NamedValues(("none", 0), ("access", 1), ("network", 2), ("sdp", 3), ("subscriber", 4), ("appAssure", 5), ("qgrp", 6), ("saa", 7), ("mplsLspIngr", 8), ("mplsLspEgr", 9), ("ldpLspEgr", 10), ("video", 11), ("mobileGateway", 12), ("networkIf", 13), ("accessport", 14)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogApPortType.setStatus('current')
tmnxLogApDefaultInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 12), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApDefaultInterval.setStatus('current')
tmnxLogApDataLossCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogApDataLossCount.setStatus('current')
tmnxLogApLastDataLossTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 14), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogApLastDataLossTimeStamp.setStatus('current')
tmnxLogApToFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 4, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("fileId", 0), ("noFile", 1))).clone('fileId')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApToFileType.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxLogIdTable (.12.5): event-log configuration, indexed by tmnxLogIdIndex.
# Selects event sources (main/security/change/debugTrace/li), an optional
# filter, and one destination (console/syslog/snmpTraps/file/memory); the
# read-only forwarded/dropped Counter64s report per-log event accounting.
# ---------------------------------------------------------------------------
tmnxLogIdTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5), )
if mibBuilder.loadTexts: tmnxLogIdTable.setStatus('current')
tmnxLogIdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogIdIndex"))
if mibBuilder.loadTexts: tmnxLogIdEntry.setStatus('current')
tmnxLogIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 1), TmnxLogIdIndex())
if mibBuilder.loadTexts: tmnxLogIdIndex.setStatus('current')
tmnxLogIdRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdRowStatus.setStatus('current')
tmnxLogIdStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 3), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdStorageType.setStatus('current')
# Unlike accounting policies, event logs default to in-service.
tmnxLogIdAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 4), TmnxAdminState().clone('inService')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdAdminStatus.setStatus('current')
tmnxLogIdOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 5), TmnxOperState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogIdOperStatus.setStatus('current')
tmnxLogIdDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 6), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdDescription.setStatus('current')
tmnxLogIdFilterId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 7), TmnxLogFilterId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdFilterId.setStatus('current')
# BITS of event streams feeding this log.
tmnxLogIdSource = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 8), Bits().clone(namedValues=NamedValues(("main", 0), ("security", 1), ("change", 2), ("debugTrace", 3), ("li", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdSource.setStatus('current')
tmnxLogIdDestination = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none", 0), ("console", 1), ("syslog", 2), ("snmpTraps", 3), ("file", 4), ("memory", 5))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdDestination.setStatus('current')
tmnxLogIdFileId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 10), TmnxLogFileId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdFileId.setStatus('current')
tmnxLogIdSyslogId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 11), TmnxSyslogIdOrEmpty()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdSyslogId.setStatus('current')
# Memory-destination buffer size: 0 (disabled) or 50..3000 events, default 100.
tmnxLogIdMaxMemorySize = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 12), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(50, 3000), )).clone(100)).setUnits('events').setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdMaxMemorySize.setStatus('current')
tmnxLogIdConsoleSession = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 13), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdConsoleSession.setStatus('current')
tmnxLogIdForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogIdForwarded.setStatus('current')
tmnxLogIdDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogIdDropped.setStatus('current')
tmnxLogIdTimeFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 5, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("utc", 1), ("local", 2))).clone('utc')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogIdTimeFormat.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxLogFilterTable (.12.6): event-log filters, indexed by tmnxLogFilterId
# (narrowed to 1..1001 as an index). Holds the default action and whether
# the filter is currently referenced (read-only tmnxLogFilterInUse).
# ---------------------------------------------------------------------------
tmnxLogFilterTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 6), )
if mibBuilder.loadTexts: tmnxLogFilterTable.setStatus('current')
tmnxLogFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 6, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogFilterId"))
if mibBuilder.loadTexts: tmnxLogFilterEntry.setStatus('current')
tmnxLogFilterId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 6, 1, 1), TmnxLogFilterId().subtype(subtypeSpec=ValueRangeConstraint(1, 1001)))
if mibBuilder.loadTexts: tmnxLogFilterId.setStatus('current')
tmnxLogFilterRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 6, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterRowStatus.setStatus('current')
tmnxLogFilterDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 6, 1, 3), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterDescription.setStatus('current')
tmnxLogFilterDefaultAction = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 6, 1, 4), TFilterAction().clone('forward')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterDefaultAction.setStatus('current')
tmnxLogFilterInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 6, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogFilterInUse.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxLogFilterParamsTable (.12.7): per-filter match entries, indexed by
# (tmnxLogFilterId, tmnxLogFilterParamsIndex). Each entry pairs a match
# value (application, event number, severity, subject, router) with a
# TmnxLogFilterOperator (default 'off' = criterion disabled) and an action.
# Subject and router matches may optionally be treated as regexps.
# ---------------------------------------------------------------------------
tmnxLogFilterParamsTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7), )
if mibBuilder.loadTexts: tmnxLogFilterParamsTable.setStatus('current')
tmnxLogFilterParamsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogFilterId"), (0, "TIMETRA-LOG-MIB", "tmnxLogFilterParamsIndex"))
if mibBuilder.loadTexts: tmnxLogFilterParamsEntry.setStatus('current')
tmnxLogFilterParamsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 1), TmnxLogFilterEntryId())
if mibBuilder.loadTexts: tmnxLogFilterParamsIndex.setStatus('current')
tmnxLogFilterParamsRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsRowStatus.setStatus('current')
tmnxLogFilterParamsDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 3), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsDescription.setStatus('current')
tmnxLogFilterParamsAction = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 4), TFilterActionOrDefault().clone('default')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsAction.setStatus('current')
tmnxLogFilterParamsApplication = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 5), TNamedItemOrEmpty().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsApplication.setStatus('current')
tmnxLogFilterParamsApplOperator = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 6), TmnxLogFilterOperator().clone('off')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsApplOperator.setStatus('current')
tmnxLogFilterParamsNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 7), TmnxEventNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsNumber.setStatus('current')
tmnxLogFilterParamsNumberOperator = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 8), TmnxLogFilterOperator().clone('off')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsNumberOperator.setStatus('current')
tmnxLogFilterParamsSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 9), TmnxPerceivedSeverity().clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsSeverity.setStatus('current')
tmnxLogFilterParamsSeverityOperator = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 10), TmnxLogFilterOperator().clone('off')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsSeverityOperator.setStatus('current')
tmnxLogFilterParamsSubject = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 11), TNamedItemOrEmpty().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsSubject.setStatus('current')
tmnxLogFilterParamsSubjectOperator = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 12), TmnxLogFilterOperator().clone('off')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsSubjectOperator.setStatus('current')
tmnxLogFilterParamsSubjectRegexp = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 13), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsSubjectRegexp.setStatus('current')
tmnxLogFilterParamsRouter = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 14), TNamedItemOrEmpty().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsRouter.setStatus('current')
tmnxLogFilterParamsRouterOperator = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 15), TmnxLogFilterOperator().clone('off')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsRouterOperator.setStatus('current')
tmnxLogFilterParamsRouterRegexp = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 7, 1, 16), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogFilterParamsRouterRegexp.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxSyslogTargetTable (.12.8): syslog collector targets, indexed by
# tmnxSyslogTargetIndex. The legacy IPv4-only address column (.4) is
# obsolete; the InetAddressType/InetAddress pair (.10/.11) replaces it.
# Defaults: UDP port 514, facility local7, severity info.
# ---------------------------------------------------------------------------
tmnxSyslogTargetTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8), )
if mibBuilder.loadTexts: tmnxSyslogTargetTable.setStatus('current')
tmnxSyslogTargetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxSyslogTargetIndex"))
if mibBuilder.loadTexts: tmnxSyslogTargetEntry.setStatus('current')
tmnxSyslogTargetIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 1), TmnxSyslogId())
if mibBuilder.loadTexts: tmnxSyslogTargetIndex.setStatus('current')
tmnxSyslogTargetRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetRowStatus.setStatus('current')
tmnxSyslogTargetDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 3), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetDescription.setStatus('current')
# Obsolete IPv4-only address; superseded by tmnxSyslogTargetAddrType/Addr.
tmnxSyslogTargetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 4), IpAddress().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetAddress.setStatus('obsolete')
tmnxSyslogTargetUdpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 5), TmnxUdpPort().clone(514)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetUdpPort.setStatus('current')
tmnxSyslogTargetFacility = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 6), TmnxSyslogFacility().clone('local7')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetFacility.setStatus('current')
tmnxSyslogTargetSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 7), TmnxSyslogSeverity().clone('info')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetSeverity.setStatus('current')
tmnxSyslogTargetMessagePrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 8), TNamedItemOrEmpty().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetMessagePrefix.setStatus('current')
tmnxSyslogTargetMessagesDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSyslogTargetMessagesDropped.setStatus('current')
tmnxSyslogTargetAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 10), InetAddressType().clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetAddrType.setStatus('current')
# Address sizes: 0 (unset), 4 (IPv4), 16 (IPv6), 20 (IPv6 with zone).
tmnxSyslogTargetAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 8, 1, 11), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), ValueSizeConstraint(20, 20), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSyslogTargetAddr.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxEventAppTable (.12.9): read-only registry mapping an application index
# to its name; parent index for tmnxEventTable below.
# ---------------------------------------------------------------------------
tmnxEventAppTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 9), )
if mibBuilder.loadTexts: tmnxEventAppTable.setStatus('current')
tmnxEventAppEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 9, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxEventAppIndex"))
if mibBuilder.loadTexts: tmnxEventAppEntry.setStatus('current')
tmnxEventAppIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 9, 1, 1), Unsigned32())
if mibBuilder.loadTexts: tmnxEventAppIndex.setStatus('current')
tmnxEventAppName = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 9, 1, 2), TNamedItem()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxEventAppName.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxEventTable (.12.10): per-event configuration and counters, indexed by
# (tmnxEventAppIndex, tmnxEventID). Writable columns adjust severity,
# enable/disable generation, reset counters and toggle throttling; the
# Counter32 columns report generated and dropped event counts.
# ---------------------------------------------------------------------------
tmnxEventTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10), )
if mibBuilder.loadTexts: tmnxEventTable.setStatus('current')
tmnxEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxEventAppIndex"), (0, "TIMETRA-LOG-MIB", "tmnxEventID"))
if mibBuilder.loadTexts: tmnxEventEntry.setStatus('current')
tmnxEventID = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 1), Unsigned32())
if mibBuilder.loadTexts: tmnxEventID.setStatus('current')
tmnxEventName = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 2), TNamedItem()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxEventName.setStatus('current')
tmnxEventSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 3), TmnxPerceivedSeverity()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventSeverity.setStatus('current')
tmnxEventControl = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventControl.setStatus('current')
tmnxEventCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxEventCounter.setStatus('current')
tmnxEventDropCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxEventDropCount.setStatus('current')
tmnxEventReset = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 7), TmnxActionType().clone('notApplicable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventReset.setStatus('current')
tmnxEventThrottle = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 10, 1, 8), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventThrottle.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxSnmpTrapGroupTable (.12.11): OBSOLETE SNMP trap-group table (IPv4-only
# destinations, indexed by log id + address + port). Kept for MIB
# compatibility; superseded by tmnxSnmpTrapDestTable (.12.18) below.
# ---------------------------------------------------------------------------
tmnxSnmpTrapGroupTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11), )
if mibBuilder.loadTexts: tmnxSnmpTrapGroupTable.setStatus('obsolete')
tmnxSnmpTrapGroupEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxStgIndex"), (0, "TIMETRA-LOG-MIB", "tmnxStgDestAddress"), (0, "TIMETRA-LOG-MIB", "tmnxStgDestPort"))
if mibBuilder.loadTexts: tmnxSnmpTrapGroupEntry.setStatus('obsolete')
tmnxStgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 1), TmnxLogIdIndex())
if mibBuilder.loadTexts: tmnxStgIndex.setStatus('obsolete')
tmnxStgDestAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 2), IpAddress().clone(hexValue="00000000"))
if mibBuilder.loadTexts: tmnxStgDestAddress.setStatus('obsolete')
tmnxStgDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 3), TmnxUdpPort().clone(162))
if mibBuilder.loadTexts: tmnxStgDestPort.setStatus('obsolete')
tmnxStgRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStgRowStatus.setStatus('obsolete')
tmnxStgDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 5), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStgDescription.setStatus('obsolete')
tmnxStgVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 6), SnmpMessageProcessingModel().clone(3)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStgVersion.setStatus('obsolete')
tmnxStgNotifyCommunity = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32)).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStgNotifyCommunity.setStatus('obsolete')
tmnxStgSecurityLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 11, 1, 8), SnmpSecurityLevel().clone('noAuthNoPriv')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStgSecurityLevel.setStatus('obsolete')
# Action scalar (.12.12): writing it triggers a test event.
tmnxEventTest = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 12), TmnxActionType().clone('notApplicable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventTest.setStatus('current')
# Event throttling: at most tmnxEventThrottleLimit events per
# tmnxEventThrottleInterval seconds (defaults 2000 events / 1 s).
tmnxEventThrottleLimit = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 13), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 20000)).clone(2000)).setUnits('events').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventThrottleLimit.setStatus('current')
tmnxEventThrottleInterval = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 14), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 1200)).clone(1)).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventThrottleInterval.setStatus('current')
# Read-only scalar (.12.15): maximum number of rows retained in
# tmnxSnmpSetErrsTable below.
tmnxSnmpSetErrsMax = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSnmpSetErrsMax.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxSnmpSetErrsTable (.12.16): read-only history of failed SNMP SET
# requests, indexed by (manager address type, address, port, request-id).
# Each row records the SNMP version, severity, originating module, error
# code/name/message, optional extra text, and a timestamp.
# ---------------------------------------------------------------------------
tmnxSnmpSetErrsTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16), )
if mibBuilder.loadTexts: tmnxSnmpSetErrsTable.setStatus('current')
tmnxSnmpSetErrsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxSseAddressType"), (0, "TIMETRA-LOG-MIB", "tmnxSseAddress"), (0, "TIMETRA-LOG-MIB", "tmnxSseSnmpPort"), (0, "TIMETRA-LOG-MIB", "tmnxSseRequestId"))
if mibBuilder.loadTexts: tmnxSnmpSetErrsEntry.setStatus('current')
tmnxSseAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 1), InetAddressType())
if mibBuilder.loadTexts: tmnxSseAddressType.setStatus('current')
# Index address: 4 bytes (IPv4) or 16 bytes (IPv6) only.
tmnxSseAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 2), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), )))
if mibBuilder.loadTexts: tmnxSseAddress.setStatus('current')
tmnxSseSnmpPort = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 3), TmnxUdpPort())
if mibBuilder.loadTexts: tmnxSseSnmpPort.setStatus('current')
tmnxSseRequestId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 4), Unsigned32())
if mibBuilder.loadTexts: tmnxSseRequestId.setStatus('current')
tmnxSseVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 5), SnmpMessageProcessingModel()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseVersion.setStatus('current')
tmnxSseSeverityLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 6), TmnxPerceivedSeverity()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseSeverityLevel.setStatus('current')
tmnxSseModuleId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseModuleId.setStatus('current')
tmnxSseModuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 8), TNamedItem()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseModuleName.setStatus('current')
tmnxSseErrorCode = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseErrorCode.setStatus('current')
tmnxSseErrorName = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseErrorName.setStatus('current')
tmnxSseErrorMsg = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseErrorMsg.setStatus('current')
tmnxSseExtraText = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 12), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 320))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseExtraText.setStatus('current')
tmnxSseTimestamp = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 16, 1, 13), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxSseTimestamp.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxSnmpTrapLogTable (.12.17): AUGMENTS SNMP-NOTIFICATION-MIB's
# snmpNotifyEntry — it registers itself as an augmentation and reuses that
# table's index names, adding a single description column.
# ---------------------------------------------------------------------------
tmnxSnmpTrapLogTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 17), )
if mibBuilder.loadTexts: tmnxSnmpTrapLogTable.setStatus('current')
tmnxSnmpTrapLogEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 17, 1), )
snmpNotifyEntry.registerAugmentions(("TIMETRA-LOG-MIB", "tmnxSnmpTrapLogEntry"))
tmnxSnmpTrapLogEntry.setIndexNames(*snmpNotifyEntry.getIndexNames())
if mibBuilder.loadTexts: tmnxSnmpTrapLogEntry.setStatus('current')
tmnxSnmpTrapLogDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 17, 1, 1), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxSnmpTrapLogDescription.setStatus('current')
# ---------------------------------------------------------------------------
# tmnxSnmpTrapDestTable (.12.18): SNMP trap destinations, indexed by
# (tmnxStdIndex, tmnxStdName) — note the name index is flagged as the
# IMPLIED (variable-length last) index. Replaces the obsolete
# tmnxSnmpTrapGroupTable with InetAddress-based addressing and replay
# support. (Further columns of this table continue beyond this chunk.)
# ---------------------------------------------------------------------------
tmnxSnmpTrapDestTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18), )
if mibBuilder.loadTexts: tmnxSnmpTrapDestTable.setStatus('current')
tmnxSnmpTrapDestEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxStdIndex"), (1, "TIMETRA-LOG-MIB", "tmnxStdName"))
if mibBuilder.loadTexts: tmnxSnmpTrapDestEntry.setStatus('current')
tmnxStdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 1), TmnxLogIdIndex())
if mibBuilder.loadTexts: tmnxStdIndex.setStatus('current')
tmnxStdName = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 28)))
if mibBuilder.loadTexts: tmnxStdName.setStatus('current')
tmnxStdRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdRowStatus.setStatus('current')
tmnxStdRowLastChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxStdRowLastChanged.setStatus('current')
tmnxStdDestAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 5), InetAddressType().clone('unknown')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdDestAddrType.setStatus('current')
# Address sizes: 0 (unset), 4 (IPv4), 16 (IPv6), 20 (IPv6 with zone).
tmnxStdDestAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 6), InetAddress().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(4, 4), ValueSizeConstraint(16, 16), ValueSizeConstraint(20, 20), )).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdDestAddr.setStatus('current')
tmnxStdDestPort = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 7), TmnxUdpPort().clone(162)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdDestPort.setStatus('current')
tmnxStdDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 8), TItemDescription().clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdDescription.setStatus('current')
tmnxStdVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 9), SnmpMessageProcessingModel().clone(3)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdVersion.setStatus('current')
tmnxStdNotifyCommunity = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 31)).clone(hexValue="")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdNotifyCommunity.setStatus('current')
tmnxStdSecurityLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 11), SnmpSecurityLevel().clone('noAuthNoPriv')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdSecurityLevel.setStatus('current')
tmnxStdReplay = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 12), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxStdReplay.setStatus('current')
tmnxStdReplayStart = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxStdReplayStart.setStatus('current')
tmnxStdReplayLastTime = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 18, 1, 14), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxStdReplayLastTime.setStatus('current')
tmnxStdMaxTargets = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 19), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(10, 100)).clone(25)).setUnits('trap-targets').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxStdMaxTargets.setStatus('current')
tmnxLogApCustRecordTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20), )
if mibBuilder.loadTexts: tmnxLogApCustRecordTable.setStatus('current')
tmnxLogApCustRecordEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1), )
tmnxLogApEntry.registerAugmentions(("TIMETRA-LOG-MIB", "tmnxLogApCustRecordEntry"))
tmnxLogApCustRecordEntry.setIndexNames(*tmnxLogApEntry.getIndexNames())
if mibBuilder.loadTexts: tmnxLogApCustRecordEntry.setStatus('current')
tmnxLogApCrLastChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 1), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogApCrLastChanged.setStatus('current')
tmnxLogApCrSignChangeDelta = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 2), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeDelta.setStatus('current')
tmnxLogApCrSignChangeQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 3), TQueueIdOrAll()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeQueue.setStatus('current')
tmnxLogApCrSignChangeOCntr = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 4), THsmdaCounterIdOrZeroOrAll()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeOCntr.setStatus('current')
tmnxLogApCrSignChangeQICounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 5), TmnxAccPlcyQICounters().clone(hexValue="0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeQICounters.setStatus('current')
tmnxLogApCrSignChangeQECounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 6), TmnxAccPlcyQECounters().clone(hexValue="0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeQECounters.setStatus('current')
tmnxLogApCrSignChangeOICounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 7), TmnxAccPlcyOICounters().clone(hexValue="0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeOICounters.setStatus('current')
tmnxLogApCrSignChangeOECounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 8), TmnxAccPlcyOECounters().clone(hexValue="0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeOECounters.setStatus('current')
tmnxLogApCrSignChangeAACounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 9), TmnxAccPlcyAACounters().clone(hexValue="0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrSignChangeAACounters.setStatus('current')
tmnxLogApCrAACounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 20, 1, 10), TmnxAccPlcyAACounters().clone(hexValue="0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogApCrAACounters.setStatus('current')
tmnxLogApCustRecordQueueTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 21), )
if mibBuilder.loadTexts: tmnxLogApCustRecordQueueTable.setStatus('current')
tmnxLogApCustRecordQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 21, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogApPolicyId"), (0, "TIMETRA-LOG-MIB", "tmnxLogApCrQueueId"))
if mibBuilder.loadTexts: tmnxLogApCustRecordQueueEntry.setStatus('current')
tmnxLogApCrQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 21, 1, 1), TQueueId().subtype(subtypeSpec=ValueRangeConstraint(1, 32)))
if mibBuilder.loadTexts: tmnxLogApCrQueueId.setStatus('current')
tmnxLogApCrQueueRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 21, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApCrQueueRowStatus.setStatus('current')
tmnxLogApCrQueueLastChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 21, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogApCrQueueLastChanged.setStatus('current')
tmnxLogApCrQueueICounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 21, 1, 4), TmnxAccPlcyQICounters().clone(hexValue="0")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApCrQueueICounters.setStatus('current')
tmnxLogApCrQueueECounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 21, 1, 5), TmnxAccPlcyQECounters().clone(hexValue="0")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApCrQueueECounters.setStatus('current')
tmnxLogApCrOverrideCntrTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 22), )
if mibBuilder.loadTexts: tmnxLogApCrOverrideCntrTable.setStatus('current')
tmnxLogApCrOverrideCntrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 22, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogApPolicyId"), (0, "TIMETRA-LOG-MIB", "tmnxLogApCrOverrideCntrId"))
if mibBuilder.loadTexts: tmnxLogApCrOverrideCntrEntry.setStatus('current')
tmnxLogApCrOverrideCntrId = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 22, 1, 1), THsmdaCounterIdOrZero().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: tmnxLogApCrOverrideCntrId.setStatus('current')
tmnxLogApCrOverrideCntrRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 22, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApCrOverrideCntrRowStatus.setStatus('current')
tmnxLogApCrOverrideCntrLastChngd = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 22, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogApCrOverrideCntrLastChngd.setStatus('current')
tmnxLogApCrOverrideCntrICounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 22, 1, 4), TmnxAccPlcyOICounters().clone(hexValue="0")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApCrOverrideCntrICounters.setStatus('current')
tmnxLogApCrOverrideCntrECounters = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 22, 1, 5), TmnxAccPlcyOECounters().clone(hexValue="0")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: tmnxLogApCrOverrideCntrECounters.setStatus('current')
tmnxEventPrimaryRoutePref = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("inband", 1), ("outband", 2))).clone('outband')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventPrimaryRoutePref.setStatus('current')
tmnxEventSecondaryRoutePref = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inband", 1), ("outband", 2), ("none", 3))).clone('inband')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxEventSecondaryRoutePref.setStatus('current')
tmnxLogConfigEventsDamped = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 25), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogConfigEventsDamped.setStatus('current')
tmnxLogEventHistoryObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26))
tmnxLogEventHistGeneralObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 1))
tmnxLogExRbkOpTblLastChange = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 1, 1), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpTblLastChange.setStatus('current')
tmnxLogExRbkOpMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10)).clone(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogExRbkOpMaxEntries.setStatus('current')
tmnxLogExecRollbackOpTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3), )
if mibBuilder.loadTexts: tmnxLogExecRollbackOpTable.setStatus('current')
tmnxLogExecRollbackOpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogExRbkOpIndex"))
if mibBuilder.loadTexts: tmnxLogExecRollbackOpEntry.setStatus('current')
tmnxLogExRbkOpIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 1), Unsigned32())
if mibBuilder.loadTexts: tmnxLogExRbkOpIndex.setStatus('current')
tmnxLogExRbkOpLastChanged = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 2), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpLastChanged.setStatus('current')
tmnxLogExRbkOpType = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("unknown", 0), ("exec", 1), ("rollback", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpType.setStatus('current')
tmnxLogExRbkOpStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("inProgress", 1), ("success", 2), ("failed", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpStatus.setStatus('current')
tmnxLogExRbkOpBegin = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 5), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpBegin.setStatus('current')
tmnxLogExRbkOpEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 6), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpEnd.setStatus('current')
tmnxLogExRbkOpFile = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpFile.setStatus('current')
tmnxLogExRbkOpUser = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 8), TNamedItem()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpUser.setStatus('current')
tmnxLogExRbkOpNumEvents = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 3, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkOpNumEvents.setStatus('current')
tmnxLogExecRollbackEventTable = MibTable((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 4), )
if mibBuilder.loadTexts: tmnxLogExecRollbackEventTable.setStatus('current')
tmnxLogExecRollbackEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 4, 1), ).setIndexNames((0, "TIMETRA-LOG-MIB", "tmnxLogExRbkOpIndex"), (0, "TIMETRA-LOG-MIB", "tmnxLogExRbkEventIndex"))
if mibBuilder.loadTexts: tmnxLogExecRollbackEventEntry.setStatus('current')
tmnxLogExRbkEventIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 4, 1, 1), Unsigned32())
if mibBuilder.loadTexts: tmnxLogExRbkEventIndex.setStatus('current')
tmnxLogExRbkEventOID = MibTableColumn((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 4, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: tmnxLogExRbkEventOID.setStatus('current')
tmnxLogExRbkNotifyObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 5))
tmnxLogExecRollbackOpIndex = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 26, 5, 1), Unsigned32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogExecRollbackOpIndex.setStatus('current')
tmnxLogColdStartWaitTime = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 27), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 300))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogColdStartWaitTime.setStatus('current')
tmnxLogRouteRecoveryWaitTime = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 28), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setUnits('seconds').setMaxAccess("readwrite")
if mibBuilder.loadTexts: tmnxLogRouteRecoveryWaitTime.setStatus('current')
tmnxLogFileDeletedLogId = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 1), TmnxLogIdIndex()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogFileDeletedLogId.setStatus('current')
tmnxLogFileDeletedFileId = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 2), TmnxLogFileId()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogFileDeletedFileId.setStatus('current')
tmnxLogFileDeletedLogType = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 3), TmnxLogFileType()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogFileDeletedLogType.setStatus('current')
tmnxLogFileDeletedLocation = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 4), TmnxCFlash()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogFileDeletedLocation.setStatus('current')
tmnxLogFileDeletedName = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 5), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogFileDeletedName.setStatus('current')
tmnxLogFileDeletedCreateTime = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 6), DateAndTime()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogFileDeletedCreateTime.setStatus('current')
tmnxLogTraceErrorTitle = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 50))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogTraceErrorTitle.setStatus('current')
tmnxLogTraceErrorSubject = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 50))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogTraceErrorSubject.setStatus('current')
tmnxLogTraceErrorMessage = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 9), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogTraceErrorMessage.setStatus('current')
tmnxLogThrottledEventID = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 10), ObjectIdentifier()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogThrottledEventID.setStatus('current')
tmnxLogThrottledEvents = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 11), Unsigned32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogThrottledEvents.setStatus('current')
tmnxSysLogTargetId = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 12), TmnxSyslogId()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxSysLogTargetId.setStatus('current')
tmnxSysLogTargetProblemDescr = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 13), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxSysLogTargetProblemDescr.setStatus('current')
tmnxLogNotifyApInterval = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(5, 120))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxLogNotifyApInterval.setStatus('current')
tmnxStdReplayStartEvent = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 15), Unsigned32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxStdReplayStartEvent.setStatus('current')
tmnxStdReplayEndEvent = MibScalar((1, 3, 6, 1, 4, 1, 6527, 3, 1, 2, 12, 1, 16), Unsigned32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: tmnxStdReplayEndEvent.setStatus('current')
tmnxLogSpaceContention = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 1)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileIdRolloverTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdRetainTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdAdminLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdBackupLoc"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdOperLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogType"))
if mibBuilder.loadTexts: tmnxLogSpaceContention.setStatus('current')
tmnxLogAdminLocFailed = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 2)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileIdAdminLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdBackupLoc"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdOperLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogType"))
if mibBuilder.loadTexts: tmnxLogAdminLocFailed.setStatus('current')
tmnxLogBackupLocFailed = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 3)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileIdAdminLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdBackupLoc"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdOperLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogType"))
if mibBuilder.loadTexts: tmnxLogBackupLocFailed.setStatus('current')
tmnxLogFileRollover = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 4)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileIdRolloverTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdRetainTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdAdminLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdBackupLoc"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdOperLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogType"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdPathName"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdCreateTime"))
if mibBuilder.loadTexts: tmnxLogFileRollover.setStatus('current')
tmnxLogFileDeleted = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 5)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedFileId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogType"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedName"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedCreateTime"))
if mibBuilder.loadTexts: tmnxLogFileDeleted.setStatus('current')
tmnxTestEvent = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 6)).setObjects(("SNMPv2-MIB", "sysDescr"), ("SNMPv2-MIB", "sysObjectID"))
if mibBuilder.loadTexts: tmnxTestEvent.setStatus('current')
tmnxLogTraceError = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 7)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogTraceErrorTitle"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorMessage"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorSubject"))
if mibBuilder.loadTexts: tmnxLogTraceError.setStatus('current')
tmnxLogEventThrottled = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 8)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogThrottledEventID"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEvents"))
if mibBuilder.loadTexts: tmnxLogEventThrottled.setStatus('current')
tmnxSysLogTargetProblem = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 9)).setObjects(("TIMETRA-LOG-MIB", "tmnxSysLogTargetId"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetProblemDescr"))
if mibBuilder.loadTexts: tmnxSysLogTargetProblem.setStatus('current')
tmnxLogAccountingDataLoss = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 10)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileIdRolloverTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdRetainTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdAdminLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdBackupLoc"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdOperLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogId"), ("TIMETRA-LOG-MIB", "tmnxLogNotifyApInterval"))
if mibBuilder.loadTexts: tmnxLogAccountingDataLoss.setStatus('current')
tmnxStdEventsReplayed = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 11)).setObjects(("TIMETRA-LOG-MIB", "tmnxStdDestAddrType"), ("TIMETRA-LOG-MIB", "tmnxStdDestAddr"), ("TIMETRA-LOG-MIB", "tmnxStdReplayStartEvent"), ("TIMETRA-LOG-MIB", "tmnxStdReplayEndEvent"), ("TIMETRA-LOG-MIB", "tmnxStdReplayStart"))
if mibBuilder.loadTexts: tmnxStdEventsReplayed.setStatus('current')
tmnxLogEventOverrun = NotificationType((1, 3, 6, 1, 4, 1, 6527, 3, 1, 3, 12, 0, 12)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogThrottledEventID"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEvents"))
if mibBuilder.loadTexts: tmnxLogEventOverrun.setStatus('current')
tmnxLogCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1))
tmnxLogGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2))
tmnxLogV4v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 4)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV4v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogGroup"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsR2r1Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationR3r0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV4v0Compliance = tmnxLogV4v0Compliance.setStatus('obsolete')
tmnxLogV5v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 5)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV5v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV5v0Compliance = tmnxLogV5v0Compliance.setStatus('obsolete')
tmnxLogV6v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 6)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapDestV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV6v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV6v0Compliance = tmnxLogV6v0Compliance.setStatus('obsolete')
tmnxLogV6v1Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 7)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapDestV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyV6v1Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV6v1Compliance = tmnxLogV6v1Compliance.setStatus('current')
tmnxLogV7v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 8)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapDestV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyV6v1Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyCRV7v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogRoutePreferenceV7v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV7v0Compliance = tmnxLogV7v0Compliance.setStatus('obsolete')
tmnxLogV9v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 9)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyV6v1Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyCRV7v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapDestV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV9v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogRoutePreferenceV7v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogEventDampedV8v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogApV9v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV9v0Compliance = tmnxLogV9v0Compliance.setStatus('obsolete')
tmnxLogV8v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 10)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapDestV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyV6v1Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyCRV7v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogRoutePreferenceV7v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogEventDampedV8v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV8v0Compliance = tmnxLogV8v0Compliance.setStatus('obsolete')
tmnxLogV10v0Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 1, 11)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogGlobalGroup"), ("TIMETRA-LOG-MIB", "tmnxLogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyGroup"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyV6v1Group"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingPolicyCRV7v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdGroup"), ("TIMETRA-LOG-MIB", "tmnxLogSyslogV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpTrapDestV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsGroup"), ("TIMETRA-LOG-MIB", "tmnxLogEventsV5v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV6v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogNotificationV9v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogRoutePreferenceV7v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogEventDampedV8v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogApV9v0Group"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpGroup"), ("TIMETRA-LOG-MIB", "tmnxLogApExtGroup"), ("TIMETRA-LOG-MIB", "tmnxLogAppRouteNotifV10v0Group"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV10v0Compliance = tmnxLogV10v0Compliance.setStatus('current')
tmnxLogGlobalGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 1)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogMaxLogs"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogGlobalGroup = tmnxLogGlobalGroup.setStatus('current')
tmnxLogAccountingPolicyGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 3)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogApRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogApStorageType"), ("TIMETRA-LOG-MIB", "tmnxLogApAdminStatus"), ("TIMETRA-LOG-MIB", "tmnxLogApOperStatus"), ("TIMETRA-LOG-MIB", "tmnxLogApInterval"), ("TIMETRA-LOG-MIB", "tmnxLogApDescription"), ("TIMETRA-LOG-MIB", "tmnxLogApDefault"), ("TIMETRA-LOG-MIB", "tmnxLogApRecord"), ("TIMETRA-LOG-MIB", "tmnxLogApToFileId"), ("TIMETRA-LOG-MIB", "tmnxLogApPortType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogAccountingPolicyGroup = tmnxLogAccountingPolicyGroup.setStatus('current')
tmnxLogFileIdGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 4)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileIdRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdStorageType"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdRolloverTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdRetainTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdAdminLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdOperLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdDescription"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogType"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdPathName"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdCreateTime"), ("TIMETRA-LOG-MIB", "tmnxLogFileIdBackupLoc"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogFileIdGroup = tmnxLogFileIdGroup.setStatus('current')
tmnxLogSyslogGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 5)).setObjects(("TIMETRA-LOG-MIB", "tmnxSyslogTargetRowStatus"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetDescription"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetAddress"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetUdpPort"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetFacility"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetSeverity"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetMessagePrefix"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetMessagesDropped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogSyslogGroup = tmnxLogSyslogGroup.setStatus('obsolete')
tmnxSnmpTrapGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 6)).setObjects(("TIMETRA-LOG-MIB", "tmnxStgRowStatus"), ("TIMETRA-LOG-MIB", "tmnxStgDescription"), ("TIMETRA-LOG-MIB", "tmnxStgVersion"), ("TIMETRA-LOG-MIB", "tmnxStgNotifyCommunity"), ("TIMETRA-LOG-MIB", "tmnxStgSecurityLevel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxSnmpTrapGroup = tmnxSnmpTrapGroup.setStatus('obsolete')
tmnxLogEventsR2r1Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 10)).setObjects(("TIMETRA-LOG-MIB", "tmnxEventAppName"), ("TIMETRA-LOG-MIB", "tmnxEventName"), ("TIMETRA-LOG-MIB", "tmnxEventSeverity"), ("TIMETRA-LOG-MIB", "tmnxEventControl"), ("TIMETRA-LOG-MIB", "tmnxEventCounter"), ("TIMETRA-LOG-MIB", "tmnxEventDropCount"), ("TIMETRA-LOG-MIB", "tmnxEventReset"), ("TIMETRA-LOG-MIB", "tmnxEventTest"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogEventsR2r1Group = tmnxLogEventsR2r1Group.setStatus('obsolete')
tmnxLogNotifyObjsR3r0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 13)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedFileId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogType"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedName"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedCreateTime"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorTitle"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorMessage"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotifyObjsR3r0Group = tmnxLogNotifyObjsR3r0Group.setStatus('obsolete')
tmnxLogNotificationR3r0Group = NotificationGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 14)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogSpaceContention"), ("TIMETRA-LOG-MIB", "tmnxLogAdminLocFailed"), ("TIMETRA-LOG-MIB", "tmnxLogBackupLocFailed"), ("TIMETRA-LOG-MIB", "tmnxLogFileRollover"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeleted"), ("TIMETRA-LOG-MIB", "tmnxTestEvent"), ("TIMETRA-LOG-MIB", "tmnxLogTraceError"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotificationR3r0Group = tmnxLogNotificationR3r0Group.setStatus('obsolete')
tmnxLogV4v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 15)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogIdRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogIdStorageType"), ("TIMETRA-LOG-MIB", "tmnxLogIdAdminStatus"), ("TIMETRA-LOG-MIB", "tmnxLogIdOperStatus"), ("TIMETRA-LOG-MIB", "tmnxLogIdDescription"), ("TIMETRA-LOG-MIB", "tmnxLogIdFilterId"), ("TIMETRA-LOG-MIB", "tmnxLogIdSource"), ("TIMETRA-LOG-MIB", "tmnxLogIdDestination"), ("TIMETRA-LOG-MIB", "tmnxLogIdFileId"), ("TIMETRA-LOG-MIB", "tmnxLogIdSyslogId"), ("TIMETRA-LOG-MIB", "tmnxLogIdMaxMemorySize"), ("TIMETRA-LOG-MIB", "tmnxLogIdConsoleSession"), ("TIMETRA-LOG-MIB", "tmnxLogIdForwarded"), ("TIMETRA-LOG-MIB", "tmnxLogIdDropped"), ("TIMETRA-LOG-MIB", "tmnxLogIdTimeFormat"), ("TIMETRA-LOG-MIB", "tmnxLogFilterRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogFilterDescription"), ("TIMETRA-LOG-MIB", "tmnxLogFilterDefaultAction"), ("TIMETRA-LOG-MIB", "tmnxLogFilterInUse"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsDescription"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsAction"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsApplication"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsApplOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsNumber"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsNumberOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSeverity"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSeverityOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSubject"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSubjectOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSubjectRegexp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV4v0Group = tmnxLogV4v0Group.setStatus('obsolete')
tmnxSnmpSetErrsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 16)).setObjects(("TIMETRA-LOG-MIB", "tmnxSnmpSetErrsMax"), ("TIMETRA-LOG-MIB", "tmnxSseVersion"), ("TIMETRA-LOG-MIB", "tmnxSseSeverityLevel"), ("TIMETRA-LOG-MIB", "tmnxSseModuleId"), ("TIMETRA-LOG-MIB", "tmnxSseModuleName"), ("TIMETRA-LOG-MIB", "tmnxSseErrorCode"), ("TIMETRA-LOG-MIB", "tmnxSseErrorName"), ("TIMETRA-LOG-MIB", "tmnxSseErrorMsg"), ("TIMETRA-LOG-MIB", "tmnxSseExtraText"), ("TIMETRA-LOG-MIB", "tmnxSseTimestamp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxSnmpSetErrsGroup = tmnxSnmpSetErrsGroup.setStatus('current')
tmnxLogEventsV5v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 17)).setObjects(("TIMETRA-LOG-MIB", "tmnxEventAppName"), ("TIMETRA-LOG-MIB", "tmnxEventName"), ("TIMETRA-LOG-MIB", "tmnxEventSeverity"), ("TIMETRA-LOG-MIB", "tmnxEventControl"), ("TIMETRA-LOG-MIB", "tmnxEventCounter"), ("TIMETRA-LOG-MIB", "tmnxEventDropCount"), ("TIMETRA-LOG-MIB", "tmnxEventReset"), ("TIMETRA-LOG-MIB", "tmnxEventThrottle"), ("TIMETRA-LOG-MIB", "tmnxEventTest"), ("TIMETRA-LOG-MIB", "tmnxEventThrottleLimit"), ("TIMETRA-LOG-MIB", "tmnxEventThrottleInterval"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogEventsV5v0Group = tmnxLogEventsV5v0Group.setStatus('current')
tmnxLogNotifyObjsV5v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 18)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedFileId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogType"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedName"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedCreateTime"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorTitle"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorMessage"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEventID"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEvents"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetId"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetProblemDescr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotifyObjsV5v0Group = tmnxLogNotifyObjsV5v0Group.setStatus('obsolete')
tmnxLogNotificationV5v0Group = NotificationGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 19)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogSpaceContention"), ("TIMETRA-LOG-MIB", "tmnxLogAdminLocFailed"), ("TIMETRA-LOG-MIB", "tmnxLogBackupLocFailed"), ("TIMETRA-LOG-MIB", "tmnxLogFileRollover"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeleted"), ("TIMETRA-LOG-MIB", "tmnxTestEvent"), ("TIMETRA-LOG-MIB", "tmnxLogTraceError"), ("TIMETRA-LOG-MIB", "tmnxLogEventThrottled"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetProblem"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotificationV5v0Group = tmnxLogNotificationV5v0Group.setStatus('obsolete')
tmnxLogSyslogV5v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 20)).setObjects(("TIMETRA-LOG-MIB", "tmnxSyslogTargetRowStatus"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetDescription"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetUdpPort"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetFacility"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetSeverity"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetMessagePrefix"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetMessagesDropped"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetAddrType"), ("TIMETRA-LOG-MIB", "tmnxSyslogTargetAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogSyslogV5v0Group = tmnxLogSyslogV5v0Group.setStatus('current')
tmnxSnmpTrapV5v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 21)).setObjects(("TIMETRA-LOG-MIB", "tmnxSnmpTrapLogDescription"), ("TIMETRA-LOG-MIB", "tmnxStdRowStatus"), ("TIMETRA-LOG-MIB", "tmnxStdRowLastChanged"), ("TIMETRA-LOG-MIB", "tmnxStdDestAddrType"), ("TIMETRA-LOG-MIB", "tmnxStdDestAddr"), ("TIMETRA-LOG-MIB", "tmnxStdDestPort"), ("TIMETRA-LOG-MIB", "tmnxStdDescription"), ("TIMETRA-LOG-MIB", "tmnxStdVersion"), ("TIMETRA-LOG-MIB", "tmnxStdNotifyCommunity"), ("TIMETRA-LOG-MIB", "tmnxStdSecurityLevel"), ("TIMETRA-LOG-MIB", "tmnxStdMaxTargets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxSnmpTrapV5v0Group = tmnxSnmpTrapV5v0Group.setStatus('current')
tmnxLogV5v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 22)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogIdRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogIdStorageType"), ("TIMETRA-LOG-MIB", "tmnxLogIdAdminStatus"), ("TIMETRA-LOG-MIB", "tmnxLogIdOperStatus"), ("TIMETRA-LOG-MIB", "tmnxLogIdDescription"), ("TIMETRA-LOG-MIB", "tmnxLogIdFilterId"), ("TIMETRA-LOG-MIB", "tmnxLogIdSource"), ("TIMETRA-LOG-MIB", "tmnxLogIdDestination"), ("TIMETRA-LOG-MIB", "tmnxLogIdFileId"), ("TIMETRA-LOG-MIB", "tmnxLogIdSyslogId"), ("TIMETRA-LOG-MIB", "tmnxLogIdMaxMemorySize"), ("TIMETRA-LOG-MIB", "tmnxLogIdConsoleSession"), ("TIMETRA-LOG-MIB", "tmnxLogIdForwarded"), ("TIMETRA-LOG-MIB", "tmnxLogIdDropped"), ("TIMETRA-LOG-MIB", "tmnxLogIdTimeFormat"), ("TIMETRA-LOG-MIB", "tmnxLogFilterRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogFilterDescription"), ("TIMETRA-LOG-MIB", "tmnxLogFilterDefaultAction"), ("TIMETRA-LOG-MIB", "tmnxLogFilterInUse"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsDescription"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsAction"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsApplication"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsApplOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsNumber"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsNumberOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSeverity"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSeverityOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSubject"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSubjectOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsSubjectRegexp"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsRouter"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsRouterOperator"), ("TIMETRA-LOG-MIB", "tmnxLogFilterParamsRouterRegexp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogV5v0Group = tmnxLogV5v0Group.setStatus('current')
tmnxLogObsoleteObjsV5v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 23)).setObjects(("TIMETRA-LOG-MIB", "tmnxSyslogTargetAddress"), ("TIMETRA-LOG-MIB", "tmnxStgRowStatus"), ("TIMETRA-LOG-MIB", "tmnxStgDescription"), ("TIMETRA-LOG-MIB", "tmnxStgVersion"), ("TIMETRA-LOG-MIB", "tmnxStgNotifyCommunity"), ("TIMETRA-LOG-MIB", "tmnxStgSecurityLevel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogObsoleteObjsV5v0Group = tmnxLogObsoleteObjsV5v0Group.setStatus('current')
tmnxLogNotifyObjsV6v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 24)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedFileId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogType"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedName"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedCreateTime"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorTitle"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorMessage"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEventID"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEvents"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetId"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetProblemDescr"), ("TIMETRA-LOG-MIB", "tmnxLogNotifyApInterval"), ("TIMETRA-LOG-MIB", "tmnxStdReplayStartEvent"), ("TIMETRA-LOG-MIB", "tmnxStdReplayEndEvent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotifyObjsV6v0Group = tmnxLogNotifyObjsV6v0Group.setStatus('obsolete')
tmnxLogNotificationV6v0Group = NotificationGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 25)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogSpaceContention"), ("TIMETRA-LOG-MIB", "tmnxLogAdminLocFailed"), ("TIMETRA-LOG-MIB", "tmnxLogBackupLocFailed"), ("TIMETRA-LOG-MIB", "tmnxLogFileRollover"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeleted"), ("TIMETRA-LOG-MIB", "tmnxTestEvent"), ("TIMETRA-LOG-MIB", "tmnxLogTraceError"), ("TIMETRA-LOG-MIB", "tmnxLogEventThrottled"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetProblem"), ("TIMETRA-LOG-MIB", "tmnxLogAccountingDataLoss"), ("TIMETRA-LOG-MIB", "tmnxStdEventsReplayed"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotificationV6v0Group = tmnxLogNotificationV6v0Group.setStatus('current')
tmnxSnmpTrapDestV6v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 26)).setObjects(("TIMETRA-LOG-MIB", "tmnxStdReplay"), ("TIMETRA-LOG-MIB", "tmnxStdReplayStart"), ("TIMETRA-LOG-MIB", "tmnxStdReplayLastTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxSnmpTrapDestV6v0Group = tmnxSnmpTrapDestV6v0Group.setStatus('current')
tmnxLogAccountingPolicyV6v1Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 27)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogApDefaultInterval"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogAccountingPolicyV6v1Group = tmnxLogAccountingPolicyV6v1Group.setStatus('current')
tmnxLogAccountingPolicyCRV7v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 28)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogApCrLastChanged"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeDelta"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeQueue"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeOCntr"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeQICounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeQECounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeOICounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeOECounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrSignChangeAACounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrAACounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrQueueRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogApCrQueueLastChanged"), ("TIMETRA-LOG-MIB", "tmnxLogApCrQueueICounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrQueueECounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrOverrideCntrRowStatus"), ("TIMETRA-LOG-MIB", "tmnxLogApCrOverrideCntrLastChngd"), ("TIMETRA-LOG-MIB", "tmnxLogApCrOverrideCntrICounters"), ("TIMETRA-LOG-MIB", "tmnxLogApCrOverrideCntrECounters"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogAccountingPolicyCRV7v0Group = tmnxLogAccountingPolicyCRV7v0Group.setStatus('current')
tmnxLogRoutePreferenceV7v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 29)).setObjects(("TIMETRA-LOG-MIB", "tmnxEventPrimaryRoutePref"), ("TIMETRA-LOG-MIB", "tmnxEventSecondaryRoutePref"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogRoutePreferenceV7v0Group = tmnxLogRoutePreferenceV7v0Group.setStatus('current')
tmnxLogNotifyObjsV8v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 30)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedFileId"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLogType"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedLocation"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedName"), ("TIMETRA-LOG-MIB", "tmnxLogFileDeletedCreateTime"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorTitle"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorSubject"), ("TIMETRA-LOG-MIB", "tmnxLogTraceErrorMessage"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEventID"), ("TIMETRA-LOG-MIB", "tmnxLogThrottledEvents"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetId"), ("TIMETRA-LOG-MIB", "tmnxSysLogTargetProblemDescr"), ("TIMETRA-LOG-MIB", "tmnxLogNotifyApInterval"), ("TIMETRA-LOG-MIB", "tmnxStdReplayStartEvent"), ("TIMETRA-LOG-MIB", "tmnxStdReplayEndEvent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotifyObjsV8v0Group = tmnxLogNotifyObjsV8v0Group.setStatus('current')
tmnxLogNotificationV9v0Group = NotificationGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 31)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogEventOverrun"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotificationV9v0Group = tmnxLogNotificationV9v0Group.setStatus('current')
tmnxLogEventDampedV8v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 32)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogConfigEventsDamped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogEventDampedV8v0Group = tmnxLogEventDampedV8v0Group.setStatus('current')
tmnxLogApV9v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 33)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogApDataLossCount"), ("TIMETRA-LOG-MIB", "tmnxLogApLastDataLossTimeStamp"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogApV9v0Group = tmnxLogApV9v0Group.setStatus('current')
tmnxLogExRbkOpGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 34)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogExRbkOpTblLastChange"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpMaxEntries"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpLastChanged"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpType"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpStatus"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpBegin"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpEnd"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpFile"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpUser"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkOpNumEvents"), ("TIMETRA-LOG-MIB", "tmnxLogExRbkEventOID"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogExRbkOpGroup = tmnxLogExRbkOpGroup.setStatus('current')
tmnxLogNotifyObjsV10v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 35)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogExecRollbackOpIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogNotifyObjsV10v0Group = tmnxLogNotifyObjsV10v0Group.setStatus('current')
tmnxLogApExtGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 36)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogApToFileType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogApExtGroup = tmnxLogApExtGroup.setStatus('current')
tmnxLogAppRouteNotifV10v0Group = ObjectGroup((1, 3, 6, 1, 4, 1, 6527, 3, 1, 1, 12, 2, 37)).setObjects(("TIMETRA-LOG-MIB", "tmnxLogColdStartWaitTime"), ("TIMETRA-LOG-MIB", "tmnxLogRouteRecoveryWaitTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
tmnxLogAppRouteNotifV10v0Group = tmnxLogAppRouteNotifV10v0Group.setStatus('current')
mibBuilder.exportSymbols("TIMETRA-LOG-MIB", tmnxLogFileDeletedName=tmnxLogFileDeletedName, tmnxEventAppIndex=tmnxEventAppIndex, TmnxLogFilterEntryId=TmnxLogFilterEntryId, tmnxLogFileIdBackupLoc=tmnxLogFileIdBackupLoc, tmnxEventCounter=tmnxEventCounter, tmnxStdReplay=tmnxStdReplay, tmnxLogApCrSignChangeDelta=tmnxLogApCrSignChangeDelta, tmnxLogIdForwarded=tmnxLogIdForwarded, tmnxLogGroups=tmnxLogGroups, tmnxLogApStorageType=tmnxLogApStorageType, tmnxLogFileIdStorageType=tmnxLogFileIdStorageType, tmnxStdDestAddr=tmnxStdDestAddr, tmnxLogApRowStatus=tmnxLogApRowStatus, tmnxEventThrottleLimit=tmnxEventThrottleLimit, tmnxLogCompliances=tmnxLogCompliances, tmnxLogApCrQueueLastChanged=tmnxLogApCrQueueLastChanged, tmnxSnmpTrapLogEntry=tmnxSnmpTrapLogEntry, tmnxLogExRbkOpIndex=tmnxLogExRbkOpIndex, tmnxStdDescription=tmnxStdDescription, tmnxLogApCrOverrideCntrId=tmnxLogApCrOverrideCntrId, tmnxSyslogTargetMessagePrefix=tmnxSyslogTargetMessagePrefix, tmnxLogFilterParamsApplication=tmnxLogFilterParamsApplication, tmnxLogV8v0Compliance=tmnxLogV8v0Compliance, tmnxLogIdMaxMemorySize=tmnxLogIdMaxMemorySize, tmnxSnmpSetErrsGroup=tmnxSnmpSetErrsGroup, tmnxLogConfigEventsDamped=tmnxLogConfigEventsDamped, tmnxSseModuleName=tmnxSseModuleName, tmnxLogFilterInUse=tmnxLogFilterInUse, tmnxLogNotifyObjsV8v0Group=tmnxLogNotifyObjsV8v0Group, tmnxSseRequestId=tmnxSseRequestId, tmnxLogFilterDefaultAction=tmnxLogFilterDefaultAction, TmnxLogFileId=TmnxLogFileId, tmnxLogApDataLossCount=tmnxLogApDataLossCount, tmnxStgDestAddress=tmnxStgDestAddress, tmnxStdDestPort=tmnxStdDestPort, tmnxStdReplayStartEvent=tmnxStdReplayStartEvent, tmnxLogApDefaultInterval=tmnxLogApDefaultInterval, tmnxLogThrottledEventID=tmnxLogThrottledEventID, tmnxLogExRbkEventOID=tmnxLogExRbkEventOID, tmnxLogIdIndex=tmnxLogIdIndex, TmnxSyslogSeverity=TmnxSyslogSeverity, tmnxEventAppEntry=tmnxEventAppEntry, tmnxLogNotificationV6v0Group=tmnxLogNotificationV6v0Group, tmnxLogFileIdRolloverTime=tmnxLogFileIdRolloverTime, 
tmnxLogApRecord=tmnxLogApRecord, tmnxEventDropCount=tmnxEventDropCount, tmnxSseModuleId=tmnxSseModuleId, tmnxLogFileDeletedLogType=tmnxLogFileDeletedLogType, tmnxStgDescription=tmnxStgDescription, tmnxSyslogTargetIndex=tmnxSyslogTargetIndex, tmnxLogExecRollbackEventTable=tmnxLogExecRollbackEventTable, tmnxLogNotifyObjsV10v0Group=tmnxLogNotifyObjsV10v0Group, tmnxLogAccountingPolicyV6v1Group=tmnxLogAccountingPolicyV6v1Group, tmnxLogNotifyPrefix=tmnxLogNotifyPrefix, tmnxLogExRbkOpStatus=tmnxLogExRbkOpStatus, tmnxLogAppRouteNotifV10v0Group=tmnxLogAppRouteNotifV10v0Group, tmnxLogRouteRecoveryWaitTime=tmnxLogRouteRecoveryWaitTime, tmnxSnmpTrapGroupTable=tmnxSnmpTrapGroupTable, tmnxLogIdStorageType=tmnxLogIdStorageType, tmnxLogFilterParamsRouterRegexp=tmnxLogFilterParamsRouterRegexp, tmnxLogBackupLocFailed=tmnxLogBackupLocFailed, tmnxLogV4v0Group=tmnxLogV4v0Group, PYSNMP_MODULE_ID=timetraLogMIBModule, tmnxStgDestPort=tmnxStgDestPort, tmnxLogApAdminStatus=tmnxLogApAdminStatus, tmnxLogExRbkOpGroup=tmnxLogExRbkOpGroup, tmnxStdReplayEndEvent=tmnxStdReplayEndEvent, tmnxLogTraceError=tmnxLogTraceError, tmnxLogV5v0Group=tmnxLogV5v0Group, tmnxSnmpTrapV5v0Group=tmnxSnmpTrapV5v0Group, tmnxLogApCrSignChangeQueue=tmnxLogApCrSignChangeQueue, tmnxLogApCrOverrideCntrICounters=tmnxLogApCrOverrideCntrICounters, TmnxSyslogId=TmnxSyslogId, tmnxLogAccountingPolicyCRV7v0Group=tmnxLogAccountingPolicyCRV7v0Group, tmnxSnmpSetErrsMax=tmnxSnmpSetErrsMax, tmnxLogExRbkNotifyObjects=tmnxLogExRbkNotifyObjects, tmnxLogTraceErrorSubject=tmnxLogTraceErrorSubject, tmnxLogApCrOverrideCntrECounters=tmnxLogApCrOverrideCntrECounters, tmnxLogFilterParamsTable=tmnxLogFilterParamsTable, tmnxLogExRbkOpType=tmnxLogExRbkOpType, tmnxLogFilterParamsRouterOperator=tmnxLogFilterParamsRouterOperator, tmnxSnmpTrapGroupEntry=tmnxSnmpTrapGroupEntry, tmnxSseTimestamp=tmnxSseTimestamp, tmnxEventSecondaryRoutePref=tmnxEventSecondaryRoutePref, tmnxLogExRbkOpMaxEntries=tmnxLogExRbkOpMaxEntries, 
tmnxLogExRbkOpEnd=tmnxLogExRbkOpEnd, tmnxEventAppName=tmnxEventAppName, tmnxLogV10v0Compliance=tmnxLogV10v0Compliance, tmnxLogFileIdLogId=tmnxLogFileIdLogId, TmnxPerceivedSeverity=TmnxPerceivedSeverity, tmnxStgIndex=tmnxStgIndex, tmnxLogExecRollbackOpTable=tmnxLogExecRollbackOpTable, tmnxLogColdStartWaitTime=tmnxLogColdStartWaitTime, tmnxLogIdDescription=tmnxLogIdDescription, tmnxEventThrottleInterval=tmnxEventThrottleInterval, tmnxEventPrimaryRoutePref=tmnxEventPrimaryRoutePref, tmnxLogApToFileId=tmnxLogApToFileId, tmnxLogIdDestination=tmnxLogIdDestination, tmnxSnmpSetErrsEntry=tmnxSnmpSetErrsEntry, TmnxLogIdIndex=TmnxLogIdIndex, tmnxLogFilterParamsNumberOperator=tmnxLogFilterParamsNumberOperator, tmnxLogApCustRecordQueueEntry=tmnxLogApCustRecordQueueEntry, tmnxLogNotificationV5v0Group=tmnxLogNotificationV5v0Group, tmnxEventControl=tmnxEventControl, tmnxLogAccountingDataLoss=tmnxLogAccountingDataLoss, tmnxLogTraceErrorTitle=tmnxLogTraceErrorTitle, tmnxLogExRbkOpNumEvents=tmnxLogExRbkOpNumEvents, tmnxSyslogTargetDescription=tmnxSyslogTargetDescription, tmnxLogFileIdEntry=tmnxLogFileIdEntry, tmnxEventReset=tmnxEventReset, tmnxLogApCrSignChangeAACounters=tmnxLogApCrSignChangeAACounters, tmnxSseErrorName=tmnxSseErrorName, TmnxLogFilterOperator=TmnxLogFilterOperator, tmnxLogObsoleteObjsV5v0Group=tmnxLogObsoleteObjsV5v0Group, tmnxLogFilterParamsAction=tmnxLogFilterParamsAction, tmnxLogFileId=tmnxLogFileId, tmnxSyslogTargetMessagesDropped=tmnxSyslogTargetMessagesDropped, tmnxLogExecRollbackEventEntry=tmnxLogExecRollbackEventEntry, tmnxLogFileIdRowStatus=tmnxLogFileIdRowStatus, tmnxLogV7v0Compliance=tmnxLogV7v0Compliance, tmnxStgRowStatus=tmnxStgRowStatus, tmnxLogSyslogV5v0Group=tmnxLogSyslogV5v0Group, TmnxUdpPort=TmnxUdpPort, tmnxEventName=tmnxEventName, tmnxEventAppTable=tmnxEventAppTable, tmnxLogFileIdRetainTime=tmnxLogFileIdRetainTime, tmnxSnmpTrapDestV6v0Group=tmnxSnmpTrapDestV6v0Group, tmnxLogFilterParamsSubject=tmnxLogFilterParamsSubject, tmnxLogObjs=tmnxLogObjs, 
tmnxLogIdAdminStatus=tmnxLogIdAdminStatus, tmnxLogMaxLogs=tmnxLogMaxLogs, tmnxLogIdTable=tmnxLogIdTable, tmnxLogNotifications=tmnxLogNotifications, tmnxLogFilterParamsSeverity=tmnxLogFilterParamsSeverity, tmnxSyslogTargetTable=tmnxSyslogTargetTable, tmnxSseErrorCode=tmnxSseErrorCode, tmnxLogEventHistGeneralObjs=tmnxLogEventHistGeneralObjs, tmnxSysLogTargetId=tmnxSysLogTargetId, tmnxLogSyslogGroup=tmnxLogSyslogGroup, tmnxStdRowLastChanged=tmnxStdRowLastChanged, tmnxSnmpTrapDestEntry=tmnxSnmpTrapDestEntry, tmnxLogApCrOverrideCntrEntry=tmnxLogApCrOverrideCntrEntry, tmnxLogFilterTable=tmnxLogFilterTable, tmnxLogV4v0Compliance=tmnxLogV4v0Compliance, tmnxLogTraceErrorMessage=tmnxLogTraceErrorMessage, tmnxLogEventDampedV8v0Group=tmnxLogEventDampedV8v0Group, timetraLogMIBModule=timetraLogMIBModule, tmnxLogIdSyslogId=tmnxLogIdSyslogId, tmnxLogFilterParamsRowStatus=tmnxLogFilterParamsRowStatus, tmnxSyslogTargetSeverity=tmnxSyslogTargetSeverity, tmnxLogIdFileId=tmnxLogIdFileId, tmnxLogIdEntry=tmnxLogIdEntry, tmnxStgVersion=tmnxStgVersion, tmnxLogApInterval=tmnxLogApInterval, tmnxLogExRbkOpFile=tmnxLogExRbkOpFile, tmnxLogFileDeletedLocation=tmnxLogFileDeletedLocation, tmnxLogApEntry=tmnxLogApEntry, tmnxLogNotifyObjsV6v0Group=tmnxLogNotifyObjsV6v0Group, tmnxEventEntry=tmnxEventEntry, tmnxLogFilterParamsIndex=tmnxLogFilterParamsIndex, tmnxLogFilterId=tmnxLogFilterId, tmnxLogExRbkEventIndex=tmnxLogExRbkEventIndex, TmnxLogFileType=TmnxLogFileType, tmnxLogFileIdCreateTime=tmnxLogFileIdCreateTime, tmnxLogV9v0Compliance=tmnxLogV9v0Compliance, tmnxSseAddress=tmnxSseAddress, tmnxEventSeverity=tmnxEventSeverity, tmnxLogFilterParamsSubjectOperator=tmnxLogFilterParamsSubjectOperator, tmnxStdNotifyCommunity=tmnxStdNotifyCommunity, tmnxLogApCrQueueRowStatus=tmnxLogApCrQueueRowStatus, tmnxLogConformance=tmnxLogConformance, tmnxSyslogTargetAddress=tmnxSyslogTargetAddress, tmnxEventTable=tmnxEventTable, tmnxLogApCustRecordQueueTable=tmnxLogApCustRecordQueueTable, 
tmnxStdEventsReplayed=tmnxStdEventsReplayed, tmnxLogGlobalGroup=tmnxLogGlobalGroup, tmnxLogNotifyObjsV5v0Group=tmnxLogNotifyObjsV5v0Group, tmnxLogExecRollbackOpIndex=tmnxLogExecRollbackOpIndex, tmnxLogFilterParamsApplOperator=tmnxLogFilterParamsApplOperator, tmnxLogFileDeletedLogId=tmnxLogFileDeletedLogId, tmnxLogIdFilterId=tmnxLogIdFilterId, tmnxLogFilterParamsNumber=tmnxLogFilterParamsNumber, tmnxEventID=tmnxEventID, tmnxLogFileDeletedFileId=tmnxLogFileDeletedFileId, tmnxLogFilterParamsDescription=tmnxLogFilterParamsDescription, tmnxSseSeverityLevel=tmnxSseSeverityLevel, TmnxSyslogFacility=TmnxSyslogFacility, TmnxEventNumber=TmnxEventNumber, tmnxLogAccountingPolicyGroup=tmnxLogAccountingPolicyGroup, tmnxLogFileIdAdminLocation=tmnxLogFileIdAdminLocation, tmnxTestEvent=tmnxTestEvent, tmnxLogExRbkOpTblLastChange=tmnxLogExRbkOpTblLastChange, tmnxStdDestAddrType=tmnxStdDestAddrType, tmnxStdReplayLastTime=tmnxStdReplayLastTime, tmnxLogApCrLastChanged=tmnxLogApCrLastChanged, tmnxLogApToFileType=tmnxLogApToFileType, tmnxLogApCrSignChangeQICounters=tmnxLogApCrSignChangeQICounters, tmnxLogFilterDescription=tmnxLogFilterDescription, tmnxLogEventHistoryObjs=tmnxLogEventHistoryObjs, tmnxLogIdConsoleSession=tmnxLogIdConsoleSession, tmnxSyslogTargetUdpPort=tmnxSyslogTargetUdpPort, tmnxSseSnmpPort=tmnxSseSnmpPort, tmnxStgSecurityLevel=tmnxStgSecurityLevel, tmnxLogV6v1Compliance=tmnxLogV6v1Compliance, tmnxSnmpSetErrsTable=tmnxSnmpSetErrsTable, tmnxSseVersion=tmnxSseVersion, tmnxLogApPolicyId=tmnxLogApPolicyId, tmnxLogApDescription=tmnxLogApDescription, tmnxLogApCrOverrideCntrRowStatus=tmnxLogApCrOverrideCntrRowStatus, tmnxSysLogTargetProblem=tmnxSysLogTargetProblem, tmnxLogIdOperStatus=tmnxLogIdOperStatus, tmnxLogApCrSignChangeOICounters=tmnxLogApCrSignChangeOICounters, tmnxLogFilterParamsSeverityOperator=tmnxLogFilterParamsSeverityOperator, tmnxLogApPortType=tmnxLogApPortType, tmnxSseErrorMsg=tmnxSseErrorMsg, tmnxLogApOperStatus=tmnxLogApOperStatus, 
tmnxLogApCustRecordTable=tmnxLogApCustRecordTable, tmnxLogFileIdGroup=tmnxLogFileIdGroup, tmnxLogApTable=tmnxLogApTable, tmnxLogAdminLocFailed=tmnxLogAdminLocFailed, tmnxSyslogTargetAddrType=tmnxSyslogTargetAddrType, tmnxLogEventThrottled=tmnxLogEventThrottled, tmnxStdVersion=tmnxStdVersion, tmnxLogNotifyObjsR3r0Group=tmnxLogNotifyObjsR3r0Group, tmnxEventThrottle=tmnxEventThrottle, tmnxLogApCrSignChangeOECounters=tmnxLogApCrSignChangeOECounters, tmnxSnmpTrapLogDescription=tmnxSnmpTrapLogDescription, tmnxLogApCrQueueECounters=tmnxLogApCrQueueECounters, tmnxSyslogTargetRowStatus=tmnxSyslogTargetRowStatus, tmnxLogApDefault=tmnxLogApDefault, tmnxLogApCrOverrideCntrLastChngd=tmnxLogApCrOverrideCntrLastChngd, tmnxLogApCustRecordEntry=tmnxLogApCustRecordEntry, tmnxLogExRbkOpUser=tmnxLogExRbkOpUser, tmnxLogRoutePreferenceV7v0Group=tmnxLogRoutePreferenceV7v0Group, tmnxLogFileIdPathName=tmnxLogFileIdPathName, tmnxSnmpTrapDestTable=tmnxSnmpTrapDestTable, tmnxSnmpTrapGroup=tmnxSnmpTrapGroup, tmnxLogNotificationV9v0Group=tmnxLogNotificationV9v0Group, tmnxLogFileIdDescription=tmnxLogFileIdDescription, tmnxLogFileIdLogType=tmnxLogFileIdLogType, tmnxLogFilterParamsRouter=tmnxLogFilterParamsRouter, tmnxEventTest=tmnxEventTest, tmnxLogFileIdTable=tmnxLogFileIdTable, tmnxLogApCrQueueICounters=tmnxLogApCrQueueICounters, TmnxLogFilterId=TmnxLogFilterId, tmnxSseExtraText=tmnxSseExtraText, tmnxLogFileRollover=tmnxLogFileRollover, tmnxLogApCrOverrideCntrTable=tmnxLogApCrOverrideCntrTable, tmnxLogIdTimeFormat=tmnxLogIdTimeFormat, tmnxLogSpaceContention=tmnxLogSpaceContention, tmnxSyslogTargetFacility=tmnxSyslogTargetFacility, tmnxLogExecRollbackOpEntry=tmnxLogExecRollbackOpEntry, tmnxStdName=tmnxStdName, tmnxStdReplayStart=tmnxStdReplayStart)
mibBuilder.exportSymbols("TIMETRA-LOG-MIB", tmnxLogApCrQueueId=tmnxLogApCrQueueId, tmnxLogFileIdOperLocation=tmnxLogFileIdOperLocation, tmnxLogNotificationObjects=tmnxLogNotificationObjects, tmnxLogIdDropped=tmnxLogIdDropped, tmnxLogFilterRowStatus=tmnxLogFilterRowStatus, tmnxStdRowStatus=tmnxStdRowStatus, tmnxLogFilterParamsEntry=tmnxLogFilterParamsEntry, tmnxLogExRbkOpBegin=tmnxLogExRbkOpBegin, tmnxLogApExtGroup=tmnxLogApExtGroup, tmnxLogEventsV5v0Group=tmnxLogEventsV5v0Group, tmnxLogIdRowStatus=tmnxLogIdRowStatus, tmnxLogApLastDataLossTimeStamp=tmnxLogApLastDataLossTimeStamp, tmnxLogApCrSignChangeQECounters=tmnxLogApCrSignChangeQECounters, tmnxSnmpTrapLogTable=tmnxSnmpTrapLogTable, tmnxLogThrottledEvents=tmnxLogThrottledEvents, tmnxLogApCrAACounters=tmnxLogApCrAACounters, tmnxLogFileDeletedCreateTime=tmnxLogFileDeletedCreateTime, tmnxLogNotifyApInterval=tmnxLogNotifyApInterval, tmnxSyslogTargetEntry=tmnxSyslogTargetEntry, tmnxLogEventOverrun=tmnxLogEventOverrun, tmnxLogIdSource=tmnxLogIdSource, tmnxSseAddressType=tmnxSseAddressType, tmnxStgNotifyCommunity=tmnxStgNotifyCommunity, tmnxLogFilterEntry=tmnxLogFilterEntry, tmnxStdSecurityLevel=tmnxStdSecurityLevel, tmnxStdIndex=tmnxStdIndex, tmnxLogV6v0Compliance=tmnxLogV6v0Compliance, tmnxLogApV9v0Group=tmnxLogApV9v0Group, tmnxLogFilterParamsSubjectRegexp=tmnxLogFilterParamsSubjectRegexp, tmnxSyslogTargetAddr=tmnxSyslogTargetAddr, tmnxLogExRbkOpLastChanged=tmnxLogExRbkOpLastChanged, tmnxLogV5v0Compliance=tmnxLogV5v0Compliance, tmnxLogNotificationR3r0Group=tmnxLogNotificationR3r0Group, TmnxCFlash=TmnxCFlash, tmnxLogEventsR2r1Group=tmnxLogEventsR2r1Group, TmnxSyslogIdOrEmpty=TmnxSyslogIdOrEmpty, tmnxSysLogTargetProblemDescr=tmnxSysLogTargetProblemDescr, tmnxLogApCrSignChangeOCntr=tmnxLogApCrSignChangeOCntr, tmnxStdMaxTargets=tmnxStdMaxTargets, tmnxLogFileDeleted=tmnxLogFileDeleted)
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.contrib import admin
from django.contrib.admin.util import unquote, model_ngettext
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.utils import formats, simplejson
from django.utils.translation import ugettext_lazy, ugettext as _
from locking.models import ObjectLockedError
class LockableAdmin(admin.ModelAdmin):
    """ModelAdmin mixin adding row-level locking to the Django admin.

    Provides two per-object AJAX views (``unlock`` and ``refresh_lock``),
    a superuser action to force-unlock, and a lock-indicator column for
    the change list.  Targets Python 2-era Django (uses
    ``django.conf.urls.defaults``, ``simplejson`` and the ``mimetype``
    kwarg of ``HttpResponse``).
    """
    class Media():
        # Static assets injected into admin pages to drive lock handling.
        css = {
            'all': ('locking/css/locking.css',)
        }
        js = (
            'locking/js/admin.locking.js',
            'locking/js/jquery.url.packed.js',
        )
    def force_unlock(self, request, queryset):
        """
        Admin action to force unlocking all objects in `queryset`.
        Intended for superusers.
        """
        if not self.has_change_permission(request):
            raise PermissionDenied
        for obj in queryset:
            obj.unlock()
        n = queryset.count()
        if n:
            # Mirrors the phrasing of Django's built-in bulk-action messages.
            self.message_user(request, _("Successfully unlocked %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(self.opts, n)
            })
    force_unlock.short_description = ugettext_lazy("Force unlocking")
    def unlock_view(self, request, object_id, extra_context=None):
        """AJAX view: release the current user's lock on the object.

        Returns 200 on success, 403 when the lock is held by someone else.
        """
        obj = self.get_object(request, unquote(object_id))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        # Users who don't have exclusive access to an object anymore may still
        # request we unlock an object. This happens e.g. when a user navigates
        # away from an edit screen that's been open for very long.
        # When this happens, LockableModel.unlock_for will throw an exception,
        # and we just ignore the request.
        # That way, any new lock that may since have been put in place by another
        # user won't get accidentally overwritten.
        try:
            obj.unlock_for(request.user)
            obj._is_a_locking_request = True
            return HttpResponse(status=200)
        except ObjectLockedError:
            return HttpResponse(status=403)
    def refresh_lock_view(self, request, object_id, extra_context=None):
        """AJAX view: renew the current user's lock and return the new
        lock/modification timestamps as JSON (409 on a conflicting lock)."""
        obj = self.get_object(request, unquote(object_id))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        try:
            obj.lock_for(request.user)
        except ObjectLockedError:
            # The user tried to overwrite an existing lock by another user.
            # No can do, pal!
            return HttpResponse(status=409) # Conflict
        # Format date like a DateTimeInput would have done
        # (note: `format` shadows the builtin; kept for byte-compatibility)
        format = formats.get_format('DATETIME_INPUT_FORMATS')[0]
        original_locked_at = obj.locked_at.strftime(format)
        original_modified_at = obj.modified_at.strftime(format)
        response = simplejson.dumps({
            'original_locked_at': original_locked_at,
            'original_modified_at': original_modified_at,
        })
        return HttpResponse(response, mimetype="application/json")
    def get_urls(self):
        """
        Override get_urls() to add a locking URLs.
        """
        urls = super(LockableAdmin, self).get_urls()
        info = self.model._meta.app_label, self.model._meta.module_name
        # Locking URLs must precede the default admin URLs so that
        # '<pk>/unlock/' is not swallowed by the generic change view pattern.
        locking_urls = patterns('',
            url(r'^(.+)/unlock/$',
                self.admin_site.admin_view(self.unlock_view),
                name='unlock_%s_%s' % info),
            url(r'^(.+)/refresh_lock/$',
                self.admin_site.admin_view(self.refresh_lock_view),
                name='refresh_lock_%s_%s' % info),
            )
        return locking_urls + urls
    def changelist_view(self, request, extra_context=None):
        # we need the request objects in a few places where it's usually not present,
        # so we're tacking it on to the LockableAdmin class
        self.request = request
        return super(LockableAdmin, self).changelist_view(request, extra_context)
    def save_model(self, request, obj, form, change, *args, **kwargs):
        """Release the editor's lock on save (creation needs no lock)."""
        # object creation doesn't need/have locking in place
        if not form.is_locking_disabled() and obj.pk:
            obj.unlock_for(request.user)
        super(LockableAdmin, self).save_model(request, obj, form, change, *args,
            **kwargs)
    def get_object(self, request, object_id):
        """Fetch the object and tag it with the requesting user, so model-level
        locking code can tell who is asking."""
        obj = super(LockableAdmin, self).get_object(request, object_id)
        if obj is not None:
            obj._request_user = request.user
        return obj
    def lock(self, obj):
        """Change-list column: render a lock icon with a tooltip describing
        who holds the lock and for how long (empty string when unlocked)."""
        message = ''
        if obj.is_locked:
            seconds_remaining = obj.lock_seconds_remaining
            # NOTE(review): under Python 3 this is float division; this module
            # targets Python 2, where it truncates to whole minutes — confirm.
            minutes_remaining = seconds_remaining / 60
            if self.request.user == obj.locked_by:
                locked_until_self = _("You have a lock on this article for %s more minutes.") \
                    % (minutes_remaining)
                message = '<img src="%slocking/img/page_edit.png" title="%s" />' \
                    % (settings.MEDIA_URL, locked_until_self)
            else:
                locked_until = _("Still locked for %(minutes)s minutes by %(user)s") \
                    % {"minutes": minutes_remaining, "user": obj.locked_by}
                message = '<img src="%slocking/img/lock.png" title="%s" />' \
                    % (settings.MEDIA_URL, locked_until)
        return message
    lock.allow_tags = True
    list_display = ('__str__', 'lock')
|
# coding=utf-8
from celery import Celery
from flask import current_app
from . import celeryconfig
# Celery application for this worker; settings come from the sibling
# `celeryconfig` module (broker/backend URLs, serializers, etc.).
factorial_app = Celery("factorial_app")
factorial_app.config_from_object(celeryconfig)
@factorial_app.task(shared=False)
def my_factorial(n):
    """Compute n! iteratively and print the result (returns None)."""
    product = 1
    # Multiplying from 2 upward; the factor 1 is a no-op.
    for factor in range(2, n + 1):
        product *= factor
    print(f"{n}! = {product}")  # noqa
|
from pathlib import Path
from pymmcore_plus import CMMCorePlus
from useq import MDASequence
from raman_mda_engine import RamanEngine
# Settings consumed by RamanEngine: acquire Raman at the z-stack centre,
# during the brightfield ("BF") channel.
metadata = {
    "raman": {
        "z": "center",
        "channel": "BF",
    },
}
# Multi-dimensional acquisition: 2 stage positions, 2 channels,
# 20 one-second timepoints, 9 z-slices (range 4 um, step 0.5 um).
mda = MDASequence(
    metadata=metadata,
    stage_positions=[(100, 100, 30), (200, 150, 35)],
    channels=["BF", "DAPI"],
    time_plan={"interval": 1, "loops": 20},
    z_plan={"range": 4, "step": 0.5},
    axis_order="tpcz",
)
print(mda.axis_order.index("z"))
print(mda.shape)
# Drive the acquisition against the demo/test configuration with the
# custom Raman engine registered in place of the default MDA engine.
core = CMMCorePlus.instance()
cfg = Path(__file__).parent.parent / "tests" / "test-config.cfg"
core.loadSystemConfiguration(cfg)
engine = RamanEngine()
core.register_mda_engine(engine)
core.run_mda(mda)
|
import datetime
from decimal import Decimal
from itertools import cycle
from django.test import TestCase
from contracts.mommy_recipes import get_contract_recipe
from ..models import Contract, convert_to_tsquery
class ContractTestCase(TestCase):
    """Unit tests for Contract rate accessors, escalation, pricing fields,
    contract-year adjustment and end-year calculation."""
    def make_contract_with_rates(self, **kwargs):
        """Build a contract with a known, distinct hourly rate per year.

        Extra keyword arguments override/extend the recipe defaults.
        """
        final_kwargs = dict(
            hourly_rate_year1=100.00,
            hourly_rate_year2=102.20,
            hourly_rate_year3=103.30,
            hourly_rate_year4=104.40,
            hourly_rate_year5=105.50)
        final_kwargs.update(kwargs)
        return get_contract_recipe().make(**final_kwargs)
    def test_readable_business_size(self):
        business_sizes = ('O', 'S')
        contract1, contract2 = get_contract_recipe().make(
            _quantity=2, business_size=cycle(business_sizes))
        self.assertEqual(contract1.get_readable_business_size(),
                         'other than small business')
        self.assertEqual(
            contract2.get_readable_business_size(), 'small business')
    def test_get_education_code(self):
        c = get_contract_recipe().make()
        self.assertEqual(c.get_education_code('Bachelors'), 'BA')
        # Unknown education levels map to None.
        self.assertIsNone(c.get_education_code('Nursing'), None)
    def test_normalize_rate(self):
        # Strips currency symbols, thousands separators and stray commas.
        c = get_contract_recipe().make()
        self.assertEqual(c.normalize_rate('$1,000.00,'), 1000.0)
    def test_convert_to_tsquery(self):
        self.assertEqual(convert_to_tsquery(
            'staff consultant'), 'staff:* & consultant:*')
        self.assertEqual(convert_to_tsquery(
            'senior typist (st)'), 'senior:* & typist:* & st:*')
        # Pure punctuation yields an empty query.
        self.assertEqual(convert_to_tsquery('@$(#)%&**#'), '')
    def test_get_hourly_rate(self):
        c = self.make_contract_with_rates()
        self.assertEqual(c.get_hourly_rate(1), 100.00)
        self.assertEqual(c.get_hourly_rate(2), 102.20)
        self.assertEqual(c.get_hourly_rate(3), 103.30)
        self.assertEqual(c.get_hourly_rate(4), 104.40)
        self.assertEqual(c.get_hourly_rate(5), 105.50)
    def test_get_hourly_rate_raises_on_invalid_year(self):
        # Valid contract years are 1..5 inclusive.
        c = get_contract_recipe().make()
        with self.assertRaises(ValueError):
            c.get_hourly_rate(0)
        with self.assertRaises(ValueError):
            c.get_hourly_rate(6)
    def test_set_hourly_rate(self):
        c = get_contract_recipe().make()
        for i in range(1, 6):
            c.set_hourly_rate(i, 120)
            self.assertEqual(
                getattr(c, 'hourly_rate_year{}'.format(i)), 120)
    def test_set_hourly_rate_raises_on_invalid_year(self):
        c = get_contract_recipe().make()
        with self.assertRaises(ValueError):
            c.set_hourly_rate(0, 50)
        with self.assertRaises(ValueError):
            c.set_hourly_rate(6, 50)
    def test_escalate_hourly_rate_fields(self):
        # escalation_rate is a percentage compounded annually.
        c = get_contract_recipe().make()
        c.escalate_hourly_rate_fields(base_year_rate=100,
                                      escalation_rate=2.5)
        self.assertEqual(c.hourly_rate_year1, 100)
        self.assertAlmostEqual(c.hourly_rate_year2, Decimal(102.50), places=2)
        self.assertAlmostEqual(c.hourly_rate_year3, Decimal(105.06), places=2)
        self.assertAlmostEqual(c.hourly_rate_year4, Decimal(107.69), places=2)
        self.assertAlmostEqual(c.hourly_rate_year5, Decimal(110.38), places=2)
    def test_escalate_sets_prices_to_base_year_rate_when_no_escalation(self):
        c = get_contract_recipe().make()
        c.escalate_hourly_rate_fields(base_year_rate=125.40,
                                      escalation_rate=0)
        self.assertEqual(c.hourly_rate_year1, 125.40)
        self.assertEqual(c.hourly_rate_year2, 125.40)
        self.assertEqual(c.hourly_rate_year3, 125.40)
        self.assertEqual(c.hourly_rate_year4, 125.40)
        self.assertEqual(c.hourly_rate_year5, 125.40)
    def test_escalate_hourly_rate_fields_raises_on_invalid_rate(self):
        # Valid escalation rates are 0 <= rate < 100 (per the assertions below).
        c = get_contract_recipe().make()
        with self.assertRaises(ValueError):
            c.escalate_hourly_rate_fields(base_year_rate=100,
                                          escalation_rate=-5)
        with self.assertRaises(ValueError):
            c.escalate_hourly_rate_fields(base_year_rate=100,
                                          escalation_rate=100)
        with self.assertRaises(ValueError):
            c.escalate_hourly_rate_fields(base_year_rate=100,
                                          escalation_rate=99.1)
    def test_update_price_fields(self):
        """current/next/second-year prices track the rolling contract year,
        becoming None once the corresponding year exceeds year 5."""
        c = self.make_contract_with_rates(
            contract_start=datetime.date(2016, 2, 11),
            contract_end=datetime.date(2021, 2, 11))
        c.contract_year = 1
        c.update_price_fields()
        self.assertEqual(c.current_price, 100.00)
        self.assertEqual(c.next_year_price, 102.20)
        self.assertEqual(c.second_year_price, 103.30)
        c.contract_year = 2
        c.update_price_fields()
        self.assertEqual(c.current_price, 102.20)
        self.assertEqual(c.next_year_price, 103.30)
        self.assertEqual(c.second_year_price, 104.40)
        c.contract_year = 3
        c.update_price_fields()
        self.assertEqual(c.current_price, 103.30)
        self.assertEqual(c.next_year_price, 104.40)
        self.assertEqual(c.second_year_price, 105.50)
        c.contract_year = 4
        c.update_price_fields()
        self.assertEqual(c.current_price, 104.40)
        self.assertEqual(c.next_year_price, 105.50)
        self.assertIsNone(c.second_year_price)
        c.contract_year = 5
        c.update_price_fields()
        self.assertEqual(c.current_price, 105.50)
        self.assertIsNone(c.next_year_price)
        self.assertIsNone(c.second_year_price)
    def test_update_price_fields_works_for_future_contract_start(self):
        # Zero or negative contract years model contracts not yet started.
        c = self.make_contract_with_rates(
            contract_start=datetime.date(2016, 2, 11),
            contract_end=datetime.date(2020, 2, 11))
        c.contract_year = 1
        c.update_price_fields()
        self.assertEqual(c.current_price, 100.00)
        self.assertEqual(c.next_year_price, 102.20)
        self.assertEqual(c.second_year_price, 103.30)
        c.contract_year = 0
        c.update_price_fields()
        self.assertEqual(c.current_price, None)
        self.assertEqual(c.next_year_price, 100.00)
        self.assertEqual(c.second_year_price, 102.20)
        c.contract_year = -1
        c.update_price_fields()
        self.assertEqual(c.current_price, None)
        self.assertEqual(c.next_year_price, None)
        self.assertEqual(c.second_year_price, 100.00)
        c.contract_year = -2
        c.update_price_fields()
        self.assertEqual(c.current_price, None)
        self.assertEqual(c.next_year_price, None)
        self.assertEqual(c.second_year_price, None)
    def test_update_price_fields_sets_to_None_when_current_year_gt_5(self):
        c = get_contract_recipe().make(
            contract_year=6,
            contract_start=datetime.date(2017, 2, 11),
            contract_end=datetime.date(2021, 2, 11))
        c.update_price_fields()
        self.assertIsNone(c.current_price)
        self.assertIsNone(c.next_year_price)
        self.assertIsNone(c.second_year_price)
    def test_update_price_fields_raises_when_no_contract_year(self):
        c = get_contract_recipe().make(contract_year=None)
        with self.assertRaises(ValueError):
            c.update_price_fields()
    def test_update_price_fields_takes_contract_end_into_account(self):
        # Prices beyond the contract end date are nulled out.
        c = self.make_contract_with_rates(
            contract_year=1,
            contract_start=datetime.date(2016, 2, 11),
            contract_end=datetime.date(2017, 1, 1))
        c.update_price_fields()
        self.assertEqual(c.current_price, 100.00)
        self.assertEqual(c.next_year_price, None)
        self.assertEqual(c.second_year_price, None)
        c.contract_end = datetime.date(2018, 1, 1)
        c.update_price_fields()
        self.assertEqual(c.current_price, 100.00)
        self.assertEqual(c.next_year_price, 102.20)
        self.assertEqual(c.second_year_price, None)
        c.contract_end = datetime.date(2019, 1, 1)
        c.update_price_fields()
        self.assertEqual(c.current_price, 100.00)
        self.assertEqual(c.next_year_price, 102.20)
        self.assertEqual(c.second_year_price, 103.30)
    def test_adjust_contract_year(self):
        c = get_contract_recipe().make(
            contract_start=datetime.date(2016, 2, 11))
        c.adjust_contract_year(
            current_date=datetime.date(2016, 2, 12))
        self.assertEqual(c.contract_year, 1)
        # A date before the contract start yields year 0.
        c.adjust_contract_year(
            current_date=datetime.date(2015, 2, 1))
        self.assertEqual(c.contract_year, 0)
    def test_adjust_contract_year_raises_when_no_contract_start(self):
        c = get_contract_recipe().make(contract_start=None)
        with self.assertRaises(ValueError):
            c.adjust_contract_year()
    def test_calculate_end_year(self):
        c = get_contract_recipe().make(
            contract_start=datetime.date(2016, 2, 11),
            contract_end=datetime.date(2020, 2, 12))
        self.assertEqual(c.calculate_end_year(), 5)
        c.contract_end = datetime.date(2018, 2, 12)
        self.assertEqual(c.calculate_end_year(), 3)
    def test_calculate_end_year_maxes_at_5(self):
        c = get_contract_recipe().make(
            contract_start=datetime.date(2016, 2, 11),
            contract_end=datetime.date(2030, 2, 12))
        self.assertEqual(c.calculate_end_year(), 5)
    def test_calculate_end_year_raises_when_no_contract_start_or_end(self):
        d = datetime.date(2016, 2, 11)
        c = get_contract_recipe().make(
            contract_start=None,
            contract_end=d)
        with self.assertRaises(ValueError):
            c.calculate_end_year()
        c.contract_start = d
        c.contract_end = None
        with self.assertRaises(ValueError):
            c.calculate_end_year()
    def test_calculate_end_year_raises_when_start_after_end(self):
        c = get_contract_recipe().make(
            contract_start=datetime.date(2020, 2, 11),
            contract_end=datetime.date(2015, 2, 12))
        with self.assertRaises(ValueError):
            c.calculate_end_year()
class ContractSearchTestCase(TestCase):
    """Exercise full-text search over Contract labor categories."""
    CATEGORIES = [
        'Sign Language Interpreter',
        'Foreign Language Staff Interpreter (Spanish sign language)',
        'Aircraft Servicer',
        'Service Order Dispatcher',
        'Disposal Services',
        'Interpretation Services Class 4: Afrikan,Akan,Albanian',
        'Interpretation Services Class 1: Spanish',
        'Interpretation Services Class 2: French, German, Italian',
    ]
    def assertCategoriesEqual(self, results, categories):
        # Order-insensitive comparison of the matched labor categories.
        found = sorted(result.labor_category for result in results)
        self.assertEqual(found, sorted(categories))
    def setUp(self):
        # One contract per labor category listed above.
        self.contracts = get_contract_recipe().make(
            labor_category=cycle(self.CATEGORIES),
            _quantity=len(self.CATEGORIES)
        )
    def test_multi_phrase_search_works_with_single_word_phrase(self):
        # A bare string is accepted as a single one-word phrase.
        matches = Contract.objects.multi_phrase_search('interpretation')
        expected = [
            u'Sign Language Interpreter',
            u'Foreign Language Staff Interpreter (Spanish sign language)',
            u'Interpretation Services Class 4: Afrikan,Akan,Albanian',
            u'Interpretation Services Class 1: Spanish',
            u'Interpretation Services Class 2: French, German, Italian'
        ]
        self.assertCategoriesEqual(matches, expected)
    def test_multi_phrase_search_works_with_multi_word_phrase(self):
        # Every word of a phrase must match.
        matches = Contract.objects.multi_phrase_search([
            'interpretation services'
        ])
        expected = [
            u'Interpretation Services Class 4: Afrikan,Akan,Albanian',
            u'Interpretation Services Class 1: Spanish',
            u'Interpretation Services Class 2: French, German, Italian'
        ]
        self.assertCategoriesEqual(matches, expected)
    def test_multi_phrase_search_works_with_multiple_phrases(self):
        # Phrases combine as a union of their individual result sets.
        matches = Contract.objects.multi_phrase_search([
            'interpretation services',
            'disposal'
        ])
        expected = [
            u'Disposal Services',
            u'Interpretation Services Class 4: Afrikan,Akan,Albanian',
            u'Interpretation Services Class 1: Spanish',
            u'Interpretation Services Class 2: French, German, Italian'
        ]
        self.assertCategoriesEqual(matches, expected)
    def test_search_index_works_via_raw_sql(self):
        # Hits the search_index tsvector column directly, bypassing the ORM helper.
        results = Contract.objects.raw(
            '''
            SELECT id, labor_category
            FROM contracts_contract
            WHERE search_index @@ to_tsquery('Interpretation')
            ORDER BY id
            '''
        )
        expected = [
            u'Sign Language Interpreter',
            u'Foreign Language Staff Interpreter (Spanish sign language)',
            u'Interpretation Services Class 4: Afrikan,Akan,Albanian',
            u'Interpretation Services Class 1: Spanish',
            u'Interpretation Services Class 2: French, German, Italian'
        ]
        self.assertCategoriesEqual(results, expected)
|
import commands
import glob
import os
import sys
from waflib import Build, Logs, Node, Options, Task, TaskGen, Utils
##----------------------------------------------------------------##
@TaskGen.extension('.mm')
def mm_hook(self, node):
    """Map ``.mm`` (Objective-C++) source files to an :py:class:`mm` compile task."""
    return self.create_compiled_task('mm', node)
class mm(Task.Task):
    """Compile Objective-C++ (.mm) files into object files using ${CXX} with ${MMFLAGS}."""
    run_str = '${CXX} ${ARCH_ST:ARCH} ${MMFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT}'
    vars = ['CXXDEPS'] # unused variable to depend on, just in case
    ext_in = ['.h'] # set the build order easily by using ext_out=['.h']
# ##----------------------------------------------------------------##
# Default iOS build target; overridable per-build via ctx.env.IOS_ARCH /
# ctx.env.IOS_DEVICE (see prepareEnvIOS below).
_IOS_ARCH = 'armv7'
_IOS_DEVICE = 'iPhoneOS'
def GetDevRootIOS():
    """Return the iOS platform Developer directory.

    Probes the known Xcode installation roots and falls back to the default
    Xcode toolchain path when no platform directory exists.
    """
    platform_roots = (
        '/Applications/Xcode.app/Contents/Developer/Platforms',
        '/Developer/Platforms',
    )
    for root in platform_roots:
        candidate = '%s/%s.platform/Developer' % (root, _IOS_DEVICE)
        if os.path.exists(candidate):
            return candidate
    return '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain'
def prepareEnvIOS( ctx ):
    """Configure ``ctx.env`` for cross-compiling against an iOS SDK.

    Locates the newest installed SDK under the platform Developer root,
    points CC/CXX and the linkers at the platform toolchain, and fills in
    the C, C++, Objective-C++ and linker flag lists.

    ctx: a waf configuration context; reads ctx.env.IOS_DEVICE and
    ctx.env.IOS_ARCH as optional overrides of the module defaults.
    """
    minimalVersion = '4.3'
    devRoot = GetDevRootIOS()
    ctx.env.IOS_DEV_ROOT = devRoot
    device = ctx.env.IOS_DEVICE or _IOS_DEVICE
    arch = ctx.env.IOS_ARCH or _IOS_ARCH
    # Probe candidate SDK versions in order; like the original, the last
    # candidate path is kept even when none of them exists on disk.
    for ver in ['4.3', '5.0', '5.1', '6.0', '6.1']:
        path = '{0}/SDKs/{1}{2}.sdk'.format( devRoot, device, ver )
        if os.path.isdir(path): break
    ctx.env.IOS_SDK_ROOT = SDKRoot = path
    # Compiler and linker flags
    cflags = '-arch %s -pipe -miphoneos-version-min=%s -isysroot %s ' % ( arch, minimalVersion, SDKRoot )
    ctx.env.CC = devRoot + '/usr/bin/gcc'
    ctx.env.CXX = devRoot + '/usr/bin/g++'
    ctx.env.LINK_CC = devRoot + '/usr/bin/gcc'
    ctx.env.LINK_CXX = devRoot + '/usr/bin/g++'
    ctx.env.CXXFLAGS = Utils.to_list(cflags)
    ctx.env.CXXFLAGS.append('-I%s/usr/include' % SDKRoot )
    ctx.env.DEFINES.append('IOS')
    ctx.env.CXXFLAGS.append( '-F%s/System/Library/Frameworks' % SDKRoot )
    ctx.env.LINKFLAGS.append( '-F%s/System/Library/Frameworks' % SDKRoot )
    # Objective-C++ gets its own copy of the flag list.
    ctx.env.MMFLAGS = ctx.env.CXXFLAGS[:]
    # BUGFIX: the original assigned CFLAGS and LINKFLAGS to the *same list
    # object* as CXXFLAGS, so any later mutation of one silently changed all
    # three.  Assign independent copies with identical contents instead
    # (the values written are unchanged).
    ctx.env.CFLAGS = ctx.env.CXXFLAGS[:]
    ctx.env.LINKFLAGS = ctx.env.CXXFLAGS[:]
# ##----------------------------------------------------------------##
# _ANDROID_NDK_PATH = '/Users/tommo/dev/android-ndk-r8'
# _ANDROID_NDK_VER = '14'
# _ANDROID_NDK_ARCH = 'arm'
# def GetSDKRootAndroid():
# return _ANDROID_NDK_PATH + '/platforms/android-%s/arch-%s' % ( _ANDROID_NDK_VER, _ANDROID_NDK_ARCH )
# ##----------------------------------------------------------------## |
import pygame
from .system import System
from component import config
from logcat import LogCat
class GameWindow(System):
    """Rendering system: draws sprites, the background and UI text onto the
    main pygame window surface."""
    def __init__(self, window, background):
        super().__init__()
        self._window = window          # target pygame display surface
        self._background = background  # surface blitted on "cmd_clear"
        # NOTE(review): pygame.freetype normally needs an explicit
        # `import pygame.freetype`; confirm the plain `import pygame` in this
        # module actually exposes the submodule.
        self._ui_font = pygame.freetype.SysFont(
            config.font["family"],
            config.font["size"]
        )
        # Redraw the background whenever a clear is requested.
        self.on("cmd_clear", self._clear_screen)
    @LogCat.log_func
    def _clear_screen(self, e):
        # Event handler: wipe the window by blitting the full background.
        self._window.blit(self._background, (0, 0))
    @LogCat.log_func
    def render(self, sprite, rect):
        """Blit *sprite* at *rect* onto the window."""
        self._window.blit(sprite, rect)
    @LogCat.log_func
    def box(self, fps):
        """Render an FPS counter in the top-left corner."""
        sfps, rect = self._ui_font.render(f"fps: {fps}", (255, 255, 0))
        self._window.blit(sfps, (0, 0))
# game_window.py
|
import networkx as nx
import csv
def compute_top_k(map__node_id__score, k=20):
    """Return the k best (node_id, score) pairs, highest score first.

    Ties on the score are broken by ascending node id.
    """
    ranked = sorted(map__node_id__score.items(),
                    key=lambda pair: (-pair[1], pair[0]))
    return ranked[:k]
complete_input_graph_file_name = "./Part_2/dataset/pkmn_graph_data.tsv"
k = 6
# Graph creation by reading the list of weighted edges from file.
# A `with` block guarantees the handle is closed even if parsing fails
# (the original opened/closed the file manually and leaked on error).
list__u_v_weight = []
list_u_v = []
with open(complete_input_graph_file_name, 'r', encoding="utf-8") as input_file_handler:
    csv_reader = csv.reader(input_file_handler, delimiter='\t', quotechar='"',
                            quoting=csv.QUOTE_NONE)
    for record in csv_reader:
        u = record[0]
        v = record[1]
        weight = 1  # every edge carries unit weight
        list__u_v_weight.append((u, v, weight))
        list_u_v.append((u, v))
# graph with all edges has weight 1 (will be used for 2.1)
graph = nx.Graph()
graph.add_weighted_edges_from(list__u_v_weight)
# graph with no edge weight (will be used for 2.2)
graph_2 = nx.Graph()
graph_2.add_edges_from(list_u_v)
# Part 2.1
################################################################################
### The following `topic_specific` function takes as input set of pokemon name(s)
### It retrieves the Top-K nodes in the graph
### using as score the Topic-Specific-PageRank score of a node.
################################################################################
def topic_specific(pokemon_names):
    """Return the Top-K "team" for the topic made of `pokemon_names`.

    Scores every node with Topic-Specific (personalized) PageRank, where the
    teleport distribution is uniform over the given pokemon and zero
    elsewhere, and returns the set of the K best-scoring node identifiers.

    `pokemon_names` may be a single name (str) or any iterable of names.
    """
    # BUGFIX/robustness: a bare string used to fall through to substring
    # membership tests (`node_id in "Charizard"`), which only worked by
    # coincidence for exact matches.  Normalize it to a one-element set.
    if isinstance(pokemon_names, str):
        pokemon_names = {pokemon_names}
    # Creation of the teleporting probability distribution for the selected Topic
    set__all_x_node_ids = set()
    set__all_NOT_x_node_ids = set()
    for node_id in graph:
        if node_id in pokemon_names:
            set__all_x_node_ids.add(node_id)
        else:
            set__all_NOT_x_node_ids.add(node_id)
    # Uniform probability over the topic nodes, zero everywhere else.
    map_teleporting_probability_distribution__node_id__probability = {
        node_id: 1. / len(set__all_x_node_ids) for node_id in set__all_x_node_ids}
    for node_id in set__all_NOT_x_node_ids:
        map_teleporting_probability_distribution__node_id__probability[node_id] = 0.
    map__node_id__node_pagerank_value = nx.pagerank(
        graph, alpha=0.33,
        personalization=map_teleporting_probability_distribution__node_id__probability,
        weight='weight')
    # Extract the Top-K node identifiers according to the PageRank score.
    top_k__node_id__node_pagerank_value = compute_top_k(map__node_id__node_pagerank_value, k)
    # Return only the member names (drop the scores).
    return {node_id for node_id, _score in top_k__node_id__node_pagerank_value}
# Topic sets for the team-building experiments.
Set_A = {"Pikachu"}
Set_B = set(["Venusaur", "Charizard", "Blastoise"])
Set_C = set(["Excadrill", "Dracovish", "Whimsicott", "Milotic"])
g_a = topic_specific(Set_A)
g_b = topic_specific(Set_B)
g_c = topic_specific(Set_C)
# NOTE(review): these three calls pass a bare string, so inside
# topic_specific the membership test `node_id in pokemon_names` does
# *substring* matching on the string rather than set membership — it only
# behaves as intended because node ids equal the full name; confirm.
g_1 = topic_specific("Charizard")
g_2 = topic_specific("Venusaur")
g_3 = topic_specific("Kingdra")
g_4 = topic_specific(set(["Charizard", "Venusaur"]))
g_5 = topic_specific(set(["Charizard", "Kingdra"]))
g_6 = topic_specific(set(["Venusaur", "Kingdra"]))
# Compute the number of team members inside the Team(Charizard, Venusaur) that are neither
# in Team(Charizard) nor in Team(Venusaur)
u_1_2 = set().union(g_1, g_2)
print(len(g_4.difference(u_1_2)))
# Compute the number of team members inside the Team(Charizard, Kingdra) that are neither in Team(Charizard)
# nor in Team(Kingdra)
u_1_3 = set().union(g_1, g_3)
print(len(g_5.difference(u_1_3)))
# Compute the number of team members inside the Team(Venusaur, Kingdra) that are
# neither in Team(Venusaur) nor in Team(Kingdra)
u_2_3 = set().union(g_2, g_3)
print(len(g_6.difference(u_2_3)))
# PART 2.2
def compute_good_local_community(graph, seed_node_id, alpha=0.9):
    """Find a good local community around `seed_node_id` via a PageRank sweep.

    Ranks all nodes by degree-normalized personalized PageRank (teleporting
    only to the seed), then sweeps prefixes of that ordering and keeps the
    prefix with the lowest conductance, skipping prefixes whose conductance
    is exactly 0 or 1 and prefixes larger than 140 nodes.

    Returns (set_of_node_ids, conductance_value); the set is empty and the
    value is +inf when no valid prefix is found.
    """
    # Teleport distribution concentrated entirely on the seed node.
    teleport = {node_id: 0. for node_id in graph}
    teleport[seed_node_id] = 1.
    pagerank_scores = nx.pagerank(graph, alpha=alpha, personalization=teleport)
    # Degree-normalized scores, best first; ties broken by ascending node id.
    ordering = sorted(
        ((node_id, score / graph.degree[node_id])
         for node_id, score in pagerank_scores.items()),
        key=lambda pair: (-pair[1], pair[0]),
    )
    # Sweep: grow the candidate community one node at a time and track the
    # prefix index achieving the minimum conductance.
    best_index = -1
    best_conductance = float("+inf")
    community = set()
    complement = set(graph.nodes())
    for sweep_index in range(len(ordering) - 1):
        node_id = ordering[sweep_index][0]
        community.add(node_id)
        complement.remove(node_id)
        conductance_value = nx.algorithms.cuts.conductance(graph, community, complement)
        # Discard local communities with conductance 0 or 1.
        if conductance_value == 0. or conductance_value == 1.:
            continue
        # Discard communities larger than 140 members.
        if len(community) > 140:
            continue
        if conductance_value < best_conductance:
            best_conductance = conductance_value
            best_index = sweep_index
    # Rebuild the best prefix as the resulting community.
    best_community = {node_id for node_id, _score in ordering[:best_index + 1]}
    return best_community, best_conductance
# different alpha values will be tried and the best will be considered as the best community
alphas = [0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.1, 0.05]
nodes = graph_2.nodes
# `best_conductance_lists` is created to add the best community's conductance value for each pokemon
best_conductance_lists = []
# `local_community_list` is created to add the best community's members for each pokemon
local_community_list = []
for node in nodes:
    # `conductance_list` and `local_community` are created temporarily to choose the best local community
    # for each pokemon at the end of each alpha value tried
    conductance_list = []
    local_community = []
    for a in alphas:
        set_local_community_for_node, conductance_value_for_local_community_for_node = compute_good_local_community(
            graph_2, node, alpha=a)
        conductance_list.append(conductance_value_for_local_community_for_node)
        local_community.append(set_local_community_for_node)
    # Keep the alpha achieving the minimum conductance for this node.
    min_index = conductance_list.index(min(conductance_list))
    best_conductance_lists.append((conductance_list[min_index]))
    local_community_list.append(local_community[min_index])
# `community_frequency` dictionary is created to store pokemon names as key and community_frequency of key(pokemon) as a value
community_frequency = {}
for i in graph_2.nodes:
    community_frequency[i] = 0
for community in local_community_list:
    for pokemon in community:
        community_frequency[pokemon] = community_frequency[pokemon] + 1
sort_com_frequency = sorted(community_frequency.items(), key=lambda x: x[1], reverse=True)
# The most 5 frequent pokemon and its' frequency value
print(sort_com_frequency[:5])
# The least 5 frequent pokemon and its' frequency value
print(sort_com_frequency[-5:])
# Writing the tsv file
# NOTE(review): csv writers should use newline='' on the open() call to avoid
# blank lines on Windows; also `list(graph_2.nodes).index(node)` rebuilds the
# node list per iteration (O(n^2) overall) — consider precomputing an
# id->position map.
with open('./Part_2/output.tsv', 'wt') as out_file:
    tsv_writer = csv.writer(out_file, delimiter='\t')
    tsv_writer.writerow(["pokemon_name", "number_of_nodes_in_the_local_comunity", "conductance_value_of_the_local_comunity"])
    for node in sorted(graph_2.nodes):
        index = list(graph_2.nodes).index(node)
        result = [node, len(local_community_list[index]), best_conductance_lists[index]]
        tsv_writer.writerow(result)
|
import contextlib
from typing import Any, List, Optional
from flask import Flask
from werkzeug.routing import Rule
from . import util
from .blueprint.blueprint import openapi_documentation
from .converters.base import Converter
from .documentation import Documentation
from .exceptions import MissingConfigContext, MissingConverter
from .specification import (
Info,
MediaType,
OpenAPI,
Operation,
PathItem,
Paths,
RequestBody,
Response,
Responses,
Server,
)
class DocumentationOptions:
    """Global configuration flags for the OpenAPI documentation builder."""
    def __init__(
        self,
        include_head_response: bool = True,
        include_options_response: bool = True,
        server_url: str = "/",
        include_marshmallow_converters: bool = True,
        include_documentation_blueprint: bool = True,
    ):
        # Whether HEAD / OPTIONS operations are documented alongside the
        # explicitly declared methods.
        self.include_head_response: bool = include_head_response
        self.include_options_response: bool = include_options_response
        # Base URL advertised in the generated specification's `servers` list.
        self.server_url: str = server_url
        # Register the bundled marshmallow schema converters on the builder.
        self.include_marshmallow_converters: bool = include_marshmallow_converters
        # Serve the interactive documentation blueprint from the Flask app.
        self.include_documentation_blueprint: bool = include_documentation_blueprint
class OpenApiDocumentation:
    """OpenAPI Documentation builder for your Flask REST API.
    Two ways to use the OpenApiDocumentation:
    Option 1: This is binding the instance to a specific Flask application:
    >>> app = Flask(__name__)
    >>> documentation = OpenApiDocumentation(app=app)
    Option 2: Create the object once and configure the application later to support it:
    >>> documentation = OpenApiDocumentation()
    >>> app = Flask(__name__)
    >>> documentation.init_app(app=app)
    """
    def __init__(
        self,
        app: Optional[Flask] = None,
        title: str = "Open API REST documentation",
        version: str = "1.0.0",
        options: Optional[DocumentationOptions] = None,
    ):
        self.app: Optional[Flask] = app
        """After self.init_app is called, the self.app must not be None anymore."""
        self.options: DocumentationOptions = (
            options if options is not None else DocumentationOptions()
        )
        """Global documentation options for the builder."""
        self.specification = OpenAPI(
            info=Info(title=title, version=version),
            paths=Paths(),
            servers=[Server(url=self.options.server_url)],
        )
        """The specification that is generated using the builder."""
        self.builder = OpenAPIBuilder(open_api_documentation=self)
        """The builder used for iterating the endpoints in order to generate the
        configuration."""
        if self.app is not None:
            self.init_app(app)
    def init_app(self, app: Flask):
        """Initialises the application."""
        if not app or not isinstance(app, Flask):
            raise TypeError("Invalid Flask app instance.")
        if self.options.include_documentation_blueprint:
            app.register_blueprint(openapi_documentation)
        # NOTE(review): Flask removed `before_first_request` in 2.3; this
        # assumes Flask < 2.3 — confirm the pinned dependency.
        app.before_first_request(self.builder.iterate_endpoints)
        # Register the extension in the app.
        app.extensions["__open_api_doc__"] = self
        self.app = app
    def get_configuration(self):
        """Returns the OpenAPI configuration specification as a dictionary."""
        return self.specification.get_value()
class OpenAPIBuilder:
"""OpenAPI builder for generating the documentation."""
    def __init__(self, open_api_documentation: OpenApiDocumentation):
        """Create a builder bound to *open_api_documentation*."""
        self.converters: List[Converter] = []
        self.open_api_documentation: OpenApiDocumentation = open_api_documentation
        # Active per-endpoint config; only set inside use_documentation_config().
        self.__documentation_config: Optional[Documentation] = None
        # NOTE(review): `self.options` is never assigned here — presumably a
        # property delegating to open_api_documentation.options is defined
        # further down the class (outside this view); confirm.
        if self.options.include_marshmallow_converters:
            # Keep import below to support packages without marshmallow.
            from openapi_builder.converters.marshmallow import (
                register_marshmallow_converters,
            )
            register_marshmallow_converters(self)
def process(self, value: Any, name: Optional[str] = None):
"""Processes an instance, and returns a schema, or reference to that schema."""
if self.__documentation_config is None:
raise MissingConfigContext()
if name in self.__documentation_config.custom_converters:
return self.__documentation_config.custom_converters[name]
converter = next(
(
converter
for converter in self.converters
if isinstance(value, converter.converts_class)
),
None,
)
if converter is None:
raise MissingConverter(value=value)
return converter.convert(value=value)
@contextlib.contextmanager
def use_documentation_config(self, documentation_config: Documentation):
"""Context manager for function that need to be executed with a documentation_config."""
if not isinstance(documentation_config, Documentation):
raise TypeError(
f"{documentation_config} is not an instance of Documentation."
)
self.__documentation_config = documentation_config
yield
self.__documentation_config = None
    def iterate_endpoints(self):
        """Iterates the endpoints of the Flask application to generate the documentation.
        This function is executed before the first request is processed in the corresponding
        Flask application.
        """
        # NOTE(review): `url_map._rules` is a private Werkzeug attribute; the
        # public `url_map.iter_rules()` would be the safer choice — confirm.
        for rule in self.open_api_documentation.app.url_map._rules:
            view_func = self.open_api_documentation.app.view_functions[rule.endpoint]
            # The @add_documentation decorator stores its config on the view function.
            config: Documentation = getattr(view_func, "__open_api_doc__", None)
            if config is None:
                # endpoint has no documentation configuration -> skip
                continue
            with self.use_documentation_config(config):
                self.process_rule(rule)
def process_rule(self, rule: Rule):
"""Processes a Werkzeug rule.
The function must be called within the use_documentation_config-context manager.
Usage:
>>> builder = OpenAPIBuilder()
>>> config = Documentation() # retrieved from the @add_documentation decorator.
>>> with builder.use_documentation_config(config):
>>> builder.process_rule(rule)
"""
if self.__documentation_config is None:
raise MissingConfigContext()
view_func = self.open_api_documentation.app.view_functions[rule.endpoint]
parameters = list(self.__documentation_config.parameters)
parameters.extend(util.parse_openapi_arguments(rule))
endpoint_name = util.openapi_endpoint_name_from_rule(rule)
if endpoint_name not in self.paths.values:
self.paths.values[endpoint_name] = PathItem(parameters=parameters)
path_item = self.paths.values[endpoint_name]
for method in rule.methods:
values = {}
for key, schema in self.__documentation_config.responses.items():
reference = self.process(schema)
values[key] = Response(
description=self.__documentation_config.description
or view_func.__doc__,
content={"application/json": MediaType(schema=reference)},
)
if self.__documentation_config.input_schema is not None:
schema_or_reference = self.process(
self.__documentation_config.input_schema
)
request_body = RequestBody(
description=self.__documentation_config.description,
content={"application/json": MediaType(schema=schema_or_reference)},
)
else:
request_body = None
operation = Operation(
summary=self.__documentation_config.summary,
description=self.__documentation_config.description,
responses=Responses(values=values),
request_body=request_body,
tags=self.__documentation_config.tags,
)
if method == "GET":
path_item.get = operation
if method == "HEAD" and self.options.include_head_response:
path_item.head = operation
if method == "OPTIONS" and self.options.include_options_response:
path_item.options = operation
if method == "POST":
path_item.post = operation
if method == "PUT":
path_item.put = operation
def register_converter(self, converter):
"""Register a converter for this builder."""
self.converters.append(converter)
    @property
    def schemas(self):
        """Helper property to return the schemas of the OpenAPI specification."""
        return self.open_api_documentation.specification.components.schemas
    @property
    def paths(self):
        """Helper property to return the paths of the OpenAPI specification."""
        return self.open_api_documentation.specification.paths
    @property
    def options(self):
        """Helper property to return the builder options."""
        return self.open_api_documentation.options
|
import pressio4py as p4py
def test_version():
    """Smoke test: importing pressio4py exposes a __version__ attribute."""
    print(p4py.__version__)


if __name__ == '__main__':
    test_version()
|
# -*- coding: utf-8-*-
import sys
import os
import time
import yaml
import lib.diagnose
from baseVoice import AbstractVoiceEngine
import lib.appPath
from lib.voice.snowboy import snowboydetect
class SnowboyVoice(AbstractVoiceEngine):
    """
    Uses the snowboy hotword detector
    """

    TAG = "snowboy"

    def __init__(self, decoder_model,
                 resource=os.path.join(lib.appPath.DATA_PATH, "snowboy/resources/common.res"),
                 sensitivity=None,
                 hotwords=None):
        """Create a snowboy hotword detector.

        :param decoder_model: a single model path or a list of model paths.
        :param resource: path to snowboy's common resource file.
        :param sensitivity: single value or list, one per hotword (optional).
        :param hotwords: hotword display names returned by transcribe().
        """
        # Fixed: super(self.__class__, self) recurses forever when this class
        # is subclassed; name the class explicitly.
        super(SnowboyVoice, self).__init__()
        # Fixed: mutable default arguments ([]) are shared across calls;
        # use None sentinels instead.
        self.hotwords = [] if hotwords is None else hotwords
        if sensitivity is None:
            sensitivity = []
        if not isinstance(decoder_model, list):
            decoder_model = [decoder_model]
        if not isinstance(sensitivity, list):
            sensitivity = [sensitivity]
        model_str = ",".join(decoder_model)

        self.detector = snowboydetect.SnowboyDetect(
            resource_filename=resource.encode(), model_str=model_str.encode())
        self.detector.SetAudioGain(1)
        self.num_hotwords = self.detector.NumHotwords()

        # A single sensitivity value is replicated for every loaded model.
        if len(decoder_model) > 1 and len(sensitivity) == 1:
            sensitivity = sensitivity * self.num_hotwords
        if len(sensitivity) != 0:
            assert self.num_hotwords == len(sensitivity), \
                "number of hotwords in decoder_model (%d) and sensitivity " \
                "(%d) does not match" % (self.num_hotwords, len(sensitivity))
        sensitivity_str = ",".join([str(t) for t in sensitivity])
        if len(sensitivity) != 0:
            self.detector.SetSensitivity(sensitivity_str.encode())

    @classmethod
    def get_config(cls):
        """Read hotwords/sensitivity/decoder_model from config/snowboy.yml.

        Each value is stored as a comma-separated string and returned split
        into a list. Returns an empty dict when the file does not exist.
        """
        config = {}
        config_path = os.path.join(lib.appPath.CONFIG_PATH, 'snowboy.yml')
        if os.path.exists(config_path):
            with open(config_path, 'r') as f:
                profile = yaml.safe_load(f)
                if 'hotwords' in profile:
                    config['hotwords'] = profile['hotwords'].split(',')
                if 'sensitivity' in profile:
                    config['sensitivity'] = profile['sensitivity'].split(',')
                if 'decoder_model' in profile:
                    config['decoder_model'] = profile['decoder_model'].split(',')
        return config

    @classmethod
    def is_available(cls):
        """Available only when the compiled snowboydetect module can be imported."""
        return (super(SnowboyVoice, cls).is_available() and
                lib.diagnose.check_python_import('lib.voice.snowboy.snowboydetect'))

    def say(self, phrase, *args):
        """Snowboy is a detector, not a TTS engine: saying is a no-op."""
        pass

    def transcribe(self, fp):
        """Run hotword detection on raw audio.

        :param fp: either a raw-data string or a WAV file object (the 44-byte
            RIFF header is skipped before reading).
        :returns: a one-element list with the matched hotword, or an empty list.
        """
        if isinstance(fp, str):
            data = fp
        else:
            fp.seek(44)  # skip the WAV header; RunDetection expects raw PCM
            data = fp.read()
        ans = self.detector.RunDetection(data)
        if ans == -1:
            self._logger.warning("Error initializing streams or reading audio data")
        if ans > 0:
            message = "Keyword " + str(ans) + " detected at time: "
            message += time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            message += " with " + self.TAG
            self._logger.info(message)
            # Detector ids are 1-based; map back to the configured names.
            return [self.hotwords[ans-1].encode('UTF-8')]
        else:
            return []
|
# Multinomial logistic regression
import numpy as np
import seaborn as sns
import pandas
import matplotlib.pyplot as plt
import mcmc_tools
from scipy.stats import norm
from sklearn.linear_model import LinearRegression
import time

# Product purchase data:
# Age: age
# Sex: sex
# Income: income
# Y: ID of the purchased product
category = pandas.read_csv('data-category.txt')
print(category.head())
print(category.describe())

# Scaling the variables is quite important:
# without scaling, sampling tends not to converge.
# In Bayesian modelling, for sampling efficiency it helps to bring the data
# as close to the origin as possible.
Y = category['Y']
Age = category['Age']/100
Sex = category['Sex']
Income = category['Income']/1000
K = Y.nunique()
N = len(Y)

stan_data = {
    'N': N,
    'K': K,
    'Age': Age,
    'Sex': Sex,
    'Income': Income,
    'Y': Y
}

# Modelling trick:
# With a K-class categorical distribution, a K-dimensional coefficient vector
# is combined with the data into a linear predictor (also K-dimensional) that
# feeds the softmax; intuitively each component is the "strength" of choosing
# that category.
# However, translating this vector (moving the origin) leaves the softmax
# output unchanged, so the parameters are not identifiable.
# Therefore the strength of one category is pinned to a constant. Any constant
# works, but fixing it to 0 is simple and easy to understand.
# Before pinning, the posterior is unstable and sampling fails; after pinning
# it proceeds.

# Compile and sample
filename = '../model/model10-1-4'
start = time.time()
mcmc_result = mcmc_tools.sampling(filename, stan_data, n_jobs=4)
elapsed_time = time.time() - start
print("elapsed_time:{0}".format(elapsed_time) + "[sec]")
|
# -*-coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from alpharotate.libs.models.detectors.single_stage_base_network_batch import DetectionNetworkBase
from alpharotate.libs.models.losses.losses_fcos import LossFCOS
from alpharotate.libs.utils import nms_rotate
from alpharotate.libs.utils.coordinate_convert import backward_convert
from alpharotate.libs.models.samplers.fcos.sampler_fcos_r import SamplerFCOS
class DetectionNetworkFCOS(DetectionNetworkBase):
    """Rotated-box FCOS detector (anchor-free, single-stage).

    Builds per-level classification, centerness and box-regression heads on
    top of the FPN from DetectionNetworkBase, computes FCOS targets/losses
    during training, and decodes + NMS-filters rotated boxes at test time.
    """

    def __init__(self, cfgs, is_training):
        super(DetectionNetworkFCOS, self).__init__(cfgs, is_training)
        # self.cfgs = cfgs
        # self.is_training = is_training
        self.sampler_fcos = SamplerFCOS(cfgs)
        self.losses = LossFCOS(self.cfgs)
        # self.losses_dict = {}
        # self.batch_size = cfgs.BATCH_SIZE if is_training else 1
        # self.backbone = BuildBackbone(cfgs, is_training)

    def rpn_cls_net(self, inputs, scope_list, reuse_flag, level):
        """Classification head for one pyramid level.

        Stacks NUM_SUBNET_CONV 3x3 convs, then a final 3x3 conv producing
        CLASS_NUM per-position logits. Returns (logits, sigmoid probs).
        """
        rpn_conv2d_3x3 = inputs
        for i in range(self.cfgs.NUM_SUBNET_CONV):
            rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,
                                         num_outputs=256,
                                         kernel_size=[3, 3],
                                         stride=1,
                                         activation_fn=None if self.cfgs.USE_GN else tf.nn.relu,
                                         weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                         biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
                                         scope='{}_{}'.format(scope_list[0], i),
                                         reuse=reuse_flag)

            if self.cfgs.USE_GN:
                # With group norm the conv is linear; normalise, then ReLU.
                rpn_conv2d_3x3 = tf.contrib.layers.group_norm(rpn_conv2d_3x3)
                rpn_conv2d_3x3 = tf.nn.relu(rpn_conv2d_3x3)

        rpn_box_scores = slim.conv2d(rpn_conv2d_3x3,
                                     num_outputs=self.cfgs.CLASS_NUM,
                                     kernel_size=[3, 3],
                                     stride=1,
                                     weights_initializer=self.cfgs.FINAL_CONV_WEIGHTS_INITIALIZER,
                                     biases_initializer=self.cfgs.FINAL_CONV_BIAS_INITIALIZER,
                                     scope=scope_list[2],
                                     activation_fn=None,
                                     reuse=reuse_flag)
        # rpn_box_scores = tf.reshape(rpn_box_scores, [self.batch_size, -1, self.cfgs.CLASS_NUM],
        #                             name='rpn_{}_classification_reshape'.format(level))
        rpn_box_probs = tf.nn.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))

        return rpn_box_scores, rpn_box_probs

    def rpn_reg_ctn_net(self, inputs, scope_list, reuse_flag, level):
        """Regression + centerness head for one pyramid level.

        Returns (box offsets [4 channels], angle [1 channel, radians],
        centerness logits [1 channel]).
        """
        rpn_conv2d_3x3 = inputs
        for i in range(self.cfgs.NUM_SUBNET_CONV):
            rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,
                                         num_outputs=self.cfgs.FPN_CHANNEL,
                                         kernel_size=[3, 3],
                                         weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                         biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
                                         stride=1,
                                         activation_fn=None if self.cfgs.USE_GN else tf.nn.relu,
                                         scope='{}_{}'.format(scope_list[1], i),
                                         reuse=reuse_flag)

            if self.cfgs.USE_GN:
                rpn_conv2d_3x3 = tf.contrib.layers.group_norm(rpn_conv2d_3x3)
                rpn_conv2d_3x3 = tf.nn.relu(rpn_conv2d_3x3)

        # Per-position distances to the four box sides (left/bottom/right/top).
        rpn_box_offset = slim.conv2d(rpn_conv2d_3x3,
                                     num_outputs=4,
                                     kernel_size=[3, 3],
                                     stride=1,
                                     weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                     biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
                                     scope=scope_list[4]+'_offset',
                                     activation_fn=None,
                                     reuse=reuse_flag)

        # rpn_angle_sin = slim.conv2d(rpn_conv2d_3x3,
        #                             num_outputs=1,
        #                             kernel_size=[3, 3],
        #                             stride=1,
        #                             weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
        #                             biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
        #                             scope=scope_list[4] + '_sin',
        #                             activation_fn=tf.nn.sigmoid,
        #                             trainable=self.is_training,
        #                             reuse=reuse_flag)
        #
        # rpn_angle_cos = slim.conv2d(rpn_conv2d_3x3,
        #                             num_outputs=1,
        #                             kernel_size=[3, 3],
        #                             stride=1,
        #                             weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
        #                             biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
        #                             scope=scope_list[4] + '_cos',
        #                             activation_fn=tf.nn.sigmoid,
        #                             trainable=self.is_training,
        #                             reuse=reuse_flag)

        # [-90, 90]   sin in [-1, 1]  cos in [0, 1]
        # rpn_angle = (rpn_angle_sin - 0.5) * 2
        rpn_angle = slim.conv2d(rpn_conv2d_3x3,
                                num_outputs=1,
                                kernel_size=[3, 3],
                                stride=1,
                                weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
                                scope=scope_list[4] + '_angle',
                                activation_fn=tf.nn.sigmoid,
                                trainable=self.is_training,
                                reuse=reuse_flag)
        # Map the sigmoid output (0, 1) to roughly (-pi/4, pi/4) radians.
        # NOTE(review): the comment above mentions [-90, 90] degrees, which
        # would need a factor of pi rather than pi/2 — confirm intended range.
        rpn_angle = (rpn_angle - 0.5) * 3.1415926 / 2

        # Centerness logits (sigmoid applied later, at loss/inference time).
        rpn_ctn_scores = slim.conv2d(rpn_conv2d_3x3,
                                     num_outputs=1,
                                     kernel_size=[3, 3],
                                     stride=1,
                                     weights_initializer=self.cfgs.SUBNETS_WEIGHTS_INITIALIZER,
                                     biases_initializer=self.cfgs.SUBNETS_BIAS_INITIALIZER,
                                     scope=scope_list[3],
                                     activation_fn=None,
                                     reuse=reuse_flag)

        # Visualise the first image's centerness map in TensorBoard.
        tf.summary.image('centerness_{}'.format(level),
                         tf.nn.sigmoid(tf.expand_dims(rpn_ctn_scores[0, :, :, :], axis=0)))
        # rpn_ctn_scores = tf.reshape(rpn_ctn_scores, [self.batch_size, -1],
        #                             name='rpn_{}_centerness_reshape'.format(level))
        # rpn_box_offset = tf.reshape(rpn_box_offset, [self.batch_size, -1, 4],
        #                             name='rpn_{}_regression_reshape'.format(level))
        return rpn_box_offset, rpn_angle, rpn_ctn_scores

    def rpn_net(self, feature_pyramid, name):
        """Apply the cls and reg/ctn heads to every FPN level.

        When SHARE_NET is set, the head weights are reused across levels
        (created at the first level, reused afterwards).
        Returns per-level lists: box offsets (+angle), cls logits, cls probs,
        centerness logits.
        """
        rpn_box_offset_list = []
        rpn_box_scores_list = []
        rpn_box_probs_list = []
        rpn_cnt_scores_list = []
        with tf.variable_scope(name):
            with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
                for level, stride in zip(self.cfgs.LEVEL, self.cfgs.ANCHOR_STRIDE):

                    if self.cfgs.SHARE_NET:
                        reuse_flag = None if level == self.cfgs.LEVEL[0] else True
                        scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification',
                                      'rpn_centerness', 'rpn_regression']
                    else:
                        reuse_flag = None
                        scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level,
                                      'rpn_classification_' + level, 'rpn_centerness' + level,
                                      'rpn_regression_' + level]

                    rpn_box_scores, rpn_box_probs = self.rpn_cls_net(feature_pyramid[level],
                                                                     scope_list, reuse_flag, level)
                    rpn_box_offset, rpn_angle, rpn_ctn_scores = self.rpn_reg_ctn_net(feature_pyramid[level],
                                                                                     scope_list, reuse_flag, level)

                    # si = tf.Variable(tf.constant(1.0),
                    #                  name='rpn_bbox_offsets_scale_'.format(level),
                    #                  dtype=tf.float32, trainable=True)
                    # exp keeps distances positive; scale by the level's stride.
                    rpn_box_offset = tf.exp(rpn_box_offset) * stride

                    rpn_box_scores_list.append(rpn_box_scores)
                    rpn_box_probs_list.append(rpn_box_probs)
                    rpn_cnt_scores_list.append(rpn_ctn_scores)
                    # 5-channel regression output: [l, b, r, t, angle].
                    rpn_box_offset_list.append(tf.concat([rpn_box_offset, rpn_angle], axis=-1))

                return rpn_box_offset_list, rpn_box_scores_list, rpn_box_probs_list, rpn_cnt_scores_list

    def _fcos_target(self, feature_pyramid, img_batch, gtboxes_batch):
        """Build FCOS training targets via the python sampler.

        Returns a [batch, num_positions, 7] tensor; from the slicing in
        build_whole_detection_network: channel 0 = class, 1 = centerness,
        2: = geometry.
        """
        with tf.variable_scope('fcos_target'):
            fm_size_list = []
            for level in self.cfgs.LEVEL:
                featuremap_height, featuremap_width = tf.shape(feature_pyramid[level])[1], tf.shape(feature_pyramid[level])[2]
                featuremap_height = tf.cast(featuremap_height, tf.int32)
                featuremap_width = tf.cast(featuremap_width, tf.int32)
                fm_size_list.append([featuremap_height, featuremap_width])

            fcos_target_batch = tf.py_func(self.sampler_fcos.get_fcos_target_batch,
                                           inp=[gtboxes_batch, img_batch, fm_size_list],
                                           Tout=[tf.float32])
            fcos_target_batch = tf.reshape(fcos_target_batch, [self.batch_size, -1, 7])
            return fcos_target_batch

    def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None, gpu_id=0):
        """Assemble the full graph: backbone -> heads -> (losses) -> decode.

        Training returns (boxes, scores, category, losses_dict); inference
        returns (boxes, scores, category). Postprocessing only decodes the
        first image of the batch.
        """
        if self.is_training:
            # gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [self.batch_size, -1, 5])
            # gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)

            # Rotated GT boxes: 8 corner coords + label per box (9 values).
            gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [self.batch_size, -1, 9])
            gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)

        if self.cfgs.USE_GN:
            input_img_batch = tf.reshape(input_img_batch, [self.batch_size, self.cfgs.IMG_SHORT_SIDE_LEN,
                                                           self.cfgs.IMG_MAX_LENGTH, 3])

        # 1. build backbone
        feature_pyramid = self.build_backbone(input_img_batch)

        # 2. build rpn
        # rpn_box_offset_list: [level, bs, h, w, 5]
        rpn_box_offset_list, rpn_cls_score_list, rpn_cls_prob_list, rpn_cnt_scores_list = self.rpn_net(feature_pyramid, 'rpn_net')

        # rpn_box_offset: [level, bs, h*w, 5]
        rpn_cls_score, rpn_cls_prob, rpn_cnt_scores, rpn_box_offset = [], [], [], []
        for i in range(len(rpn_box_offset_list)):
            rpn_cls_score.append(tf.reshape(rpn_cls_score_list[i], [self.batch_size, -1, self.cfgs.CLASS_NUM]))
            rpn_cls_prob.append(tf.reshape(rpn_cls_prob_list[i], [self.batch_size, -1, self.cfgs.CLASS_NUM]))
            rpn_box_offset.append(tf.reshape(rpn_box_offset_list[i], [self.batch_size, -1, 5]))
            rpn_cnt_scores.append(tf.reshape(rpn_cnt_scores_list[i], [self.batch_size, -1]))

        # rpn_box_offset: [bs, -1, 5]
        rpn_cls_score = tf.concat(rpn_cls_score, axis=1)
        # rpn_cls_prob = tf.concat(rpn_cls_prob, axis=1)
        rpn_cnt_scores = tf.concat(rpn_cnt_scores, axis=1)
        rpn_box_offset = tf.concat(rpn_box_offset, axis=1)
        # rpn_cnt_prob = tf.nn.sigmoid(rpn_cnt_scores)
        # rpn_cnt_prob = tf.expand_dims(rpn_cnt_prob, axis=2)
        # rpn_cnt_prob = tf.broadcast_to(rpn_cnt_prob,
        #                                [self.batch_size, tf.shape(rpn_cls_prob)[1], tf.shape(rpn_cls_prob)[2]])
        # rpn_prob = rpn_cls_prob * rpn_cnt_prob

        # 3. build loss
        if self.is_training:
            with tf.variable_scope('build_loss'):
                fcos_target_batch = self._fcos_target(feature_pyramid, input_img_batch, gtboxes_batch_r)
                # Targets are constants w.r.t. the network -> stop gradients.
                cls_gt = tf.stop_gradient(fcos_target_batch[:, :, 0])
                ctr_gt = tf.stop_gradient(fcos_target_batch[:, :, 1])
                geo_gt = tf.stop_gradient(fcos_target_batch[:, :, 2:])

                cls_loss = self.losses.focal_loss_fcos(rpn_cls_score, cls_gt,
                                                       alpha=self.cfgs.ALPHA, gamma=self.cfgs.GAMMA)
                ctr_loss = self.losses.centerness_loss(rpn_cnt_scores, ctr_gt, cls_gt)
                # reg_loss = self.losses.iou_loss(geo_gt, rpn_box_offset, cls_gt, weight=ctr_gt)
                # left, bottom, right, top, theta = tf.unstack(geo_gt, axis=-1)
                # geo_gt = tf.stack([left, bottom, right, top, tf.sin(theta), tf.cos(theta)], axis=-1)
                # Regression is weighted by the centerness target.
                reg_loss = self.losses.smooth_l1_loss(geo_gt, rpn_box_offset, cls_gt, weight=ctr_gt)

                self.losses_dict['cls_loss'] = cls_loss * self.cfgs.CLS_WEIGHT
                self.losses_dict['reg_loss'] = reg_loss * self.cfgs.REG_WEIGHT
                self.losses_dict['ctr_loss'] = ctr_loss * self.cfgs.CTR_WEIGHT

        # 5. postprocess
        with tf.variable_scope('postprocess_detctions'):
            # Only the first image of the batch is decoded here.
            boxes, scores, category = self.postprocess_detctions(rpn_box_offset_list=[rpn_box_offset[0, :, :, :] for rpn_box_offset in rpn_box_offset_list],
                                                                 rpn_cls_prob_list=[rpn_cls_prob[0, :, :, :] for rpn_cls_prob in rpn_cls_prob_list],
                                                                 rpn_cnt_scores_list=[rpn_cnt_scores[0, :, :, :] for rpn_cnt_scores in rpn_cnt_scores_list],
                                                                 gpu_id=gpu_id)
            boxes = tf.stop_gradient(boxes)
            scores = tf.stop_gradient(scores)
            category = tf.stop_gradient(category)

        if self.is_training:
            return boxes, scores, category, self.losses_dict
        else:
            return boxes, scores, category

    def postprocess_detctions(self, rpn_box_offset_list, rpn_cls_prob_list, rpn_cnt_scores_list, gpu_id):
        """Decode per-level predictions into rotated boxes and run NMS.

        Inputs are single-image (no batch dim) per-level tensors. Returns
        (boxes [N, 5], scores [N], 1-based labels [N]).
        """

        def get_boxes_tf(points, geometry):
            """Turn (center point, l/b/r/t distances, angle) into the four
            rotated corner coordinates [x1 y1 x2 y2 x3 y3 x4 y4]."""
            # pointx, pointy = points[:, 0], points[:, 1]
            pointx, pointy = tf.unstack(points, axis=1)
            left, bottom, right, top, theta = geometry[:, 0], geometry[:, 1], geometry[:, 2], geometry[:, 3], geometry[:, 4]
            # Axis-aligned corners before rotation.
            xlt, ylt = pointx - left, pointy - top
            xlb, ylb = pointx - left, pointy + bottom
            xrb, yrb = pointx + right, pointy + bottom
            xrt, yrt = pointx + right, pointy - top

            # theta = tf.atan(sin_theta/cos_theta)
            theta *= -1
            # Rotate each corner around the center point by theta.
            xlt_ = tf.cos(theta) * (xlt - pointx) + tf.sin(theta) * (ylt - pointy) + pointx
            ylt_ = -tf.sin(theta) * (xlt - pointx) + tf.cos(theta) * (ylt - pointy) + pointy

            xrt_ = tf.cos(theta) * (xrt - pointx) + tf.sin(theta) * (yrt - pointy) + pointx
            yrt_ = -tf.sin(theta) * (xrt - pointx) + tf.cos(theta) * (yrt - pointy) + pointy

            xld_ = tf.cos(theta) * (xlb - pointx) + tf.sin(theta) * (ylb - pointy) + pointx
            yld_ = -tf.sin(theta) * (xlb - pointx) + tf.cos(theta) * (ylb - pointy) + pointy

            xrd_ = tf.cos(theta) * (xrb - pointx) + tf.sin(theta) * (yrb - pointy) + pointx
            yrd_ = -tf.sin(theta) * (xrb - pointx) + tf.cos(theta) * (yrb - pointy) + pointy

            convert_box = tf.transpose(tf.stack([xlt_, ylt_, xrt_, yrt_, xrd_, yrd_, xld_, yld_], axis=0))

            return convert_box

        rpn_box_offset, rpn_cnt_scores, rpn_cls_prob, center = [], [], [], []
        for i in range(len(rpn_box_offset_list)):
            # Build the grid of feature-map cell centers in image coordinates.
            shift = 0.0
            fm_height, fm_width = tf.shape(rpn_cnt_scores_list[i])[0], tf.shape(rpn_cnt_scores_list[i])[1]
            y_list = tf.linspace(tf.constant(shift), tf.cast(fm_height, tf.float32) - tf.constant(shift),
                                 tf.cast(fm_height, tf.int32))
            y_list = tf.broadcast_to(tf.reshape(y_list, [fm_height, 1, 1]), [fm_height, fm_width, 1])

            x_list = tf.linspace(tf.constant(shift), tf.cast(fm_width, tf.float32) - tf.constant(shift),
                                 tf.cast(fm_width, tf.int32))
            x_list = tf.broadcast_to(tf.reshape(x_list, [1, fm_width, 1]), [fm_height, fm_width, 1])

            xy_list = tf.concat([x_list, y_list], axis=2) * self.cfgs.ANCHOR_STRIDE[i]
            # yx_list = tf.concat([y_list, x_list], axis=2) * self.cfgs.ANCHOR_STRIDE[i]

            center.append(tf.reshape(xy_list, [-1, 2]))
            rpn_cls_prob.append(tf.reshape(rpn_cls_prob_list[i], [-1, self.cfgs.CLASS_NUM]))
            rpn_cnt_scores.append(tf.reshape(rpn_cnt_scores_list[i], [-1, ]))
            rpn_box_offset.append(tf.reshape(rpn_box_offset_list[i], [-1, 5]))

        rpn_cls_prob = tf.concat(rpn_cls_prob, axis=0)
        rpn_cnt_scores = tf.concat(rpn_cnt_scores, axis=0)
        rpn_box_offset = tf.concat(rpn_box_offset, axis=0)
        center = tf.concat(center, axis=0)

        boxes_pred = get_boxes_tf(center, rpn_box_offset)

        return_boxes_pred, return_scores, return_labels = [], [], []
        # NOTE(review): boxes_pred and rpn_cnt_scores are overwritten inside
        # this loop; with CLASS_NUM > 1 later iterations gather from the
        # already-filtered tensors while `scores` comes from the full set —
        # likely a bug for multi-class configs. Confirm before relying on it.
        for j in range(0, self.cfgs.CLASS_NUM):
            scores = rpn_cls_prob[:, j]
            if self.is_training:
                indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.VIS_SCORE)), [-1, ])
            else:
                indices = tf.reshape(tf.where(tf.greater(scores, self.cfgs.FILTERED_SCORE)), [-1, ])

            boxes_pred = tf.gather(boxes_pred, indices)
            scores = tf.gather(scores, indices)
            rpn_cnt_scores = tf.gather(rpn_cnt_scores, indices)

            # NMS ranks by cls_prob * centerness; returned scores are cls only.
            rpn_cnt_prob = tf.nn.sigmoid(rpn_cnt_scores)
            rpn_prob = scores * rpn_cnt_prob

            boxes_pred = tf.py_func(backward_convert,
                                    inp=[boxes_pred, False],
                                    Tout=[tf.float32])
            boxes_pred = tf.reshape(boxes_pred, [-1, 5])

            max_output_size = 4000 if 'DOTA' in self.cfgs.NET_NAME else 200
            nms_indices = nms_rotate.nms_rotate(decode_boxes=boxes_pred,
                                                scores=rpn_prob,
                                                iou_threshold=self.cfgs.NMS_IOU_THRESHOLD,
                                                max_output_size=100 if self.is_training else max_output_size,
                                                use_gpu=True,
                                                gpu_id=gpu_id)

            tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, nms_indices), [-1, 5])
            tmp_scores = tf.reshape(tf.gather(scores, nms_indices), [-1, ])

            return_boxes_pred.append(tmp_boxes_pred)
            return_scores.append(tmp_scores)
            # Labels are 1-based (0 reserved for background).
            return_labels.append(tf.ones_like(tmp_scores) * (j + 1))

        return_boxes_pred = tf.concat(return_boxes_pred, axis=0)
        return_scores = tf.concat(return_scores, axis=0)
        return_labels = tf.concat(return_labels, axis=0)

        return return_boxes_pred, return_scores, return_labels
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Data Cleaning Codes
"""

'''
The data for the social network of the eight masters of the Tang and Song
are extracted from China Biographical Database Project (CBDB).
This code is intended to prepare two excel tables for visualization in Gephi.

input: query_results.xlsx
output: the_eight_gephi.xlsx
'''

import pandas as pd
import os

# Placeholder: set this to the directory containing query_results.xlsx.
path = '<Your directory path>'
raw = pd.read_excel(path + os.sep + 'query_results.xlsx')
nodes = pd.DataFrame()
linked = pd.DataFrame()  # Dataset is pairwise so need to record source and target separately
edge = pd.DataFrame()

# Select data to nodes
# Add index person to nodes
nodes[['Id','Label','姓名','Index Year','Dynasty','Index Place']] = raw[['PersonID','Name','姓名','Index Year','Dynasty','Index Place']]
# Add linked person to nodes
linked[['Id','Label','姓名','Index Year','Dynasty','Index Place']] = raw[['NodeID','Linked to','社會關係人姓名',"Node's Index Year",'Node Dynasty','Node Index Place']]
# Merge two df
nodes = pd.concat([nodes,linked])
# Remove duplicates
nodes = nodes.drop_duplicates()
# Set Id as index
nodes = nodes.set_index('Id')
# Mark the eight masters of the Tang and Song
Eight = ['Su Shi','Liu Zongyuan','Han Yu','Ouyang Xiu','Su Xun','Su Zhe','Wang Anshi','Zeng Gong']
nodes['The Eight'] = nodes['Label'].apply(lambda x: x in Eight)

# Select data to edge
edge[['Source','Target','Weight','Edge Dist.','Distance 距離']] = raw[['PersonID','NodeID','Count','Edge Dist.','Distance 距離']]
# Set Source as index
edge = edge.set_index('Source')

# Export tables.
# Fixed: ExcelWriter.save() was deprecated and removed in pandas 2.0; the
# context manager saves and closes the workbook, even if a write fails.
with pd.ExcelWriter(path + os.sep + 'the_eight_gephi.xlsx', engine='xlsxwriter') as writer:
    nodes.to_excel(writer, sheet_name='nodes_table')
    edge.to_excel(writer, sheet_name='edges_table')
import requests

from game_apis.rest.api import API
from game_apis.log import get_logger

# Module-level logger shared by the REST clients defined in this file.
LOG = get_logger('rest', 'rest.log')

# https://developer.riotgames.com/api-methods/
class Riot(API):
    """Minimal REST client for the Riot Games (League of Legends) API.

    See https://developer.riotgames.com/api-methods/ for endpoint details.
    """
    ID = 'RIOT'
    LIMIT = 1

    def __init__(self, config, region=None, sandbox=False, local_config=False, ignore_limiter=False):
        super().__init__(config, sandbox, local_config, ignore_limiter)

        # Default to the North-America platform host when no region is given.
        if region is None:
            region = 'na1'

        self.rest_api = "https://{}.api.riotgames.com".format(region)

    def _get(self, command: str, options=None):
        """Issue a GET request against the Riot API.

        :param command: URL path, e.g. '/lol/...'.
        :param options: optional dict of extra query parameters.
        :returns: decoded JSON response.
        :raises requests.HTTPError: on any non-200 status (after logging).
        """
        if options is None:
            options = {}

        base_url = "{}{}".format(self.rest_api, command)
        if self.key_id is not None:
            base_url = "{}?api_key={}".format(base_url, self.key_id)

        # loop over dictionary of options and add them as query parameters to the url
        # example: currencyPair=BTC_NXT&depth=10
        for key, val in options.items():
            separator = "?" if "?" not in base_url else "&"
            base_url = "{}{}{}={}".format(base_url, separator, key, val)

        self.check_limiter()
        resp = requests.get(base_url)
        self.reset_limiter()

        if resp.status_code != 200:
            LOG.error("%s: Status code %d", self.ID, resp.status_code)
            LOG.error("%s: Headers: %s", self.ID, resp.headers)
            LOG.error("%s: Resp: %s", self.ID, resp.text)
            resp.raise_for_status()

        return resp.json()

    def hello_world(self):
        """Sanity-check call against a known summoner name."""
        return self._get("/lol/summoner/v3/summoners/by-name/RiotSchmick")

    def champion_masteries(self, summoner_id):
        """All champion-mastery entries for a summoner."""
        return self._get('/lol/champion-mastery/v3/champion-masteries/by-summoner/{}'.format(summoner_id))

    def champion_mastery(self, summoner_id, champoin_id):
        """Mastery entry for one champion. (Parameter name keeps the original
        'champoin_id' spelling for backward compatibility with keyword callers.)"""
        return self._get('/lol/champion-mastery/v3/champion-masteries/by-summoner/{}/by-champion/{}'.format(summoner_id, champoin_id))

    def champion_mastery_score(self, summoner_id):
        """Total mastery score for a summoner."""
        return self._get('/lol/champion-mastery/v3/scores/by-summoner/{}'.format(summoner_id))

    def champion_rotations(self):
        """Current free-to-play champion rotation."""
        return self._get('/lol/platform/v3/champion-rotations')

    # api to get summoner information
    def get_summoner_by_account(self, account_id):
        return self._get('/lol/summoner/v3/summoners/by-account/{}'.format(account_id))

    def get_summoner_by_name(self, summoner_name):
        return self._get('/lol/summoner/v3/summoners/by-name/{}'.format(summoner_name))

    def get_summoner_by_summoner_id(self, summoner_id):
        return self._get('/lol/summoner/v3/summoners/{}'.format(summoner_id))

    def get_matches_for_account(self, account_id, parameters=None):
        """Match list for an account; *parameters* are passed as query params."""
        return self._get('/lol/match/v3/matchlists/by-account/{}'.format(account_id), parameters)

    def get_match_by_id(self, match_id):
        return self._get('/lol/match/v3/matches/{}'.format(match_id))

    def get_match_timeline(self, match_id):
        return self._get('/lol/match/v3/timelines/by-match/{}'.format(match_id))

    def get_match_ids_tournament(self, tournament_code):
        # Fixed: the path previously began with a stray space (' /lol/...'),
        # which produced a malformed request URL.
        return self._get('/lol/match/v3/matches/by-tournament-code/{}/ids'.format(tournament_code))

    def get_current_game_info(self, summoner_id):
        return self._get('/lol/spectator/v3/active-games/by-summoner/{}'.format(summoner_id))

    def get_featured_games(self):
        return self._get('/lol/spectator/v3/featured-games')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.