"""
advance Sleep functions
author: Valentyn Stadnytskyi
data: 2017 - Nov 17 2018
functions:
psleep - precision sleep takes
intsleep with inputs t, dt and interupt as a function.
The precision sleep class.
functiob:
psleep - sleep specified amount of time with sub milisecond precision
test_sleep - for testing purposes. will print how much time the code waited.
This is important for the Windows platform programs if precise wait is required.
The Windows OS has ~15-17 ms latenct - the shortest time between attentions from OS.
"""
__version__ = '1.0.0'
from time import time, sleep
import sys
if sys.version_info[0] == 2:
from time import clock as perf_counter
else:
from time import perf_counter
import platform
def precision_sleep(t = 0.02, min_time = 0.017):
"""
sleep for t seconds.
"""
from time import time, sleep,perf_counter
import platform
min_time
time_start = perf_counter()
if platform.system() == 'Linux':
sleep(t)
#elif platform.system() == 'Darwin':
# sleep(t)
else:
if t>min_time:
sleep(t-min_time)
time_left = t - (perf_counter() - time_start)
time_while_start = perf_counter()
while perf_counter() - time_while_start <= time_left:
pass
else:
time_left = t - (perf_counter() - time_start)
time_while_start = perf_counter()
while perf_counter() - time_while_start <= time_left:
pass
def interupt_sleep(t = 0.02, dt = 0.01, interupt = None):
    """precision sleep function with interrupt capability
    input:
        t - time to sleep
        dt - interval between checks of the interrupt function
        interupt - interrupt function that returns a boolean
    """
    if not callable(interupt):
        precision_sleep(t)
    else:
        time_start = perf_counter()
        while perf_counter() - time_start <= t:
            if interupt(): break
            # sleep in dt-sized increments, but never past the requested total t
            time_left = t - (perf_counter() - time_start)
            precision_sleep(min(dt, time_left))
if __name__ == '__main__':
    def test_psleep(t = 0.01):
        """
        Measure how long precision_sleep(t) actually waited, t in seconds.
        """
        t1 = perf_counter()
        precision_sleep(t)
        t2 = perf_counter()
        print(t2 - t1)
    def test_intsleep(t = 0.1, dt = 0.1, interupt = None):
        """
        Measure how long interupt_sleep(t, dt, interupt) actually waited, t in seconds.
        """
        print(t, dt, interupt)
        t1 = perf_counter()
        interupt_sleep(t = t, dt = dt, interupt = interupt)
        t2 = perf_counter()
        print(t2 - t1)
def interupt():
"""
return value of a global variable flag
"""
global flag
return flag
flag = True
    print('test_psleep(0.010) # in seconds')
    print('test_intsleep(0.10, 0.01, interupt) # in seconds')
    print('interupt_sleep(0.10, 0.1) # in seconds')
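    # A minimal demo sketch (not in the original): compare plain sleep() with
    # precision_sleep() for a 5 ms wait; on Windows, plain sleep() typically
    # overshoots by the ~15 ms scheduler quantum, while precision_sleep() should not.
    t1 = perf_counter(); sleep(0.005); t2 = perf_counter()
    print('sleep(0.005) actually waited', t2 - t1)
    t1 = perf_counter(); precision_sleep(0.005); t2 = perf_counter()
    print('precision_sleep(0.005) actually waited', t2 - t1)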
|
from .slim import Slim
from .rfb import RFB
__all__ = [
'Slim', 'RFB'
]
|
#!/usr/bin/env python
from distutils.core import setup
import os
os.chdir("../lib")
setup(name='logconfiggen',
version='0.1',
py_modules=["logconfiggen"]
)
|
"""
A base representation of an instance of Galaxy
"""
from bioblend.galaxy import (config, datasets, datatypes, folders, forms,
ftpfiles, genomes, groups, histories,
invocations, jobs, libraries, quotas, roles,
tool_data, tools, toolshed, users, visual,
workflows)
from bioblend.galaxy.client import Client
from bioblend.galaxyclient import GalaxyClient
class GalaxyInstance(GalaxyClient):
def __init__(self, url, key=None, email=None, password=None, verify=True):
"""
A base representation of a connection to a Galaxy instance, identified
by the server URL and user credentials.
After you have created a ``GalaxyInstance`` object, access various
modules via the class fields. For example, to work with histories and
get a list of all the user's histories, the following should be done::
from bioblend import galaxy
gi = galaxy.GalaxyInstance(url='http://127.0.0.1:8000', key='your_api_key')
hl = gi.histories.get_histories()
:type url: str
:param url: A FQDN or IP for a given instance of Galaxy. For example:
http://127.0.0.1:8080 . If a Galaxy instance is served under
a prefix (e.g., http://127.0.0.1:8080/galaxy/), supply the
entire URL including the prefix (note that the prefix must
end with a slash). If a Galaxy instance has HTTP Basic
authentication with username and password, then the
credentials should be included in the URL, e.g.
http://user:pass@host:port/galaxy/
:type key: str
        :param key: User's API key for the given instance of Galaxy, obtained
                    from the user preferences. If a key is not supplied, an
                    email address and password must be provided instead, and
                    the key will be obtained automatically for the user.
:type email: str
:param email: Galaxy e-mail address corresponding to the user.
Ignored if key is supplied directly.
:type password: str
:param password: Password of Galaxy account corresponding to the above
e-mail address. Ignored if key is supplied directly.
:param verify: Whether to verify the server's TLS certificate
:type verify: bool
"""
super().__init__(url, key, email, password, verify=verify)
self.libraries = libraries.LibraryClient(self)
self.histories = histories.HistoryClient(self)
self.workflows = workflows.WorkflowClient(self)
self.invocations = invocations.InvocationClient(self)
self.datasets = datasets.DatasetClient(self)
self.users = users.UserClient(self)
self.genomes = genomes.GenomeClient(self)
self.tools = tools.ToolClient(self)
self.toolshed = toolshed.ToolShedClient(self)
self.toolShed = self.toolshed # historical alias
self.config = config.ConfigClient(self)
self.visual = visual.VisualClient(self)
self.quotas = quotas.QuotaClient(self)
self.groups = groups.GroupsClient(self)
self.roles = roles.RolesClient(self)
self.datatypes = datatypes.DatatypesClient(self)
self.jobs = jobs.JobsClient(self)
self.forms = forms.FormsClient(self)
self.ftpfiles = ftpfiles.FTPFilesClient(self)
self.tool_data = tool_data.ToolDataClient(self)
self.folders = folders.FoldersClient(self)
@property
def max_get_attempts(self):
return Client.max_get_retries()
@max_get_attempts.setter
def max_get_attempts(self, v):
Client.set_max_get_retries(v)
@property
def get_retry_delay(self):
return Client.get_retry_delay()
@get_retry_delay.setter
def get_retry_delay(self, v):
Client.set_get_retry_delay(v)
def __repr__(self):
"""
A nicer representation of this GalaxyInstance object
"""
return "GalaxyInstance object for Galaxy at {0}".format(self.base_url)
|
from app.core import lista_de_comandos, usuarios_conectados
from app.core.filtro import normalizar
async def nome_valido(enviar, nome: str) -> bool:
"""
Verifique se um nome é válido para ser usado.
:param enviar: Uma função que será usada para retornar as mensagens de erro ao usuário.
:param nome: O nome que será avaliado.
:return: True se o nome puder ser usado, False em caso contrário.
"""
nome = normalizar(nome)
if len(nome) < 2 or len(nome) > 20:
await enviar('> O nome deve ter entre 2 e 20 caracteres')
return False
elif nome in usuarios_conectados:
await enviar('> Este nome já está em uso, tente outro')
return False
elif nome in lista_de_comandos:
await enviar('> Este nome é um comando do bate-papo, tente outro')
return False
return True
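# A minimal usage sketch (hypothetical "enviar" coroutine that just prints):
#
#   import asyncio
#   async def demo():
#       async def enviar(msg): print(msg)
#       print(await nome_valido(enviar, 'x'))    # False: shorter than 2 characters
#       print(await nome_valido(enviar, 'Ana'))  # True, if 'Ana' is free and not a command
#   asyncio.run(demo())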
|
# Generated by Django 3.1 on 2020-09-08 09:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0021_auto_20200908_1231'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='date_of_birth',
new_name='test_field',
),
]
|
import binascii
import errno
import os
import re
import yaml
import StringIO
import tempfile
import shutil
import zipfile
from urlparse import urlparse
#from csv import DictWriter
from osgeo import ogr, osr
from socket import error as socket_error
from django.conf import settings
from django.http import Http404
from django.views.generic import View
from django.shortcuts import HttpResponse, render_to_response
try:
import simplejson as json
except ImportError:
import json
from geodash.cache import provision_memcached_client
from geodash.utils import extract, grep, getRequestParameters
from geodash.enumerations import ATTRIBUTE_TYPE_TO_OGR
def parse_path(path):
basepath, filepath = os.path.split(path)
filename, ext = os.path.splitext(filepath)
return (basepath, filename, ext)
class GeoDashDictWriter():
def __init__(self, output, fields, fallback=""):
self.output = output
self.fields = fields
self.fallback = fallback
self.delimiter = u","
self.quote = u'"'
self.newline = u"\n"
def writeheader(self):
self.output = self.output + self.delimiter.join([self.quote+x['label']+self.quote for x in self.fields]) + self.newline
def writerow(self, rowdict):
row = [extract(x['path'], rowdict, self.fallback) for x in self.fields]
#
row = [unicode(x) for x in row]
row = [x.replace('"','""') for x in row]
#
self.output = self.output + self.delimiter.join([self.quote+x+self.quote for x in row]) + self.newline
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append([extract(x['path'], rowdict, self.fallback) for x in self.fields])
for row in rows:
#
row = [unicode(x) for x in row]
row = [x.replace('"','""') for x in row]
#
self.output = self.output + self.delimiter.join([self.quote+x+self.quote for x in row]) + self.newline
def getvalue(self):
return self.output
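# A minimal usage sketch for GeoDashDictWriter (hypothetical field spec and row;
# 'path' is assumed to be a dotted path understood by geodash.utils.extract):
#
#   writer = GeoDashDictWriter("", [{'label': u'name', 'path': u'properties.name'}])
#   writer.writeheader()
#   writer.writerows([{'properties': {'name': u'Kabul'}}])
#   print writer.getvalue()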
class geodash_data_view(View):
key = None
def _build_root(self, request, *args, **kwargs):
return None
def _build_key(self, request, *args, **kwargs):
return self.key
def _build_attributes(self, request, *args, **kwargs):
#raise Exception('geodash_data_view._build_attributes should be overwritten. This API likely does not support CSV.')
return None
def _build_geometry(self, request, *args, **kwargs):
return None
def _build_geometry_type(self, request, *args, **kwargs):
return None
def _build_data(self):
raise Exception('geodash_data_view._build_data should be overwritten')
def get(self, request, *args, **kwargs):
        ext_lc = kwargs['extension'].lower()
##
data = None
if settings.GEODASH_CACHE_DATA:
client = provision_memcached_client()
if client:
key = self._build_key(request, *args, **kwargs)
print "Checking cache with key ", key
data = None
try:
data = client.get(key)
except socket_error as serr:
data = None
print "Error getting data from in-memory cache."
if serr.errno == errno.ECONNREFUSED:
print "Memcached is likely not running. Start memcached with supervisord."
raise serr
if not data:
print "Data not found in cache."
data = self._build_data(request, *args, **kwargs)
if ext_lc == "geodash":
data = [int(x) for x in data]
try:
client.set(key, data)
except socket_error as serr:
print "Error saving data to in-memory cache."
if serr.errno == errno.ECONNREFUSED:
print "Memcached is likely not running or the data exceeds memcached item size limit. Start memcached with supervisord."
raise serr
else:
print "Data found in cache."
else:
print "Could not connect to memcached client. Bypassing..."
data = self._build_data(request, *args, **kwargs)
else:
print "Not caching data (settings.geodash_CACHE_DATA set to False)."
data = self._build_data(request, *args, **kwargs)
#content = json.dumps(data, default=jdefault)
#content = re.sub(
# settings.GEODASH_REGEX_CLIP_COORDS_PATTERN,
# settings.GEODASH_REGEX_CLIP_COORDS_REPL,
# content,
# flags=re.IGNORECASE)
root = self._build_root(request, *args, **kwargs)
attributes = self._build_attributes(request, *args, **kwargs)
if attributes:
data = grep(
obj=data,
root=root,
attributes=attributes,
filters=getRequestParameters(request, "grep", None)
)
if ext_lc == "json":
return HttpResponse(json.dumps(data, default=jdefault), content_type="application/json")
elif ext_lc == "yml" or ext_lc == "yaml":
response = yaml.safe_dump(data, encoding="utf-8", allow_unicode=True, default_flow_style=False)
return HttpResponse(response, content_type="text/plain")
elif ext_lc == "csv" or ext_lc == "csv":
writer = GeoDashDictWriter("", attributes)
writer.writeheader()
writer.writerows(extract(root, data, []))
response = writer.getvalue()
return HttpResponse(response, content_type="text/csv")
elif ext_lc == "zip":
# See the following for how to create zipfile in memory, mostly.
# https://newseasandbeyond.wordpress.com/2014/01/27/creating-in-memory-zip-file-with-python/
tempDirectory = tempfile.mkdtemp()
print "Temp Directory:", tempDirectory
if tempDirectory:
geometryType = self._build_geometry_type(request, *args, **kwargs)
########### Create Files ###########
os.environ['SHAPE_ENCODING'] = "utf-8"
# See following for how to create shapefiles using OGR python bindings
# https://pcjericks.github.io/py-gdalogr-cookbook/vector_layers.html#filter-and-select-input-shapefile-to-new-output-shapefile-like-ogr2ogr-cli
basepath, out_filename, ext = parse_path(request.path)
out_shapefile = os.path.join(tempDirectory, out_filename+".shp" )
out_driver = ogr.GetDriverByName("ESRI Shapefile")
if os.path.exists(out_shapefile):
out_driver.DeleteDataSource(out_shapefile)
out_datasource = out_driver.CreateDataSource(out_shapefile)
out_layer = out_datasource.CreateLayer(
(out_filename+".shp").encode('utf-8'),
geom_type=geometryType
)
########### Create Fields ###########
out_layer.CreateField(ogr.FieldDefn("id", ogr.OFTInteger)) # Create ID Field
for attribute in attributes:
label = attribute.get('label_shp') or attribute.get('label')
out_layer.CreateField(ogr.FieldDefn(
label,
ATTRIBUTE_TYPE_TO_OGR.get(attribute.get('type', 'string'))
))
########### Create Features ###########
                features = extract(root, data, [])
for i in range(len(features)):
feature = features[i]
out_feature = ogr.Feature(out_layer.GetLayerDefn())
geom = extract(self._build_geometry(request, *args, **kwargs), feature, None)
out_feature.SetGeometry(ogr.CreateGeometryFromJson(json.dumps(geom, default=jdefault)))
out_feature.SetField("id", i)
for attribute in attributes:
label = attribute.get('label_shp') or attribute.get('label')
out_value = extract(attribute.get('path'), feature, None)
out_feature.SetField(
(attribute.get('label_shp') or attribute.get('label')),
out_value.encode('utf-8') if isinstance(out_value, basestring) else out_value
)
out_layer.CreateFeature(out_feature)
out_datasource.Destroy()
########### Create Projection ###########
spatialRef = osr.SpatialReference()
spatialRef.ImportFromEPSG(4326)
spatialRef.MorphToESRI()
with open(os.path.join(tempDirectory, out_filename+".prj"), 'w') as f:
f.write(spatialRef.ExportToWkt())
f.close()
########### Create Zipfile ###########
buff = StringIO.StringIO()
zippedShapefile = zipfile.ZipFile(buff, mode='w')
#memoryFiles = []
component_filenames= os.listdir(tempDirectory);
#for i in range(len(componentFiles)):
# memoryFiles.append(StringIO.StringIO())
for i in range(len(component_filenames)):
                    with open(os.path.join(tempDirectory, component_filenames[i]), 'rb') as f:
contents = f.read()
zippedShapefile.writestr(component_filenames[i], contents)
zippedShapefile.close()
print "zippedShapefile.printdir()", zippedShapefile.printdir()
########### Delete Temporary Directory ###########
shutil.rmtree(tempDirectory)
########### Response ###########
return HttpResponse(buff.getvalue(), content_type="application/zip")
#for i in range(len(componentFiles)):
# with open(componentFiles[i], 'w') as componentFile:
# memoryFiles[i].write(componentFile.read())
else:
raise Http404("Could not acquire temporary directory for building shapefile.")
elif ext_lc == "geodash":
response = HttpResponse(content_type='application/octet-stream')
# Need to do by bytes(bytearray(x)) to properly translate integers to 1 byte each
# If you do bytes(data) it will give 4 bytes to each integer.
response.write(bytes(bytearray(data)))
return response
else:
raise Http404("Unknown config format.")
def jdefault(o):
return o.__dict__
|
import sys
import numpy as np
from collections import OrderedDict
from h5files import ParameterFile
import logging
# Global parameters and type defs
DOMAINS = ["cyt", "cleft", "jsr", "nsr", "tt"]
domain_id = np.uint8
float_type = np.float64  # use np.float32 here for single-precision output
class UndefinedSpeciesError(Exception):
pass
class UndefinedDomainError(Exception):
pass
class SpeciesDomainMismatchError(Exception):
pass
class DomainSpecies(object):
"""Container for a specific species in a given domain.
Parameters
----------
domain : str
Name of the domain the species resides in
species : str
Name of the species
init : float, optional
Initial concentration of the species, must be positive
sigma : float, optional
Diffusivity. Defaults to 0, meaning the species is non-diffusive
fixed : bool, optional
???
"""
def __init__(self, domain, species, init=1.0, sigma=0., fixed=False):
# Sanity checks on input
assert isinstance(domain, str), "domain must be given as str"
assert domain in DOMAINS, "domain not recognized"
assert isinstance(species, str), "species must be given as str"
assert "-" not in species, "- is not allowed in species name"
assert isinstance(init, float)
assert isinstance(sigma, float)
assert init > 0.
assert sigma >= 0.0
self.species = species
self.domain = domain
self.sigma = sigma
self.init = init
self.fixed = fixed
@property
def domain_name(self):
return self.species + self.domain
@property
def diffusive(self):
"""True if the species is diffusive in its domain."""
return self.sigma > 0.
def __str__(self):
return "DomainSpecies({}, {}, {})".format(self.domain,
self.species,
"diffusive" if self.diffusive
else "non-diffusive")
class BufferReaction(object):
"""Container for a buffer reaction in a given domain.
Parameters
----------
domain : str
Name of domain where buffer reaction is found
species : str
Name of buffer species
bound_species : str
Name of species being bound by buffer
tot : float
Total concentration of buffer
k_on : float
Association rate coefficient
k_off : float
Dissociation rate coefficient
"""
def __init__(self, domain, species, bound_species, tot, k_on, k_off):
# Sanity checks on input
assert isinstance(domain, str), "domain must be given as str"
assert domain in DOMAINS, "domain not recognized"
assert isinstance(species, str)
assert isinstance(bound_species, str)
assert all(isinstance(v, float) and v > 0 for v in [tot, k_on, k_off])
self.domain = domain
self.species = species
self.bound_species = bound_species
self.tot = tot
self.k_on = k_on
self.k_off = k_off
@property
def kd(self):
"""Equilibrium binding concstant."""
return self.k_off/self.k_on
def __str__(self):
return "BufferReaction({}, {} <-> {})".format(self.domain,
self.species,
self.bound_species)
class BoundaryFlux(object):
"""Container for fluxes between domains."""
pass
class RyR(BoundaryFlux):
"""Container class for a ryanodine receptor.
Notes
-----
???
Parameters
----------
Kd_open : float, optional
Equilibrium binding constant in open state
Kd_close : float, optional
Equilibrium binding constant in closed state
k_min_open : float, optional
???
k_max_open : float, optional
???
k_min_close : float, optional
???
k_max_close : float, optional
???
n_open : float, optional
???
n_close : float, optional
???
i_unitary : float, optional
???
"""
def __init__(self, Kd_open=127.92,
k_min_open=1.e-4,
k_max_open=0.7,
n_open=2.8,
Kd_close=62.5,
k_min_close=0.9,
k_max_close=10.,
n_close=-0.5,
i_unitary=0.5):
self.boundary = "ryr"
self.species = "Ca"
self.i_unitary = i_unitary
self.Kd_open = Kd_open
self.k_min_open = k_min_open
self.k_max_open = k_max_open
self.n_open = n_open
self.Kd_close = Kd_close
self.k_min_close = k_min_close
self.k_max_close = k_max_close
self.n_close = n_close
class SERCA(BoundaryFlux):
"""Container for SERCA channels.
Parameters
----------
v_cyt_to_A_sr : float, optional
???
density : float, optional
???
scale : float, optional
???
"""
def __init__(self, v_cyt_to_A_sr=307., density=75., scale=1.0):
assert all(isinstance(v, float) for v in [v_cyt_to_A_sr, density, scale])
self.boundary = "serca"
self.species = "Ca"
self.v_cyt_to_A_sr = v_cyt_to_A_sr
self.density = density
self.scale = scale
def write_species_params(filename, domains, domain_species,
buffer_reactions=None, boundary_fluxes=None):
"""
Parameters
----------
filename : str
File to write the parameter data to.
domains : list of str
domain_species : list of DomainSpecies objects
buffer_reactions : list of BufferReaction objects
boundary_fluxes : list of Flux objects
"""
    boundary_fluxes = boundary_fluxes if boundary_fluxes is not None else []
buffer_reactions = buffer_reactions if buffer_reactions is not None else []
# Type checks
assert isinstance(filename, str)
assert isinstance(domains, list)
assert isinstance(domain_species, list)
assert isinstance(buffer_reactions, list)
assert isinstance(boundary_fluxes, list)
assert all(isinstance(d, str) for d in domains)
assert all(isinstance(ds, DomainSpecies) for ds in domain_species)
assert all(isinstance(br, BufferReaction) for br in buffer_reactions)
assert all(isinstance(bf, BoundaryFlux) for bf in boundary_fluxes)
all_domains = domains #???
species = OrderedDict()
domains = OrderedDict()
diffusive_species = []
for ds in domain_species:
        assert ds.domain in all_domains, "domain {} is not among the "\
            "original domains".format(ds.domain)
# Add to species dict
if ds.species not in species:
species[ds.species] = []
species[ds.species].append(ds.domain)
# Add to domain dict
if ds.domain not in domains:
domains[ds.domain] = dict(species=OrderedDict(),
buffers=OrderedDict(),
diffusive=OrderedDict())
domains[ds.domain]['species'][ds.species] = ds
if ds.diffusive:
domains[ds.domain]['diffusive'][ds.species] = ds
if ds.species not in diffusive_species:
diffusive_species.append(ds.species)
# Checking that BufferReaction and DomainSpecies objects are consistent
for br in buffer_reactions:
if br.domain not in domains:
e = "Buffer domain '{}' not recognized".format(br.domain)
raise UndefinedDomainError(e)
if br.species not in species:
e = "Buffer species '{}' not recognized".format(br.species)
raise UndefinedSpeciesError(e)
if br.bound_species not in species:
e = "Bound species '{}' not recognized".format(br.species)
raise UndefinedSpecieserror(e)
if br.species not in domains[br.domain]['species']:
print domains[br.domain]['species']
e = "'{}' not found in '{}'".format(br.species, br.domain)
raise SpeciesDomainMismatchError(e)
if br.bound_species not in domains[br.domain]['species']:
e = "'{}' not found in '{}'".format(br.bound_species, br.domain)
raise SpeciesDomainMismatchError(e)
domains[br.domain]['buffers'][br.species] = br
# Update initial value of buffers
b = domains[br.domain]['species'][br.species]
s = domains[br.domain]['species'][br.bound_species]
b.init = br.tot*s.init/(s.init + br.kd)
# Lump species name in different domains
max_num_species = max(len(domains[domain]['species']) for domain in domains)
min_num_species = min(len(domains[domain]['species']) for domain in domains)
distinct_species = [set() for i in range(min_num_species)]
for domain in domains:
sps = domains[domain]['species'].keys()
for i in range(min_num_species):
distinct_species[i].add(sps[i])
# Make a map linking old names to new names
species_map = OrderedDict()
for old_species, new_species in zip(distinct_species, \
["-".join(s) for s in distinct_species]):
for osp in old_species:
species_map[osp] = new_species
# Log info about species lumping
logging.debug("The following species are lumped")
for os, ns in species_map.items():
if os != ns:
logging.debug("{} -> {}".format(os, ns))
# Update all species names from the map
for domain in domains:
for stype in ['species', 'buffers', 'diffusive']:
new_species = OrderedDict()
for sname, sobject in domains[domain][stype].items():
if sname in species_map:
new_species[species_map[sname]] = sobject
sobject.species = species_map[sname]
else:
new_species[sname] = sobject
if isinstance(sobject, BufferReaction):
if sobject.bound_species in species_map:
sobject.bound_species = species_map[sobject.bound_species]
domains[domain][stype] = new_species
# Log info for debugging
species_info = 'Domain Species:\n\t\t {}'.format("\n\t\t ".join(
(str(ds) for domain in domains
for ds in domains[domain]['species'].values())))
    diffusion_info = 'Diffusive Species:\n\t\t {}'.format("\n\t\t ".join(
(str(ds) for domain in domains
for ds in domains[domain]['diffusive'].values())))
buffer_info = 'Buffer Reactions:\n\t\t {}'.format("\n\t\t ".join(
(str(ds) for domain in domains
for ds in domains[domain]['buffers'].values())))
    logging.debug(species_info)
    logging.debug(diffusion_info)
    logging.debug(buffer_info)
# Change species dict to reflect lumping
updated_species = OrderedDict()
for os, os_domains in species.items():
if os not in species_map:
updated_species[os] = os_domains[:]
else:
ns = species_map[os]
if ns not in updated_species:
updated_species[ns] = os_domains[:]
else:
for domain in os_domains:
if domain not in updated_species[ns]:
updated_species[ns].append(domain)
if os in diffusive_species:
index = diffusive_species.index(os)
diffusive_species.pop(index)
diffusive_species.insert(index, ns)
species = updated_species
def species_cmp(a, b):
if a not in diffusive_species and b not in diffusive_species:
return 0
elif a in diffusive_species and b in diffusive_species:
return 0
elif a in diffusive_species:
return -1
else:
return 1
def domain_cmp(a, b):
if a not in domains and b not in domains:
return 0
elif a in domains and b in domains:
return cmp(domains.keys().index(a), domains.keys().index(b))
elif a in domains:
return -1
else:
return 1
species_list = sorted(species.keys(), cmp=species_cmp)
domain_list = sorted(all_domains, cmp=domain_cmp)
logging.warning("New species: {}".format(species_list))
logging.warning("Check that these make sense as we "
"have a faulty selection algorithm.")
# Open parameter h5 file and write data
with ParameterFile(filename) as f:
# Extract boundary fluxes
use_ryr = False
use_serca = False
for flux in boundary_fluxes:
if isinstance(flux, RyR):
ryr = flux
use_ryr = True
elif isinstance(flux, SERCA):
serca = flux
use_serca = True
f.attrs.create("use_ryr", use_ryr, dtype=np.uint8)
f.attrs.create("use_serca", use_serca, dtype=np.uint8)
if use_ryr:
g_ryr = f.create_group("ryr")
g_ryr.attrs.create("Kd_open", ryr.Kd_open, dtype=float_type)
g_ryr.attrs.create("k_min_open", ryr.k_min_open, dtype=float_type)
g_ryr.attrs.create("k_max_open", ryr.k_max_open, dtype=float_type)
g_ryr.attrs.create("n_open", ryr.n_open, dtype=float_type)
g_ryr.attrs.create("Kd_close", ryr.Kd_close, dtype=float_type)
g_ryr.attrs.create("k_min_close", ryr.k_min_close, dtype=float_type)
g_ryr.attrs.create("k_max_close", ryr.k_max_close, dtype=float_type)
g_ryr.attrs.create("n_close", ryr.n_close, dtype=float_type)
g_ryr.attrs.create("i_unitary", ryr.i_unitary, dtype=float_type)
g_ryr.attrs.create("boundary", ryr.boundary)
g_ryr.attrs.create("species", ryr.species)
if use_serca:
g_serca = f.create_group("serca")
g_serca.attrs.create("v_cyt_to_A_sr", serca.v_cyt_to_A_sr,
dtype=float_type)
g_serca.attrs.create("density", serca.density, dtype=float_type)
g_serca.attrs.create("scale", serca.scale, dtype=float_type)
g_serca.attrs.create("v_cyt_to_A_sr", serca.v_cyt_to_A_sr, dtype=float_type)
g_serca.attrs.create("boundary", serca.boundary)
g_serca.attrs.create('species', serca.species)
# Add domains
f.attrs.create("num_domains", len(domain_list), dtype=domain_id)
for num, dn in enumerate(domain_list):
f.attrs.create("domain_name_{}".format(num), dn)
# Add species
f.attrs.create("num_species", len(species_list), dtype=domain_id)
for inds, sp in enumerate(species_list):
f.attrs.create("species_name_{}".format(inds), sp)
# Iterate over the domains and add species and buffer information
fixed = []
for indd, dom in enumerate(domain_list):
g = f.create_group(dom)
num_diffusive = 0
diffusive = []
sigmas = []
inits = []
for inds, sp in enumerate(species_list):
if dom in domains and sp in domains[dom]['species']:
sigma = domains[dom]['species'][sp].sigma
init = domains[dom]['species'][sp].init
if domains[dom]['species'][sp].fixed:
fixed.extend([indd,inds])
else:
sigma = 0.0
init = 0.0
inits.append(init)
if sigma > 0:
num_diffusive += 1
diffusive.append(inds)
sigmas.append(sigma)
if dom in domains:
num_buff = len(domains[dom]['buffers'])
for indb, buff in enumerate(sorted(domains[dom]['buffers'].values(),
cmp=lambda a,b:cmp(
species_list.index(a.species),
species_list.index(b.species)))):
bg = g.create_group("buffer_{}".format(indb))
buff_sp = [species_list.index(buff.species), \
species_list.index(buff.bound_species)]
bg.attrs.create('species', np.array(buff_sp, dtype=domain_id))
bg.attrs.create("k_off", buff.k_off, dtype=float_type)
bg.attrs.create("k_on", buff.k_on, dtype=float_type)
bg.attrs.create("tot", buff.tot, dtype=float_type)
else:
num_buff = 0
g.attrs.create("num_buffers", num_buff, dtype=domain_id)
g.attrs.create("num_diffusive", num_diffusive, dtype=domain_id)
g.attrs.create('diffusive', np.array(diffusive, dtype=domain_id))
g.attrs.create("sigma", np.array(sigmas, dtype=float_type))
g.attrs.create("init", np.array(inits, dtype=float_type))
f.attrs.create("num_fixed_domain_species", len(fixed)/2, dtype=domain_id)
f.attrs.create("fixed_domain_species", np.array(fixed, dtype=domain_id))
# Save convolution constants in nm
s_x = 20000/np.log(2); s_y = s_x; s_z = 80000/np.log(2)
#s_x = 5000/np.log(2); s_y = s_x; s_z = 5000/np.log(2)
#s_x = 12/np.log(2); s_y = s_x; s_z = 12/np.log(2)
f.attrs.create("convolution_constants", np.array([s_x, s_y, s_z], dtype=float_type))
if __name__ == "__main__":
logging.getLogger().setLevel(20)
f = 1.0
domain_species = [DomainSpecies("cyt", "Ca", 0.14, sigma=220.e3),
DomainSpecies("cyt", "Fluo3", sigma=42.e3),
DomainSpecies("cyt", "CMDN", sigma=22.e3),
DomainSpecies("cyt", "ATP", sigma=140.e3),
DomainSpecies("cyt", "TRPN"),
DomainSpecies("cleft", "Ca", 0.14, sigma=220.e3*f),
DomainSpecies("cleft", "Fluo3", sigma=42.e3*f),
DomainSpecies("cleft", "CMDN", sigma=22.e3*f),
DomainSpecies("cleft", "ATP", sigma=140.e3*f),
DomainSpecies("cleft", "TRPN"),
DomainSpecies("jsr", "Ca", 10.0, 73.3e3),
DomainSpecies("jsr", "Fluo5", sigma=8.e3),
DomainSpecies("jsr", "CSQN"),
DomainSpecies("nsr", "Ca", 1300., 0.01*73.3e3),
DomainSpecies("nsr", "Fluo5", sigma=8.e3),
DomainSpecies("nsr", "CSQN")]
buffers = [BufferReaction("cyt", "CMDN", "Ca", 24., 34e-3, 238.e-3),
BufferReaction("cyt", "ATP", "Ca", 455., 255e-3, 45.),
BufferReaction("cyt", "Fluo3", "Ca", 25., 10*110e-3, 10*110e-3),
BufferReaction("cyt", "TRPN", "Ca", 70., 32.7e-3, 19.6e-3),
BufferReaction("cleft", "CMDN", "Ca", 24., 34e-3, 238.e-3),
BufferReaction("cleft", "ATP", "Ca", 455., 255e-3, 45.),
BufferReaction("cleft", "Fluo3", "Ca", 25.,10*110e-3, 10*110e-3),
BufferReaction("cleft", "TRPN", "Ca", 47., 32.7e-3, 19.6e-3),
#BufferReaction("jsr", "Fluo5", "Ca", 25., 250e-6, 100e-3),
BufferReaction("jsr", "Fluo5", "Ca", 25.e-3, 110e-3, 110e-3),
BufferReaction("jsr", "CSQN", "Ca", 30.e3, 102e-3, 65.),
#BufferReaction("nsr", "Fluo5", "Ca",25., 250e-6, 100e-3),
BufferReaction("nsr", "Fluo5", "Ca", 25.e-3, 110e-6, 110e-3),
BufferReaction("nsr", "CSQN", "Ca", 6.e3, 102e-3, 65.)
]
domains = ["cyt", "cleft", "jsr", "nsr", "tt"]
double = "" if float_type == np.float32 else "_double"
suffix = ""
#ryr = RyR(Kd_open=80., Kd_close=50.)
#ryr = RyR(Kd_open=105., Kd_close=62.5, k_min_open=1.e-4*0.05, i_unitary= 0.5)
    ryr = RyR(Kd_open=105., Kd_close=60, k_min_open=1.e-4*0.05, i_unitary= 0.5) # Kd_close = 60: shifted
#ryr = RyR(Kd_open=105., Kd_close=30.0, k_min_open=1.e-4*0.05, i_unitary= 0.5)
#ryr = RyR(Kd_open=90., Kd_close=62.5)
serca = SERCA(density=1000.) #this pumps at 20uM/10ms, 700um/350ms, perhaps
#write_species_params("parameters_flat_1{}{}.h5".format(double, suffix),
# domains, domain_species, buffers, [ryr, serca])
#write_species_params("parameters_kdo_{}_kdc_{}_i_{}{}{}.h5".format( int(ryr.Kd_open), int(ryr.Kd_close), float(ryr.i_unitary), double, suffix), domains, domain_species, buffers, [ryr, serca])
write_species_params("no_jsr_ca.h5", domains, domain_species, buffers, [ryr, serca])
|
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
from matplotlib.ticker import MaxNLocator
import numpy as np
import wx
from . import gTurbSim_wdr as gts_wdr
from .base import ConfigFrame
from ..runInput.main import cfg2tsrun
import copy
from ..main import tsrun
from ..base import tsGrid
class profFigure(object):
def __init__(self, panel, dpi):
self.dpi = dpi
self.fig = Figure((1.9, 3.4), dpi=self.dpi)
self.canvas = FigCanvas(panel, -1, self.fig)
self.fig.set_facecolor('w')
self.axes = self.fig.add_axes([0.24, 0.24, .7, 0.7])
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 0, wx.EXPAND | wx.ALL)
panel.SetSizer(self.sizer)
panel.Fit()
def draw(self, parent):
parent.inputs2config()
ax = self.axes
ax.cla()
tsr = cfg2tsrun(copy.deepcopy(parent.config))
pr = tsr.prof
ax.plot(pr.u[:, pr.grid.ihub[1]], pr.z,
'r+', ms=7)
zmx = np.max(pr.z)
tsrtmp = tsrun()
nz = 100.
tsrtmp.grid = tsGrid(center=tsr.grid.center,
ny=3, dy=tsr.grid.width / 2,
nz=nz, dz=zmx / nz,
dt=1, nt=1000,)
tsrtmp.prof = tsr.profModel
tsrtmp.grid.zhub = tsrtmp.grid.center
tsrtmp.grid.z = np.arange(zmx / 40, zmx * 1.1, zmx / nz)
pr = tsrtmp.prof
ax.plot(pr.u[:, 1], pr.z, 'k-', zorder=-5)
ax.set_xlim([0, None])
ax.set_ylim([0, None])
ax.xaxis.set_major_locator(MaxNLocator(5))
self.axes.set_xlabel('u [m/s]')
self.axes.set_ylabel('z [m]')
self.canvas.draw()
class profConfigFrame(ConfigFrame):
    # The keys in this dict should match the keys of the cho_profmodel
    # aliases dict (see set_aliases); each value lists the input widgets
    # to exclude for that profile model.
exclude_vars = {
'H2L': ['inp_plexp', 'inp_zjet'],
'LOG': ['inp_plexp', 'inp_zjet'],
'PL': ['inp_zjet', 'inp_ustar'],
'IEC': ['inp_zjet'],
'JET': ['inp_plexp', 'inp_ustar'],
}
save_vars = {
'cho_profmodel': 'WindProfileType',
'inp_refheight': 'RefHt',
'inp_refvel': 'URef',
'inp_ustar': 'UStar',
'inp_plexp': 'PLExp',
'inp_zjet': 'ZJetMax',
'inp_hflowang': 'HFlowAng',
'inp_vflowang': 'VFlowAng',
}
def init_layout(self,):
self.panel = wx.Panel(self) # ,-1,style=wx.NO_BORDER)
gts_wdr.profSetup(self.panel)
@property
def model(self,):
return self.cho_profmodel.value
def init_fig(self,):
dpi = wx.ScreenDC().GetPPI()
self.fig = profFigure(self.pnl_profile, dpi[0])
def set_aliases(self,):
self.cho_profmodel.aliases = {
'H2L': ['H2O Log'],
'PL': ['POWER'],
}
def init_bindings(self,):
wx.EVT_CLOSE(self, self.OnCloseWindow)
self.btn_cancel.Bind(wx.EVT_BUTTON, self.OnCloseWindow)
self.btn_ok.Bind(wx.EVT_BUTTON, self.OnOk)
self.inp_zjet.Bind(wx.EVT_TEXT, self.refresh)
self.inp_refheight.Bind(wx.EVT_TEXT, self.refresh)
self.inp_refvel.Bind(wx.EVT_TEXT, self.refresh)
self.inp_plexp.Bind(wx.EVT_TEXT, self.refresh)
self.inp_hflowang.Bind(wx.EVT_TEXT, self.refresh)
self.inp_vflowang.Bind(wx.EVT_TEXT, self.refresh)
self.cho_profmodel.Bind(wx.EVT_CHOICE, self.update)
|
class BaseNumberConverter(TypeConverter):
""" Provides a base type converter for nonfloating-point numerical types. """
def CanConvertFrom(self,*__args):
"""
CanConvertFrom(self: BaseNumberConverter,context: ITypeDescriptorContext,sourceType: Type) -> bool
Determines if this converter can convert an object in the given source type to
the native type of the converter.
context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
sourceType: A System.Type that represents the type from which you want to convert.
        Returns: true if this converter can perform the operation; otherwise, false.
"""
pass
def CanConvertTo(self,*__args):
"""
CanConvertTo(self: BaseNumberConverter,context: ITypeDescriptorContext,t: Type) -> bool
Returns a value indicating whether this converter can convert an object to the
given destination type using the context.
context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
t: A System.Type that represents the type to which you want to convert.
        Returns: true if this converter can perform the operation; otherwise, false.
"""
pass
def ConvertFrom(self,*__args):
"""
ConvertFrom(self: BaseNumberConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object) -> object
Converts the given object to the converter's native type.
context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
culture: A System.Globalization.CultureInfo that specifies the culture to represent the
number.
value: The object to convert.
Returns: An System.Object that represents the converted value.
"""
pass
def ConvertTo(self,*__args):
"""
ConvertTo(self: BaseNumberConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object,destinationType: Type) -> object
Converts the specified object to another type.
context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
culture: A System.Globalization.CultureInfo that specifies the culture to represent the
number.
value: The object to convert.
destinationType: The type to convert the object to.
Returns: An System.Object that represents the converted value.
"""
pass
|
# import unittest
# from proconfig.tests import BasicTest
#
# if __name__ == '__main__':
# unittest.main()
|
# coding=utf-8
import os
import sys
import traceback
from typing import List
from flask import Flask, render_template, send_from_directory
from flask_jsglue import JSGlue
from src.business import Business
from src.entity.building import Building
from src.utilities import checked
app = Flask(__name__, template_folder='../html')
jsglue = JSGlue(app)
buildings: List[Building] = []
user_code: str = ''
data_file: str = ''
filter: str = ''
sort_type: str = ''
##############
# Static files
##############
@app.route('/favicon.ico')
def favicon():
"""
:return: Favicon icon
"""
return send_from_directory(app.template_folder, 'favicon.png')
@app.route('/style.css')
def style():
"""
:return: Styles file
"""
return send_from_directory(app.template_folder, 'style.css')
@app.route('/bootstrap.css')
def bootstrap_styles():
"""
:return: Bootstrap looks
"""
return send_from_directory(os.path.join(app.template_folder, 'bootstrap'), 'bootstrap.min.css')
@app.route('/bootstrap.js')
def bootstrap_javascript():
"""
:return: Bootstrap code
"""
return send_from_directory(os.path.join(app.template_folder, 'bootstrap'), 'bootstrap.min.js')
@app.route('/jquery.js')
def jquery():
"""
:return: Jquery code
"""
return send_from_directory(os.path.join(app.template_folder, 'bootstrap'), 'jquery-3.3.1.min.js')
@app.route('/popper.js')
def popper():
"""
:return: Popper code
"""
return send_from_directory(os.path.join(app.template_folder, 'bootstrap'), 'popper.min.js')
@app.route('/noty.js')
def noty():
"""
    :return: Noty javascript notifications code
"""
return send_from_directory(os.path.join(app.template_folder, 'bootstrap'), 'noty.min.js')
@app.route('/menu.js')
def menu_javascript():
"""
:return: Menu javascript code
"""
return send_from_directory(os.path.join(app.template_folder, 'code'), 'menu.js')
@app.route('/building.js')
def building_javascript():
"""
:return: Building javascript code
"""
return send_from_directory(os.path.join(app.template_folder, 'code'), 'building.js')
@app.route('/dwelling.js')
def dwelling_javascript():
"""
:return: Dwelling javascript code
"""
return send_from_directory(os.path.join(app.template_folder, 'code'), 'dwelling.js')
#######
# Login
#######
@app.route('/', methods=['GET'])
def index():
""" Intro page header """
return checked(Business.index, 'Zlyhalo získavanie úvodnej stránky.', check_login=False)
@app.route('/', methods=['POST'])
def login():
""" Logging using Intro page """
return checked(Business.login, 'Zlyhalo prihlasovanie.', check_login=False)
######
# Menu
######
@app.route('/menu', methods=['GET'])
def menu():
""" Starting page for selecting building """
return checked(Business.menu, 'Zlyhalo získavanie budov.')
@app.route('/odhlasit', methods=['POST'])
def logout():
""" Returns only filtered out buildings """
return checked(Business.logout, 'Zlyhalo odhlasovanie.')
@app.route('/nacitat', methods=['POST'])
def load_buildings():
""" Load buildings from external xml file """
return checked(Business.load_buildings, 'Zlyhalo načítanie z XML súboru.')
@app.route('/ulozit', methods=['POST'])
def save_buildings():
""" Save buildings to external xml file """
return checked(Business.save_buildings, 'Zlyhalo ukladanie XML súboru.')
@app.route('/menu/filter', methods=['POST'])
def filter_buildings():
""" Set buildings filter """
return checked(Business.filter_buildings, 'Zlyhalo nastavovanie vyhľadávacieho filtera.')
@app.route('/menu/triedit', methods=['POST'])
def sort_buildings():
""" Set buildings sorting method """
return checked(Business.sort_buildings, 'Zlyhalo nastavenie triediacej podmienky.')
@app.route('/menu/budova/pridat', methods=['POST'])
def add_building():
""" Add new building """
return checked(Business.add_building, 'Zlyhalo vytvaranie novej budovy.')
@app.route('/menu/budova/<building_id>/zmazat', methods=['POST'])
def delete_building(building_id: str):
""" Delete a building """
return checked(Business.delete_building, 'Zlyhalo mazanie budovy.', {'building_id': building_id})
@app.route('/menu/budova/<building_id>/update', methods=['POST'])
def update_building(building_id: str):
""" Change building details """
return checked(Business.update_building, 'Zlyhala zmena údajov budovy.', {'building_id': building_id})
###############
# Building menu
###############
@app.route('/menu/budova/<building_id>', methods=['GET'])
def building_screen(building_id: str):
""" Page showing building detail """
return checked(Business.building_screen, 'Zlyhalo načítanie izieb budovy.', {'building_id': building_id})
@app.route('/menu/budova/<building_id>/filter', methods=['POST'])
def filter_dwellings(building_id: str):
""" Set dwelling filter """
return checked(Business.filter_dwellings, 'Zlyhalo nastavovanie vyhľadávacieho filtera.', {'building_id': building_id})
@app.route('/menu/budova/<building_id>/triedit', methods=['POST'])
def sort_dwellings(building_id: str):
""" Set dwelling sorting method """
return checked(Business.sort_dwellings, 'Zlyhalo nastavenie triediacej podmienky.', {'building_id': building_id})
###########
# Dwellings
###########
@app.route('/menu/budova/<building_id>/pridat', methods=['POST'])
def add_dwelling(building_id: str):
""" Adds dwelling to the building """
return checked(Business.add_dwelling, 'Zlyhala tvorba izby.', {'building_id': building_id})
@app.route('/menu/budova/<building_id>/izba/<dwelling_id>/zmazat', methods=['POST'])
def delete_dwelling(building_id: str, dwelling_id: str):
""" Delete a dwelling from building """
return checked(Business.delete_dwelling, 'Zlyhalo mazanie izby.', {'building_id': building_id, 'dwelling_id': dwelling_id})
@app.route('/menu/budova/<building_id>/<dwelling_id>/update', methods=['POST'])
def update_dwelling(building_id: str, dwelling_id: str):
""" Change dwelling details """
return checked(Business.update_dwelling, 'Zlyhala zmena údajov izby.', {'building_id': building_id, 'dwelling_id': dwelling_id})
@app.route('/menu/budova/<building_id>/<dwelling_id>/update_info', methods=['POST'])
def update_dwelling_info(building_id: str, dwelling_id: str):
""" Change dwelling details """
return checked(Business.update_dwelling_info, 'Zlyhala zmena údajov izby.', {'building_id': building_id, 'dwelling_id': dwelling_id})
#################
# Dwelling Screen
#################
@app.route('/menu/budova/<building_id>/izba/<dwelling_id>', methods=['GET'])
def dwelling_screen(building_id: str, dwelling_id: str):
""" Show details about dwelling """
return checked(Business.dwelling_screen, 'Zlyhala zobrazovanie detailu izby.', {'building_id': building_id, 'dwelling_id': dwelling_id})
########
# People
########
@app.route('/menu/budova/<building_id>/izba/<dwelling_id>/pridaj_cloveka', methods=['POST'])
def add_person(building_id: str, dwelling_id: str):
""" Add person to a dwelling """
return checked(
Business.add_person,
'Zlyhalo pridávanie ľudí.',
{'building_id': building_id, 'dwelling_id': dwelling_id}
)
@app.route('/menu/budova/<building_id>/izba/<dwelling_id>/clovek/<person_id>', methods=['POST'])
def update_person(building_id: str, dwelling_id: str, person_id: str):
""" Change person data """
return checked(
Business.update_person,
'Zlyhala zmena údajov ubytovaného.',
{'building_id': building_id, 'dwelling_id': dwelling_id, 'person_id': person_id}
)
@app.route('/menu/budova/<building_id>/izba/<dwelling_id>/clovek/<person_id>/vyhod_cloveka', methods=['POST'])
def delete_person(building_id: str, dwelling_id: str, person_id: str):
""" Remove a person from dwelling """
return checked(
Business.delete_person,
'Zlyhalo mazanie ubytovaného.',
{'building_id': building_id, 'dwelling_id': dwelling_id, 'person_id': person_id}
)
################
# Error handling
################
@app.errorhandler(404)
def page_not_found(error):
print(error, file=sys.stderr)
traceback.print_exc()
return render_template('error.html', error='404 Stránka sa nenašla.')
@app.errorhandler(Exception)
def webapp_error(error):
print(error, file=sys.stderr)
traceback.print_exc()
return render_template('error.html', error='500 Nastala chyba servera.')
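# A minimal run sketch (not in the original; assumes this module is the app entry point):
#
#   if __name__ == '__main__':
#       app.run(host='127.0.0.1', port=5000, debug=True)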
|
"""
converts logfile to results for pasting into a spreadsheet
"""
import os
import json
from os import path
from os.path import join
from collections import defaultdict
import argparse
from mylib import file_utils
def run(logfile, episodes, keys, average_over, logdir='logs'):
if logfile is None:
hostname = os.uname().nodename
print('hostname', hostname)
files = file_utils.get_date_ordered_files(logdir)
files.reverse()
for file in files:
if hostname in file:
filepath = join(logdir, file)
logfile = filepath
break
print(logfile)
with open(logfile, 'r') as f:
all_rows = f.read().split('\n')
print('num rows', len(all_rows))
all_rows = [row.strip() for row in all_rows]
all_rows = [json.loads(row) for row in all_rows[1:] if row != '']
# episodes = set(episodes)
episodes = [e for e in episodes if e < all_rows[-1]['episode']]
all_episodes = list(episodes)
print('episodes', episodes)
rows = []
buffer = [] # for average_over
for n in range(len(all_rows) - 1, 0, -1):
if all_rows[n]['episode'] <= episodes[-1]:
buffer.append(all_rows[n])
# rows.append(all_rows[n])
if len(buffer) >= average_over:
# summed = defaultdict(float)
summed = {}
for row in buffer:
for k in row.keys():
if k not in summed:
summed[k] = row[k]
else:
summed[k] += row[k]
averaged = {}
for k, v in summed.items():
averaged[k] = v / average_over
rows.append(averaged)
buffer = []
episodes = episodes[:-1]
if len(episodes) == 0:
break
rows.reverse()
for row in rows:
print(row)
print('')
print('episode')
for e in all_episodes:
print(e)
print('')
for key in keys:
print(key)
for row in rows:
print('%.3f' % row[key])
print('')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--logfile', type=str)
parser.add_argument('--average-over', type=int, default=1, help='how many records to average over (for smoothing)')
parser.add_argument('--keys', type=str, default='average_reward,steps_avg,foods_eaten_avg,cactuses_chopped_avg,exited_avg,avg_utt_len')
parser.add_argument('--episodes', type=str, default='1000,3000,5000,8000,12000,20000,25000,30000,50000,80000,100000,130000,150000,200000')
args = parser.parse_args()
args.keys = args.keys.split(',')
args.episodes = [int(v) for v in args.episodes.split(',')]
run(**args.__dict__)
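# An example invocation sketch (hypothetical script name and log path):
#
#   python log_to_results.py --logfile logs/myhost_2020.log --average-over 3 \
#       --keys average_reward,steps_avg --episodes 1000,3000,5000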
|
# This import is needed to use the GUI
from tkinter import *
# This import is needed to get random numbers
import random
# This is the main part of the program.
# These 2 lines create the window outline; they are necessary for a GUI program.
root = Tk(className='shooting star simulator ver1')
root.geometry("400x400") # ความกว้าง=400 ความสูง=400
# ##########################
# ###  Create GUI Parts  ###
# ##########################
# Create the star as a ★ label
label_shooting_star = Label(root, text="★")
# default position is top-left
label_shooting_star.place(x=0, y=0)
# ########################################
# ### The Core of Animation Structure ###
# ########################################
# This function repeats every 10 milliseconds
def move():
    # Move the star down and to the right (x-axis +3, y-axis +3)
    # winfo_x() and winfo_y() give the star's current position
position = (label_shooting_star.winfo_x() + 3,
label_shooting_star.winfo_y() + 3)
label_shooting_star.place(x=position[0], y=position[1])
    # Check whether the star is out of the window
if position[0] > 400 or position[1] > 400:
        # If the star is out of the window, reset it to a random position
random_y = random.randint(0, 200)
label_shooting_star.place(x=0, y=random_y)
    # After 10 milliseconds, the "move" function will be called again
root.after(10, move)
# Need to manually call the "move" function once to start the "after" loop
move()
# This line is also necessary for a GUI program.
# (It must be placed at the end of the GUI program.)
root.mainloop()
|
from typing import List
class InsertionSort:
def sort(self, ary: List[int]) -> None:
n: int = len(ary)
for i in range(1, n):
b: int = ary[i]
pos: int = self.insert_pos_of(ary, 0, i)
ary[pos] = b
@staticmethod
def insert_pos_of(ary: List[int], st: int, idx: int) -> int:
"""
Find the insert position of an element in the sorted array.
:param ary:
:param st: The start position of the sorted array.
:param idx: The index of the element to be inserted. The element should be next to the end element of
the sorted array
:return: The index of insert position
"""
b: int = ary[idx]
idx -= 1
while idx >= st:
a = ary[idx]
if a > b:
ary[idx + 1] = a
else:
break
idx -= 1
return idx + 1
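# A minimal usage sketch:
#
#   ary = [5, 2, 4, 6, 1, 3]
#   InsertionSort().sort(ary)
#   print(ary)  # [1, 2, 3, 4, 5, 6]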
|
#!/usr/bin/python3
# scrapeOMDB.py - parses a movie and year from arguments and returns JSON
import requests
URL_BASE = 'http://www.omdbapi.com/?'
def OMDBmovie(mTitle, mYear):
"""Gets movie info from omdbapi.com
Arguments:
mTitle: Title of the movie to match
mYear: Year the movie was released
Returns:
a dictionary with key value pairs matching return from OMDB
"""
# Craft the URL (with full plot and json response)
url = URL_BASE + 't=' + mTitle + '&y=' + str(mYear) + '&plot=full&r=json'
# Try to get the url
response = requests.get(url)
response.raise_for_status()
    return response.json()
def OMDBtv(tvTitle, tvSeason, tvEpisode):
"""Gets tv info from omdbapi.com
Arguments:
tvTitle: Title of the TV series to match
tvSeason: Season number of the TV show
tvEpisode: Episode number of the TV show
Returns:
a dictionary with key value pairs matching return from OMDB
"""
# Craft the URL (with full plot and json response)
url = URL_BASE + 't=' + tvTitle + '&Season=' + str(tvSeason) + '&Episode=' + str(tvEpisode) + '&plot=full&r=json'
# Try to get the url
response = requests.get(url)
response.raise_for_status()
    return response.json()
def OMDBid(IMDB_id):
"""Gets media info from omdbapi.com
Arguments:
IMDB_id: IMDB id of media to match
Returns:
a dictionary with key value pairs matching return from OMDB
"""
# Craft the URL (with full plot and json response)
url = URL_BASE + 'i=' + IMDB_id + '&plot=full&r=json'
# Try to get the url
response = requests.get(url)
response.raise_for_status()
    return response.json()
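# A minimal usage sketch (network access assumed; note that titles containing
# spaces or other special characters should be URL-encoded, e.g. with
# urllib.parse.quote_plus, before being spliced into the query string):
#
#   if __name__ == '__main__':
#       print(OMDBmovie('Casablanca', 1942)['Title'])
#       print(OMDBtv('Firefly', 1, 1)['Title'])
#       print(OMDBid('tt0034583')['Year'])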
|
from PreprocessData.all_class_files.Event import Event
import global_data
class PublicationEvent(Event):
def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, about=None, actor=None, aggregateRating=None, attendee=None, audience=None, composer=None, contributor=None, director=None, doorTime=None, duration=None, endDate=None, eventStatus=None, funder=None, inLanguage=None, isAccessibleForFree=None, location=None, maximumAttendeeCapacity=None, offers=None, organizer=None, performer=None, previousStartDate=None, recordedIn=None, remainingAttendeeCapacity=None, review=None, sponsor=None, startDate=None, subEvent=None, superEvent=None, translator=None, typicalAgeRange=None, workFeatured=None, workPerformed=None, publishedOn=None):
Event.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, about, actor, aggregateRating, attendee, audience, composer, contributor, director, doorTime, duration, endDate, eventStatus, funder, inLanguage, isAccessibleForFree, location, maximumAttendeeCapacity, offers, organizer, performer, previousStartDate, recordedIn, remainingAttendeeCapacity, review, sponsor, startDate, subEvent, superEvent, translator, typicalAgeRange, workFeatured, workPerformed)
self.isAccessibleForFree = isAccessibleForFree
self.publishedOn = publishedOn
def set_isAccessibleForFree(self, isAccessibleForFree):
self.isAccessibleForFree = isAccessibleForFree
def get_isAccessibleForFree(self):
return self.isAccessibleForFree
def set_publishedOn(self, publishedOn):
self.publishedOn = publishedOn
def get_publishedOn(self):
return self.publishedOn
def __setattr__(self, key, value_list):
if type(value_list).__name__ == "NoneType" or key == "node_id":
self.__dict__[key] = value_list
return
for value in value_list:
str_value = type(value).__name__
if str_value not in global_data.get_table()[key]:
raise ValueError("非法类型!")
self.__dict__[key] = value_list
|
#!/bin/env python
import json
import logging
import unittest
logging.basicConfig(level=logging.INFO)
# os.environ["WEBSITE_ENV"] = "Local"
# NOTE: because of the FLASK_APP.config.from_object(os.environ['APP_SETTINGS'])
# directive in the api code, importing the flask app must happen AFTER
# the os.environ Config above.
from app import app
from app import login_manager
from test_flask_app import AutomatedTestingUser
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
login_manager.anonymous_user = AutomatedTestingUser
cls.client = app.test_client()
assert 'postgres' in app.config['SQLALCHEMY_DATABASE_URI']
@classmethod
def tearDownClass(cls):
pass
def DISABLED_TOO_SLOW_test_titin_plate(self):
""" assumes some stuff about fixtures """
uri = '/api/v1/basic-plate-info/SRN-WARP1-TEST1'
rv = self.client.get(uri,
content_type="application/json")
assert rv.status_code == 200
result = json.loads(rv.data)
assert "wells" in result
wells = result["wells"]
# assert wells[6]['sample_id'] == 'GA_WARP1_TEST1_0007'
assert wells[6]['column_and_row'] == '7'
# assert wells[383]['sample_id'] == 'GA_WARP1_TEST1_0384'
assert wells[383]['column_and_row'] == '384'
# assert wells[384]['sample_id'] == 'GA_WARP1_TEST1_0385'
assert wells[384]['column_and_row'] == '385'
# assert wells[6143]['sample_id'] == 'GA_WARP1_TEST1_6144'
assert wells[6143]['column_and_row'] == '6144'
if __name__ == '__main__':
unittest.main()
|
import torch
import numpy as np
from sklearn.metrics import make_scorer, accuracy_score, classification_report
from sklearn.model_selection import train_test_split, cross_validate, learning_curve
from torch.utils.data import Dataset
from sklearn.ensemble import RandomForestClassifier
from collections import Counter
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from hyper_params import HyperParams
params = HyperParams()
from embeddings.dataloader import TheDataSet
from embeddings.autoencoder import Autoencoder # so we can load this model
USE_AUTOENCODER = False
def get_autoencoder():
autoencoder = None
with open('../data/autoencoder.pic', 'rb') as f:
autoencoder = torch.load(f)
return autoencoder
def load_data():
dataset = TheDataSet(datafile='data/fulldata.npy', pad_to_360=False)
# dataset = TheDataSet(datafile='data/labdata.npy', pad_to_360=False)
# dataset = TheDataSet(datafile='data/fulldata_initial.npy', pad_to_360=False)
data_loader = torch.utils.data.DataLoader(dataset)
if USE_AUTOENCODER:
print("Using AutoEncoder")
autoencoder = get_autoencoder()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
get_x = lambda x: autoencoder.encoder(x.float().to(device)).detach().to('cpu').numpy()
else:
print("Using Plain Embeddings")
get_x = lambda x: x.detach().numpy()
alldata = [(get_x(r1), r2.detach().numpy()) for r1, r2 in data_loader]
X, y = list(zip(*alldata))
X = np.concatenate(X)
y = np.concatenate(y)
return X, y
def print_accuracy(cv_scores):
# print("Accuracy: %0.2f (+/- %0.2f)" % (cv_scores.mean(), cv_scores.std() * 2))
for score_name, scores in cv_scores.items():
print("%s: %0.2f (+/- %0.2f)" % (score_name, scores.mean(), scores.std() * 2))
X, y = load_data()
print(y.dtype)
import pandas as pd
df_X = pd.DataFrame(X)
df_y = pd.DataFrame(y, columns=['y'])
df_data = pd.concat([df_X, df_y], axis=1)
from dataproc import sampling
params.validation_set_fraction=0.29
params.test_set_fraction=0.01
params.negative_to_positive_ratio=2
print(params.__dict__)
# train_set, df_validation, df_test = sampling.generate_samples(df_dataset=df_data,
# negative_to_positive_ratio=params.negative_to_positive_ratio,
# test_set_fraction=params.test_set_fraction,
# validation_set_fraction=params.validation_set_fraction,
# random_state=params.random_state,
# by_column='y')
X_train, X_validate, y_train, y_validate = train_test_split(df_X, df_y, stratify=df_y, test_size=0.29, random_state=params.random_state)
print(f"Train count: {Counter(y_train['y'])}")
print(f"Validate count: {Counter(y_validate['y'])}")
# X_train = train_set.drop(columns=['y'])
# y_train = train_set['y'].astype('int')
# X_validate = df_validation.drop(columns=['y'])
# y_validate = df_validation['y'].astype('int')
print(np.mean(y_validate['y']))
print(f"Train shape: {X_train.shape}")
print(f"Validate shape: {X_validate.shape}")
def train_random_forest():
print("Training Random Forest")
if USE_AUTOENCODER:
rf_params = dict( class_weight='balanced_subsample',
n_estimators=100,
max_depth=4,
# max_leaf_nodes=70,
max_features=0.8,
max_samples=0.9,
min_samples_leaf=10,
#min_samples_split=15,
n_jobs=2
)
else:
rf_params = dict(n_estimators=200,
max_depth=10,
max_leaf_nodes=90,
max_features=0.3,
max_samples=0.9,
min_samples_leaf=10,
min_samples_split=10,
random_state=7,
n_jobs=2)
rf = RandomForestClassifier(**rf_params)
    rf.fit(X_train, y_train['y'])
return rf
def train_xgboost():
import xgboost as xgb
print("Training XGBoost")
param_dist = dict(objective='binary:logistic',
n_estimators=100, # 170,
eval_metric='rmsle', # 'logloss',
max_depth=5,
eta=0.3,
booster='gbtree',
n_jobs=4,
# subsample=0.8,
# colsample_bynode=0.5
)
xgboost_cls = xgb.XGBClassifier(**param_dist)
    xgboost_cls.fit(X_train, y_train['y'])
return xgboost_cls
# model = train_random_forest()
model = train_xgboost()
y_validate_hat = model.predict(X_validate)
print(f"predictions mean: {np.mean(y_validate_hat)}")
simple_score = model.score(X_validate, y_validate)
print(f"simple_score: {simple_score}")
# scoring = {'AUC': 'roc_auc', 'Accuracy': 'accuracy', 'Precision': 'precision', 'Recall': 'recall'}
scoring = ['roc_auc','accuracy','precision', 'recall', 'f1']
cv_scores = cross_validate(model, X_train, y_train['y'], scoring=scoring)
print_accuracy(cv_scores)
print(classification_report(y_validate, y_validate_hat))
|
"""
e.g set the 0 th bit for 6
6: 110
: 001
----- | (or)
111
Here from the binary representation of 6 we just changed the last 0 bit
Which is the same as
6 : 110
1<<0: 001
----- | (or)
111
so if we say set the 2nd bit for 6 , it would be
6 : 011
1<<2: 100
----- | (or)
111
"""
def set_nth_bit(x: int, n: int):
    # return x | 1 << n
    return bin(x | 1 << n)[2:]

# print in binary, stripping the initial '0b' prefix
print(set_nth_bit(6, 0))
print(set_nth_bit(6, 2))
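# A companion sketch (not in the original): the same masking idea covers
# clearing and toggling a bit, using AND-NOT and XOR respectively.
def clear_nth_bit(x: int, n: int):
    return bin(x & ~(1 << n))[2:]

def toggle_nth_bit(x: int, n: int):
    return bin(x ^ (1 << n))[2:]

print(clear_nth_bit(6, 1))   # 110 & ~010 -> 100
print(toggle_nth_bit(6, 0))  # 110 ^ 001 -> 111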
|
import sys
import socket
from qt_material import apply_stylesheet
from PySide6.QtCore import Slot, QObject, Signal, QEvent
from PySide6.QtWidgets import QMainWindow, QApplication, QListWidgetItem, QWidget
from PySide6.QtNetwork import QTcpServer
from PySide6.QtGui import Qt, QKeyEvent
from server_form import Ui_mainWindow
def pressed(widget : QWidget):
class Filter(QObject):
pressed = Signal(QKeyEvent)
def eventFilter(self, watched: QObject, event: QEvent) -> bool:
            if watched == widget and event.type() == QEvent.KeyPress:
self.pressed.emit(QKeyEvent(event))
return super().eventFilter(watched, event)
filter = Filter(widget)
widget.installEventFilter(filter)
return filter.pressed
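# (pressed() wraps a key-press event filter in a Signal so a widget's key
# events can be consumed like any other Qt signal, e.g.
#     pressed(some_line_edit).connect(on_key_pressed)
# Parenting the Filter to the widget keeps the filter object alive.)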
class Mainwindow(QMainWindow, Ui_mainWindow):
def __init__(self):
super(Mainwindow, self).__init__()
self.setupUi(self)
self._client_list = list()
self._server = QTcpServer()
self._server.listen(port = 9500)
self._server.newConnection.connect(self.newConnection_handler)
self.btn_message.clicked.connect(self.button_handler)
self.server_ip = socket.gethostbyname(socket.gethostname())
pressed(self.lineEdit_message).connect(self.pressed_handler)
self.label.setText(f"서버 주소 : {self.server_ip}")
@Slot(QKeyEvent)
def pressed_handler(self, Key_Event : QKeyEvent):
if Key_Event.key() == Qt.Key_Return:
self.btn_message.click()
@Slot()
def button_handler(self):
if len(self.lineEdit_message.text()) > 0:
text = "운영자 : "
text += self.lineEdit_message.text()
item = QListWidgetItem()
if text[-5:] == "@list":
message = f"현재 연결 수 : {len(self._client_list)}"
item.setText(message)
item.setTextAlignment(Qt.AlignCenter)
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
self.lineEdit_message.clear()
elif text[-5:] == "@help":
text = "@list : 연결된 수 \n@list_info : 닉네임, ip 표시"
item.setText(text)
item.setTextAlignment(Qt.AlignCenter)
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
self.lineEdit_message.clear()
else:
item.setText(text)
item.setTextAlignment(Qt.AlignRight)
for client in self._client_list:
client.write(text.encode())
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
self.lineEdit_message.clear()
@Slot()
    def newConnection_handler(self):
        client = self._server.nextPendingConnection()
        self._client_list.append(client)
        add_message = "A new user has connected!"
        for clients in self._client_list:
            clients.write(add_message.encode())
        item = QListWidgetItem()
        item.setText(add_message)
        item.setTextAlignment(Qt.AlignCenter)
        self.listWidget.addItem(item)
        self.listWidget.scrollToBottom()
        # Connect signals only on the newly accepted client; reconnecting every
        # client here would make the handlers fire multiple times per event.
        client.readyRead.connect(self.client_readyRead_handler)
        client.disconnected.connect(self.disconnected_handler)
@Slot()
def client_readyRead_handler(self):
for read_client in self._client_list:
if read_client.bytesAvailable():
                # Need to read the command code and decide how to handle it!
data = bytes(read_client.readAll())
data_decode = data.decode()
item = QListWidgetItem()
if data_decode[0] == "@":
data_decode = data_decode[1:]
data_decode = data_decode.split("_")
item.setText(f"{data_decode[0]} -> {data_decode[1]}")
item.setTextAlignment(Qt.AlignCenter)
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
elif data_decode[0] == "!":
data_decode = data_decode[1:]
if data_decode[-3:] == "!pc":
data_decode = data_decode.split(":")
item.setText(f"{data_decode[0]}에서 인원수 조사함 !")
item.setTextAlignment(Qt.AlignCenter)
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
message = f"서버 : {len(self._client_list)}명이 연결되어있습니다."
read_client.write(message.encode())
elif data_decode[-5:] == "!help":
message = "!pc : 접속된 사용자의 수\n"
read_client.write(message.encode())
else:
item.setText(data_decode)
item.setTextAlignment(Qt.AlignLeft)
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
                    # message_client is each client the server relays the message to
for message_client in self._client_list:
if not message_client == read_client:
message_client.write(data_decode.encode())
else:
item = QListWidgetItem()
item.setText("↖"+data_decode+"↗")
item.setTextAlignment(Qt.AlignCenter)
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
@Slot()
def disconnected_handler(self):
add_message = "한명의 사용자가 떠났습니다... 흙흙"
item = QListWidgetItem()
item.setText(add_message)
item.setTextAlignment(Qt.AlignCenter)
self.listWidget.addItem(item)
self.listWidget.scrollToBottom()
disconnect_client = self.sender()
self._client_list.remove(disconnect_client)
for clients in self._client_list:
clients.write(add_message.encode())
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Mainwindow()
#app.setStyle('Fusion')
apply_stylesheet(app, theme = 'dark_teal.xml')
window.show()
app.exec()
|
from . import test_history
|
import os
import shutil
import subprocess
import tarfile
import start_new_module_util
COMPARISON_FILES = "comparison_files_old"
SUBVERSION_SCRIPT_BASE = ("dls-python /dls_sw/prod/common/python/RHEL6-x86_64/"
"dls_scripts/3-21/prefix/lib/python2.7/"
"site-packages/dls_scripts-3.21-py2.7.egg/"
"py_scripts/dls_start_new_module.py -n ")
def call_start_new_module(call_args):
"""Call the start_new_module script with the given args.
Note:
This script expects you to have the DEFAULT environment variables, so
dls_start_new_module.py is the svn version of the script.
Args:
call_args: A string giving the arguments for the script.
"""
call = (SUBVERSION_SCRIPT_BASE + call_args).split()
subprocess.check_call(call)
# Expected module path in comment.
call_dict = {
'--area=python dls_test_python_module': "dls_test_python_module",
'--area=support test_support_module': "test_support_module",
'--area=tools test_tools_module': "test_tools_module",
'--area=ioc testB21/BL': "testB21",
'--area=ioc testB22-BL-IOC-01': "testB22",
}
if __name__ == "__main__":
os.mkdir(COMPARISON_FILES)
cwd = os.getcwd()
os.chdir(COMPARISON_FILES)
tar_list = []
    for call_arg, folder in call_dict.items():
call_start_new_module(call_arg)
tar_list.append(folder)
user_login = os.getlogin()
current_dir = os.getcwd()
# Swap instances of user login to USER_LOGIN_NAME
start_new_module_util.find_and_replace_characters_in_folder(
user_login,
"USER_LOGIN_NAME",
current_dir
)
os.chdir(cwd)
tar_name = COMPARISON_FILES + ".tar.gz"
# Tarball the resultant folder
with tarfile.open(tar_name, "w:gz") as tar:
for file_name in tar_list:
tar.add(os.path.join(COMPARISON_FILES, file_name))
shutil.rmtree(COMPARISON_FILES)
|
from typing import Type # noqa: F401
from evm.vm.state import BaseState # noqa: F401
from evm.vm.forks.homestead import HomesteadVM
from .state import TangerineWhistleState
class TangerineWhistleVM(HomesteadVM):
# fork name
fork = 'tangerine-whistle' # type: str
# classes
_state_class = TangerineWhistleState # type: Type[BaseState]
|
import unittest
from Decorators.decorators import do_twice
import pprint
@do_twice
def say_whee():
print("Whee!")
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
pass
def test_decorate(self):
say_whee()
if __name__ == '__main__':
unittest.main()
|
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class DeviceDirectoryAPI:
"""
A class that provides Device directory related functionality.
https://www.pelion.com/docs/device-management/current/service-api-references/device-directory.html
"""
def __init__(self, rest_api):
"""
Initializes the Device Directory library
:param rest_api: RestAPI object
"""
self.api_version = 'v3'
self.cloud_api = rest_api
def get_device(self, device_id, headers=None, expected_status_code=None):
"""
Get device
:param device_id: Device id
:param headers: Override default header fields
:param expected_status_code: Asserts the result's status code
:return: GET /devices/{device_id} response
"""
api_url = '/{}/devices/{}'.format(self.api_version, device_id)
r = self.cloud_api.get(api_url, headers, expected_status_code)
return r
def delete_device(self, device_id, headers=None, expected_status_code=None):
"""
Delete device
:param device_id: Device id
:param headers: Override default header fields
:param expected_status_code: Asserts the result's status code
:return: DELETE /devices/{device_id} response
"""
api_url = '/{}/devices/{}'.format(self.api_version, device_id)
r = self.cloud_api.delete(api_url, headers, expected_status_code)
return r
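# Usage sketch (illustrative; assumes a configured RestAPI-compatible client
# and a made-up device id):
#
#   api = DeviceDirectoryAPI(rest_api)
#   response = api.get_device('0161234567890abcdef', expected_status_code=200)
#   print(response.json())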
|
#!/usr/bin/env python
'''
Setup script for the lib99ocl package
'''
__author__ = 'Marcos Romero Lamas'
__email__ = 'marromlam@gmail.com'
__license__ = 'MIT License Copyright (c) 2021 Marcos Romero Lamas'
# Modules {{{
import os
import sys
import setuptools
import subprocess
import textwrap
# }}}
# Class {{{
class CheckFormatCommand(setuptools.Command):
'''
Check the format of the files in the given directory. This script takes only
one argument, the directory to process. A recursive look-up will be done to
look for python files in the sub-directories and determine whether the files
have the correct format.
'''
description = 'check the format of the files of a certain type in a given directory'
user_options = [
('directory=', 'd', 'directory to process'),
('file-type=', 't', 'file type (python|all)'),
]
def initialize_options(self):
'''
        Running at the beginning of the configuration.
'''
self.directory = None
self.file_type = None
def finalize_options(self):
'''
Running at the end of the configuration.
'''
if self.directory is None:
raise Exception('Parameter --directory is missing')
if not os.path.isdir(self.directory):
raise Exception('Not a directory {}'.format(self.directory))
if self.file_type is None:
raise Exception('Parameter --file-type is missing')
if self.file_type not in ('python', 'all'):
raise Exception('File type must be either "python" or "all"')
def run(self):
'''
Execution of the command action.
'''
matched_files = []
for root, _, files in os.walk(self.directory):
for f in files:
if self.file_type == 'python' and not f.endswith('.py'):
continue
matched_files.append(os.path.join(root, f))
process = subprocess.Popen(['autopep8', '--diff'] + matched_files,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
        if process.returncode != 0:
            raise RuntimeError('Call to autopep8 exited with error {}\nMessage:\n{}'.format(
                process.returncode, stderr))
if len(stdout):
raise RuntimeError(
'Found differences for files in directory "{}" with file type "{}"'.format(self.directory, self.file_type))
# }}}
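# Example invocation of the custom command above (requires autopep8 on PATH):
#
#   python setup.py check_format --directory=lib99ocl --file-type=python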
# Version {{{
# Version of the package. Before a new release is made just the version_list
# must be changed. The options for the fourth tag are "dev", "alpha", "beta"
# and "final".
version_list = [0, 0, 0, 'dev', 7]
VERSION = f"{version_list[0]}.{version_list[1]}.{version_list[2]}"
tag = version_list[3]
if tag != 'final':
if tag in ('alpha', 'beta', 'dev'):
VERSION += f"{tag}{version_list[-1]}"
else:
raise ValueError(f'Unable to parse version tuple {version_list}')
# }}}
# Helpers {{{
def create_version_file():
'''
Create the file version.py given the version of the package.
'''
version_file = open('lib99ocl/version.py', 'wt')
version_file.write(textwrap.dedent("""\
'''
Auto-generated module holding the version of the lib99ocl package
'''
VERSION = "{}"
VERSION_INFO = {}
__all__ = ['VERSION', 'VERSION_INFO']
""".format(VERSION, version_list)))
version_file.close()
def install_requirements():
'''
Read installation requirements from "requirements.txt" file.
'''
requirements = []
with open('requirements.txt') as f:
for line in f:
li = line.strip()
if not li.startswith('#'):
requirements.append(li)
return requirements
# }}}
# Setup package {{{
def setup_package():
'''
Set up the package.
'''
metadata = dict(
name='lib99ocl',
version=VERSION,
author=__author__,
author_email=__email__,
url='https://github.com/marromlam/py-lib99ocl.git',
download_url='https://github.com/marromlam/py-lib99ocl.git',
install_requires=install_requirements(),
python_requires='>=3.5',
license=__license__,
description='OpenCL C99 library',
long_description=open('README.rst').read() + "\n\n" + __license__,
long_description_content_type = 'text/x-rst',
platforms=['Linux', 'macOS', 'Windows'],
keywords='computational physics',
cmdclass={'check_format': CheckFormatCommand},
include_package_data=True,
packages=['lib99ocl']
)
create_version_file()
setuptools.setup(**metadata)
if __name__ == '__main__':
setup_package()
# }}}
# vim:foldmethod=marker
|
# -*- coding: utf-8 -*-
import functools
import testutils.helpers as helpers
def as_someone(user_types):
"""Decorator to mimic different user roles.
"""
def the_decorator(func):
@functools.wraps(func)
def func_wrapper(view, *args, **kwargs):
cached_user = view.user
for user_type in user_types:
view.user_type = user_type
view.user = view.users[user_type]
view._client = helpers.api_client(view.user, uuid=view.workspace.id)
func(view, *args, **kwargs)
view.user = cached_user
view._client = helpers.api_client(view.user)
return func_wrapper
return the_decorator
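# Usage sketch (hypothetical test class; the names are illustrative):
#
#   class TestWorkspaces(SomeViewTestCase):
#       @as_someone(['admin', 'member'])
#       def test_list_workspaces(self):
#           ...  # body runs once per user type, with view.user swapped in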
|
import asyncio
import time
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional
from bluesky.protocols import (
Descriptor,
Movable,
Readable,
Reading,
Stageable,
Stoppable,
)
from ophyd.v2.core import (
Ability,
AsyncStatus,
CachedSignal,
ReadableSignal,
SignalRO,
call_in_bluesky_event_loop,
in_bluesky_event_loop,
)
from .devices import Motor
@dataclass
class CachedMotorSignals:
readback: CachedSignal
velocity: CachedSignal
egu: CachedSignal
class MovableMotor(Ability, Movable, Readable, Stoppable, Stageable):
def __init__(self, device: Motor):
self.device: Motor = device
self._trigger_task: Optional[asyncio.Task[float]] = None
self._set_success = True
self._cache: Optional[CachedMotorSignals] = None
def readable_signal(self, name: str) -> Readable:
signal = getattr(self.device, name)
assert isinstance(signal, SignalRO)
return ReadableSignal(signal, f"{self.name}-{name}")
def __getitem__(self, name: str) -> Any:
if in_bluesky_event_loop():
raise KeyError(
f"Can't get {self.name}['{name}'] from inside RE, "
f"use bps.rd({self.name}.readable_signal('{name}'))"
)
try:
signal = getattr(self.device, name)
except AttributeError:
raise KeyError(f"{self.name} has no Signal {name}")
assert isinstance(signal, SignalRO)
return call_in_bluesky_event_loop(signal.get_value())
def stage(self):
# Start monitoring signals
self._cache = CachedMotorSignals(
readback=CachedSignal(self.device.readback),
velocity=CachedSignal(self.device.velocity),
egu=CachedSignal(self.device.egu),
)
def unstage(self):
self._cache = None
async def read(self) -> Dict[str, Reading]:
assert self.name and self._cache, "stage() not called or name not set"
return {self.name: await self._cache.readback.get_reading()}
async def describe(self) -> Dict[str, Descriptor]:
assert self.name and self._cache, "stage() not called or name not set"
return {self.name: await self._cache.readback.get_descriptor()}
async def read_configuration(self) -> Dict[str, Reading]:
assert self.name and self._cache, "stage() not called or name not set"
return {
f"{self.name}-velocity": await self._cache.velocity.get_reading(),
f"{self.name}-egu": await self._cache.egu.get_reading(),
}
async def describe_configuration(self) -> Dict[str, Descriptor]:
assert self.name and self._cache, "stage() not called or name not set"
return {
f"{self.name}-velocity": await self._cache.velocity.get_descriptor(),
f"{self.name}-egu": await self._cache.egu.get_descriptor(),
}
    def set(self, new_position: float, timeout: Optional[float] = None) -> AsyncStatus[float]:
start = time.time()
watchers: List[Callable] = []
async def update_watchers(old_position):
units, precision = await asyncio.gather(
self.device.egu.get_value(), self.device.precision.get_value()
)
async for current_position in self.device.readback.observe_value():
for watcher in watchers:
watcher(
name=self.name,
current=current_position,
initial=old_position,
target=new_position,
unit=units,
precision=precision,
time_elapsed=time.time() - start,
)
async def do_set():
old_position = await self.device.demand.get_value()
t = asyncio.create_task(update_watchers(old_position))
try:
await self.device.demand.put(new_position)
finally:
t.cancel()
if not self._set_success:
raise RuntimeError("Motor was stopped")
self._set_success = True
status = AsyncStatus(asyncio.wait_for(do_set(), timeout=timeout), watchers)
return status
async def stop(self, success=False) -> None:
self._set_success = success
await self.device.stop()
|
# -*- coding: utf-8 -*-
# Backwards compatibility imports
from __future__ import absolute_import, division, print_function
from builtins import *
import unittest
import os
TEST_PATTERN = 'test*'
CURRENT_DIR = os.path.dirname(__file__)
def suite():
    # Inits the loader and suite
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    # Discovers all the tests
    tests = loader.discover(start_dir=CURRENT_DIR, pattern=TEST_PATTERN)
    # Adds all the tests found
    suite.addTests(tests)
    return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-19 23:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ucsrb', '0004_focusarea_description'),
]
operations = [
migrations.CreateModel(
name='PourPointBasin',
fields=[
('ppt_ID', models.IntegerField(primary_key=True, serialize=False, verbose_name='Pour Point ID')),
('area', models.FloatField(blank=True, default=None, null=True, verbose_name='Area in Acres')),
('mean_elev', models.IntegerField(blank=True, default=None, null=True, verbose_name='Mean Elevation')),
('avg_slp', models.FloatField(blank=True, default=None, null=True, verbose_name='Average Slope')),
('slp_gt60', models.IntegerField(default=0, verbose_name='Percent Area w/ Slope > 60%')),
('elev_dif', models.IntegerField(blank=True, default=None, null=True, verbose_name='Elevation Difference in Meters')),
('mean_shade', models.IntegerField(blank=True, default=None, null=True, verbose_name='Mean Shaded Area')),
('veg_prop', models.IntegerField(blank=True, default=None, null=True, verbose_name='Percent Forested')),
('thc_11', models.IntegerField(verbose_name='Topo-Height Class 11')),
('thc_12', models.IntegerField(verbose_name='Topo-Height Class 12')),
('thc_13', models.IntegerField(verbose_name='Topo-Height Class 13')),
('thc_14', models.IntegerField(verbose_name='Topo-Height Class 14')),
('thc_15', models.IntegerField(verbose_name='Topo-Height Class 15')),
('thc_21', models.IntegerField(verbose_name='Topo-Height Class 21')),
('thc_22', models.IntegerField(verbose_name='Topo-Height Class 22')),
('thc_23', models.IntegerField(verbose_name='Topo-Height Class 23')),
('thc_24', models.IntegerField(verbose_name='Topo-Height Class 24')),
('thc_25', models.IntegerField(verbose_name='Topo-Height Class 25')),
('fc_11', models.IntegerField(verbose_name='Fractional Coverage 11')),
('fc_12', models.IntegerField(verbose_name='Fractional Coverage 12')),
('fc_13', models.IntegerField(verbose_name='Fractional Coverage 13')),
('fc_14', models.IntegerField(verbose_name='Fractional Coverage 14')),
('fc_15', models.IntegerField(verbose_name='Fractional Coverage 15')),
('fc_21', models.IntegerField(verbose_name='Fractional Coverage 21')),
('fc_22', models.IntegerField(verbose_name='Fractional Coverage 22')),
('fc_23', models.IntegerField(verbose_name='Fractional Coverage 23')),
('fc_24', models.IntegerField(verbose_name='Fractional Coverage 24')),
('fc_25', models.IntegerField(verbose_name='Fractional Coverage 25')),
('dwnst_ppt', models.IntegerField(blank=True, default=None, null=True, verbose_name='Downstream Pourpoint ID')),
],
),
]
|
from io import BytesIO
from typing import Tuple
def lerp(x: int, x0: int, x1: int, y0: float, y1: float):
t = (x - x0) / (x1 - x0)
return (1-t) * y0 + t * y1
def bytes_to_int(byte, issigned=False):
return int.from_bytes(byte, byteorder="big", signed=issigned)
def bytes_to_float(byte, src: Tuple[int, int], dest: Tuple[float, float]):
i = int.from_bytes(byte, byteorder="big", signed=(src[0] < 0))
return lerp(i, src[0], src[1], dest[0], dest[1])
def bytes_to_str(byte):
return byte.decode("utf-8")
def read_len(klv_stream: BytesIO):
    # BER length: short form is a single byte < 128; long form sets the high
    # bit, with the low 7 bits giving the number of length bytes that follow.
    length = bytes_to_int(klv_stream.read(1))
    if length >= 128:
        length = bytes_to_int(klv_stream.read(length - 128))
    return length
def read_ber_oid(klv_stream: BytesIO):
    # BER-OID encoding: 7 payload bits per byte; a set high bit marks continuation.
    byte = bytes_to_int(klv_stream.read(1))
if byte < 128:
return byte
val = 0
while byte >= 128:
val = (val << 7) + (byte - 128)
byte = bytes_to_int(klv_stream.read(1))
val = (val << 7) + (byte)
return val
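# Minimal sanity checks for the BER helpers (synthetic bytes, not from a real
# KLV stream):
if __name__ == "__main__":
    assert read_len(BytesIO(bytes([0x05]))) == 5                # short form
    assert read_len(BytesIO(bytes([0x81, 0x05]))) == 5          # long form, 1 length byte
    assert read_ber_oid(BytesIO(bytes([0x7F]))) == 127          # single byte
    assert read_ber_oid(BytesIO(bytes([0x81, 0x01]))) == 129    # continuation byte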
|
"""<internal>"""
'''
zlib License
(C) 2020-2021 DeltaRazero
All rights reserved.
'''
# TODO: The profiler is currently pretty bland and limited. I would like to expand it
# some more in the future.
# ***************************************************************************************
class _:
'<imports>'
import typing as t
import pathlib as pl
import operator
from .. import (
opts,
textio,
ruleset_t,
ILexer,
Token,
)
# ***************************************************************************************
class _RuleValueProfile:
"""Keeps track of the most common values of a rule.
"""
# --- FIELDS --- #
_valueOccurrences : _.t.Dict[str, int]
# --- CONSTRUCTOR --- #
def __init__(self) -> None:
self._valueOccurrences = {}
return
# --- PUBLIC METHODS --- #
def AddToken(self, token: _.Token) -> None:
if (token.data in self._valueOccurrences):
self._valueOccurrences[token.data] += 1
else:
self._valueOccurrences[token.data] = 1
return
def TopOccurrences(self, threshold: int=10) -> _.t.Dict[str, int]:
# First sort by map values
self._valueOccurrences = dict(
sorted(
self._valueOccurrences.items(),
key=_.operator.itemgetter(1),
reverse=True
)
)
to_return: _.t.Dict[str, int] = {}
for key in self._valueOccurrences:
value = self._valueOccurrences[key]
if (value >= threshold): to_return[key] = self._valueOccurrences[key]
return to_return
# ***************************************************************************************
class ProfilerLexer (_.ILexer):
"""A wrapper around a lexer implementation to provide profiling functionality.
"""
# --- FIELDS --- #
_lexer : _.ILexer
_ruleOccurrences : _.t.Dict[str, int]
_ruleProfiles : _.t.Dict[str, _RuleValueProfile]
# --- CONSTRUCTOR & DESTRUCTOR --- #
def __init__(self, lexer: _.ILexer) -> None:
"""ProfilerLexer object instance initializer.
Parameters
----------
lexer : ILexer
Instance of an ILexer implementation.
"""
self._lexer = lexer
self._ruleOccurrences = {}
self._ruleProfiles = {}
pass
def __del__(self):
del self._lexer
return
# --- INTERFACE METHODS (ILexer) --- #
def PushRuleset(self, ruleset: _.ruleset_t) -> None:
self._lexer.PushRuleset(ruleset)
return
def PopRuleset(self) -> None:
self._lexer.PopRuleset()
return
def ClearRulesets(self) -> None:
self._lexer.ClearRulesets()
return
def GetOptions(self) -> _.opts.LexerOptions:
return self._lexer.GetOptions()
def GetNextToken(self) -> _.Token:
token = self._lexer.GetNextToken()
if (not (token.id in self._ruleOccurrences)):
self._ruleOccurrences[token.id] = 0
self._ruleProfiles [token.id] = _RuleValueProfile()
self._ruleOccurrences[token.id] += 1
self._ruleProfiles [token.id].AddToken(token)
return token
# --- INTERFACE METHODS (ITextIO) --- #
def Open(self,
fp: _.t.Union[str, _.pl.Path],
bufferSize: int=_.textio.DEFAULT_BUFFER_SIZE,
encoding: str="UTF-8",
convertLineEndings: bool=True,
) -> None:
self._lexer.Open(
fp=fp,
bufferSize=bufferSize,
encoding=encoding,
convertLineEndings=convertLineEndings
)
return
def Load(self,
strData: str,
convertLineEndings: bool=False
) -> None:
self._lexer.Load(
strData=strData,
convertLineEndings=convertLineEndings,
)
return
def Close(self) -> None:
self._lexer.Close()
return
# --- PUBLIC METHODS --- #
def ShowReport(self, valueOccurranceThreshold: int=10) -> None:
"""Prints a report of which rules (identifiers) occur the most.
Parameters
----------
valueOccurranceThreshold : int, optional
Threshold to display the top most occurring values of a rule. A value lower
than 1 disables the display of values entirely.
By default 10
"""
# First sort by the rule occurrences map by values
self._ruleOccurrences = dict(
sorted(
self._ruleOccurrences.items(),
key=_.operator.itemgetter(1),
reverse=True
)
)
# Print description
msg = "Most occuring rules"
if (valueOccurranceThreshold > 0):
msg += f" + most occuring values respectively (limited to {valueOccurranceThreshold})"
print("\n" + msg)
print('=' * len(msg))
# Show most occurring values
for key in self._ruleOccurrences:
# Rule identifier and amount of occurrences
msg = f"{key}: {self._ruleOccurrences[key]}"
print("\n" + msg)
# If showing the top value occurrences, show them in order
if (valueOccurranceThreshold > 0):
print('-' * len(msg))
                top_occurrences = self._ruleProfiles[key].TopOccurrences(threshold=valueOccurranceThreshold)
                for value_key in top_occurrences:
                    print(f"    {value_key} : {top_occurrences[value_key]}")
# Print newline
print()
return
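# ***************************************************************************************
# Usage sketch (assumes a concrete ILexer implementation and a prepared
# ruleset; the names below are illustrative only):
#
#   lexer = ProfilerLexer(my_lexer)
#   lexer.PushRuleset(my_ruleset)
#   lexer.Load("input to tokenize")
#   token = lexer.GetNextToken()   # occurrences are counted as a side effect
#   ...
#   lexer.ShowReport(valueOccurranceThreshold=5)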
|
import string
from module_info import *
from module_troops import *
from process_common import *
#from process_operations import *
num_face_numeric_keys = 4
def save_troops():
file = open(export_dir + "troops.txt","w")
file.write("troopsfile version 2\n")
file.write("%d "%len(troops))
for troop in troops:
troop_len = len(troop)
if troop_len == 11:
troop[11:11] = [0, 0, 0, 0, 0]
elif troop_len == 12:
troop[12:12] = [0, 0, 0, 0]
elif troop_len == 13:
troop[13:13] = [0, 0, 0]
elif troop_len == 14:
troop[14:14] = [0, 0]
elif troop_len == 15:
troop[15:15] = [0]
if (troop[4] > 0):
# add_tag_use(tag_uses,tag_scene,troop[4] & tsf_site_id_mask)
id_no = find_object(troops,convert_to_identifier(troop[0]))
# if (id_no >= 0): add_tag_use(tag_uses,tag_troop,id_no)
# if (troop[6] > 0): add_tag_use(tag_uses,tag_faction,troop[6])
file.write("\ntrp_%s %s %s %s %d %d %d %d %d %d\n "%(convert_to_identifier(troop[0]),replace_spaces(troop[1]),replace_spaces(troop[2]), replace_spaces(str(troop[13])), troop[3],troop[4],troop[5], troop[6], troop[14], troop[15]))
inventory_list = troop[7]
# inventory_list.append(itm_arrows)
# inventory_list.append(itm_bolts)
for inventory_item in inventory_list:
# add_tag_use(tag_uses,tag_item,inventory_item)
file.write("%d 0 "%inventory_item)
for i in xrange(64 - len(inventory_list)):
file.write("-1 0 ")
file.write("\n ")
attrib = troop[8]
strength = (attrib & 0xff)
agility = ((attrib >> 8)& 0xff)
intelligence = ((attrib >> 16)& 0xff)
charisma = ((attrib >> 24)& 0xff)
starting_level = (attrib >> level_bits) & level_mask
# gold = two_to_pow(2 + (attrib >> 12)& 0x0f) * random
file.write(" %d %d %d %d %d\n"%(strength,agility,intelligence,charisma,starting_level))
wp_word = troop[9]
for wp in xrange(num_weapon_proficiencies):
wp_level = wp_word & 0x3FF
file.write(" %d"%wp_level)
wp_word = wp_word >> 10
file.write("\n")
skill_array = troop[10]
for i in xrange(num_skill_words):
file.write("%d "%((skill_array >> (i * 32)) & 0xffffffff))
file.write("\n ")
face_keys = [troop[11],troop[12]]
for fckey in (face_keys):
word_keys = []
for word_no in xrange(num_face_numeric_keys):
word_keys.append((fckey >> (64 * word_no)) & 0xFFFFFFFFFFFFFFFF)
for word_no in xrange(num_face_numeric_keys):
file.write("%d "%(word_keys[(num_face_numeric_keys -1) - word_no]))
file.write("\n")
# word2 = (fckey >> 64) & 0xFFFFFFFFFFFFFFFF
# word3 = (fckey >> 128) & 0xFFFFFFFFFFFFFFFF
# word4 = (fckey >> 192) & 0xFFFFFFFFFFFFFFFF
# file.write("%d %d %d %d "%(word4, word3, word2, word1))
# face_keys = troop[10]
# for fckey in (face_keys):
# file.write("%d "%(fckey))
# for i in xrange(4 - len(face_keys)):
# file.write("0 ")
file.close()
def two_to_pow(x):
result = 1
for i in xrange(x):
result = result * 2
return result
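# (Note: two_to_pow(x) is equivalent to 2 ** x; the explicit loop mirrors the
# original module-system style.)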
def save_python_header():
file = open(src_dir + "ID_troops.py","w")
for i_troop in xrange(len(troops)):
file.write("trp_%s = %d\n"%(convert_to_identifier(troops[i_troop][0]),i_troop))
file.close()
print "Exporting troops data"
#tag_uses = load_tag_uses(export_dir)
save_python_header()
save_troops()
#save_tag_uses(export_dir, tag_uses)
#print "Generating C header..."
#save_c_header()
#print "Generating Python header..."
#print "Finished."
|
import pytest
from rabbitark.rabbitark import RabbitArk
@pytest.fixture
def rabbitark(option):
return RabbitArk(option)
|
"""This contains various functions used to compose the Bonaire API SSH commands."""
import re
from typing import Optional, Tuple
from rassh.datatypes import WellFormedCommand
from rassh.exceptions.exception_with_status import ExceptionWithStatus
def set_group(ssh_command, ap_wiredmac: str, ap_group: str, cmd: WellFormedCommand):
lines = ssh_command.expect_command(ssh_command.ssh_manager.master_controller,
"whitelist-db rap modify mac-address " + ap_wiredmac + " ap-group "
+ ap_group, cmd)
for line in lines:
if line.startswith("Entry Does not Exist"):
_ = ssh_command.expect_command(ssh_command.ssh_manager.master_controller,
"whitelist-db rap add mac-address " + ap_wiredmac
+ " ap-group " + ap_group, cmd)
lines = ssh_command.expect_command(ssh_command.ssh_manager.master_controller,
"ap-regroup wired-mac " + ap_wiredmac + " " + ap_group, cmd)
if lines:
for line in lines:
if line.startswith("AP with MAC"):
# Might say "AP with MAC address PP:QQ:RR:SS:TT:UU not found."
# This means the AP was not found on the controller.
raise ExceptionWithStatus("Error: AP not found on controller when setting group.", 500)
elif line.startswith("NOTE: For cert RAP ap-group specified in RAP whitelist will take precedence"):
# You will see this line even if the group name is completely fictitious, no error is shown.
# This is as close as we ever get to knowing it was a success. Return (without an exception).
return
# TODO Is this correct? Or will RAP show "AP with MAC address ..."
raise ExceptionWithStatus("Error: Unexpected output when setting group.", 500)
# TODO is this actually an error? Can you ever set a group and *not* see "NOTE: ..."?
raise ExceptionWithStatus("Error: No output when setting group.", 500)
def reprovision_remote(ssh_command, remote_ap: int, cmd: WellFormedCommand):
if remote_ap == 1:
_ = ssh_command.expect_command(ssh_command.ssh_manager.master_controller, "remote ap", cmd)
else:
_ = ssh_command.expect_command(ssh_command.ssh_manager.master_controller, "no remote ap", cmd)
def enter_provisioning_mode(ssh_command, cmd: WellFormedCommand):
_ = ssh_command.expect_command(ssh_command.ssh_manager.master_controller, "configure t", cmd)
_ = ssh_command.expect_command(ssh_command.ssh_manager.master_controller, "provision-ap", cmd)
def end_end(ssh_command, cmd: WellFormedCommand):
_ = ssh_command.expect_command(ssh_command.ssh_manager.master_controller, "end", cmd)
_ = ssh_command.expect_command(ssh_command.ssh_manager.master_controller, "end", cmd)
def reprovision_or_enqueue(ssh_command, request: str, ap_wiredmac: str, cmd: WellFormedCommand):
"""If an AP is down when attempting to reprovision, postpone the reprovisioning (and other actions).
Run this command near the start of the process so we can fail (enqueue) early."""
lines = ssh_command.expect_command(ssh_command.ssh_manager.master_controller,
"read-bootinfo wired-mac " + ap_wiredmac, cmd)
for line in lines:
if line.startswith("AP with MAC"):
# Might say "AP with MAC address PP:QQ:RR:SS:TT:UU not found."
# This means the AP was not found on the controller.
raise ExceptionWithStatus("Error: AP was not found on the controller when reprovisioning.", 500)
if line.startswith("AP is down"):
# Enqueue the entire provisioning task until the AP is up.
enqueue_status = ssh_command.ssh_manager.queue.enqueue_request(request)
if enqueue_status:
raise ExceptionWithStatus("AP is down, command has been enqueued.", 202)
else:
raise ExceptionWithStatus("Error: AP is down and command could not be enqueued because of"
+ "a queue error.", 500)
def get_lms_ip_and_ap_status(ssh_command, ap_wiredmac: str, cmd: WellFormedCommand) -> Tuple[Optional[str], str]:
"""Get the IP of the LMS that knows more about this AP."""
lines = ssh_command.expect_command(ssh_command.ssh_manager.master_controller,
"show ap details wired-mac " + ap_wiredmac, cmd)
lms = None
ap_status = "Down"
for line in lines:
if line.startswith("LMS"):
parts = line.split()
try:
lms = parts[3]
except IndexError:
raise ExceptionWithStatus("Could not parse LMS IP.", 500)
if line.startswith("Status"):
parts = line.split()
try:
ap_status = parts[1]
except IndexError:
raise ExceptionWithStatus("Could not parse AP status.", 500)
# Might say """AP with MAC address PP:QQ:RR:SS:TT:UU not found."""
if line.startswith("AP with MAC "):
raise ExceptionWithStatus("AP not found on master when getting LMS.", 404)
return lms, ap_status
def get_lms_ip_and_connect_lms(ssh_command, ap_wiredmac: str, cmd: WellFormedCommand) -> str:
"""Connect to the IP of the LMS that knows more about this AP. Use this as a prelude to running LMS commands.
Includes some helpful exceptions so we know not to proceed with configuration if LMS is unavailable."""
lms, ap_status = get_lms_ip_and_ap_status(ssh_command, ap_wiredmac, cmd)
if ap_status == "Down":
raise ExceptionWithStatus("AP is down, cannot proceed.", 412)
if lms is None:
raise ExceptionWithStatus("No LMS found for this AP.", 404)
# Dynamically add an LMS if it is not already known (we may not have connected to it before).
if lms not in ssh_command.ssh_manager.switches:
ssh_command.ssh_manager.lms_ssh_connections[lms] = ssh_command.ssh_manager.get_new_expect_connection(lms)
# Get back to the "enable" prompt, in case something went wrong the last time we used this LMS.
_ = ssh_command.expect_command(lms, "end", cmd)
_ = ssh_command.expect_command(lms, "end", cmd)
return lms
def get_ap_name(ssh_command, ap_wiredmac: str, cmd: WellFormedCommand) -> str:
"""Get the name of this AP from its wired MAC."""
lines = ssh_command.expect_command(ssh_command.ssh_manager.master_controller,
"show ap details wired-mac " + ap_wiredmac + " | include Basic", cmd)
ap_name = None
for line in lines:
if line.startswith("AP"):
groups = re.findall(r'AP "(.*)" Basic Information', line)
try:
ap_name = groups[0]
break
except IndexError:
raise ExceptionWithStatus("Could not parse AP name.", 500)
if not ap_name:
raise ExceptionWithStatus("AP name not found for this wired MAC.", 404)
return ap_name
def get_group(ssh_command, ap_wiredmac: str, cmd: WellFormedCommand) -> Optional[str]:
lines = ssh_command.expect_command(ssh_command.ssh_manager.master_controller,
"show ap details wired-mac " + ap_wiredmac + " | include Group", cmd)
group = None
for line in lines:
if line.startswith("Group"):
parts = line.split()
try:
group = parts[1]
except IndexError:
raise ExceptionWithStatus("Could not parse group name from controller output.", 500)
if line.startswith("AP with MAC"):
# """AP with MAC address PP:QQ:RR:SS:TT:UU not found."""
raise ExceptionWithStatus("AP not found when getting group.", 404)
return group
def lms_get_gains(ssh_command, lms: str, ap_wiredmac: str, cmd: WellFormedCommand) -> tuple:
lines = ssh_command.expect_command(lms, "show ap provisioning wired-mac " + ap_wiredmac
+ ' | include "gain for 802.11"', cmd)
a_ant_gain = None
g_ant_gain = None
for line in lines:
# Might say """AP is not registered with this switch"""
if line.startswith("AP is not registered with this switch"):
raise ExceptionWithStatus("AP not registered on LMS when getting gains.", 404)
# Might say """AP with MAC address PP:QQ:RR:SS:TT:UU not found."""
if line.startswith("AP with MAC "):
raise ExceptionWithStatus("AP not found on LMS when getting gains.", 404)
if line.startswith("Antenna gain for 802.11a"):
parts = line.split()
try:
if parts[4] == "N/A":
a_ant_gain = None
else:
a_ant_gain = parts[4]
except IndexError:
raise ExceptionWithStatus("Could not parse antenna gain (a) from controller output.", 500)
if line.startswith("Antenna gain for 802.11g"):
parts = line.split()
try:
if parts[4] == "N/A":
g_ant_gain = None
else:
g_ant_gain = parts[4]
except IndexError:
raise ExceptionWithStatus("Could not parse antenna gain (g) from controller output.", 500)
return (a_ant_gain, g_ant_gain)
def lms_get_remote_ap(ssh_command, lms: str, ap_wiredmac: str, cmd: WellFormedCommand) -> Optional[int]:
lines = ssh_command.expect_command(lms, "show ap provisioning wired-mac " + ap_wiredmac
+ ' | include "Remote AP"', cmd)
remote_ap = None
for line in lines:
# Might say """AP is not registered with this switch"""
if line.startswith("AP is not registered with this switch"):
raise ExceptionWithStatus("AP not registered on LMS when getting remote AP.", 404)
# Might say """AP with MAC address PP:QQ:RR:SS:TT:UU not found."""
if line.startswith("AP with MAC "):
raise ExceptionWithStatus("AP not found on LMS when getting remote AP.", 404)
if line.startswith("Remote AP"):
parts = line.split()
try:
if parts[2] == "No":
remote_ap = 0
elif parts[2] == "Yes":
remote_ap = 1
else:
raise ExceptionWithStatus("Could not recognise remote AP from controller output.", 500)
except IndexError:
raise ExceptionWithStatus("Could not parse remote AP from controller output.", 500)
return remote_ap
def get_remote_ap(ssh_command, ap_wiredmac, cmd: WellFormedCommand) -> Optional[int]:
lms = get_lms_ip_and_connect_lms(ssh_command, ap_wiredmac, cmd)
remote_ap = lms_get_remote_ap(ssh_command, lms, ap_wiredmac, cmd)
return remote_ap
def get_gains(ssh_command, ap_wiredmac, cmd: WellFormedCommand) -> tuple:
lms = get_lms_ip_and_connect_lms(ssh_command, ap_wiredmac, cmd)
gains = lms_get_gains(ssh_command, lms, ap_wiredmac, cmd)
return gains
def get_gains_and_remote_ap(ssh_command, ap_wiredmac, cmd: WellFormedCommand) -> dict:
lms = get_lms_ip_and_connect_lms(ssh_command, ap_wiredmac, cmd)
gains = lms_get_gains(ssh_command, lms, ap_wiredmac, cmd)
remote_ap = lms_get_remote_ap(ssh_command, lms, ap_wiredmac, cmd)
return {"gains": gains, "remote_ap": remote_ap}
|
# -*- coding: utf-8 -*-
"""
@Time: 2021/6/28 16:09
@Author: zzhang zzhang@cenboomh.com
@File: config_cache_data.py
@desc:
"""
# Routing configuration
from collect.utils.collect_utils import get_safe_data
router_config = None
# Data conversion rules
rules = None
# Template keywords
key_word_rules = None
# SQL template data
sql_template_cache = {}
# Request field rules
request_rules = None
# django model configuration
django_model_config = None
# Request handler
request_handler = None
# Result handler
result_handler = None
# Module handler
module_handler = None
# Template custom filter functions
filter_handler = None
# Before-request plugin
before_plugin = None
# After-result plugin
after_plugin = None
# Third-party applications
third_application = None
class ConfigCacheData:
"""
获取配置缓存数据,可以将这个数据做成缓存,这里使用全局变量
"""
def __init__(self):
"""
"""
@staticmethod
def get_sql(key):
global sql_template_cache
return get_safe_data(key, sql_template_cache)
@staticmethod
def set_sql(key, content):
global sql_template_cache
sql_template_cache[key] = content
@staticmethod
def set_router_config(router_config_data):
global router_config
router_config = router_config_data
@staticmethod
def get_router_config():
"""
        Get the routing configuration
:return:
"""
global router_config
return router_config
@staticmethod
def get_rules():
"""
        Get the data result conversion rules
:return:
"""
global rules
return rules
@staticmethod
def set_rules(rules_data):
global rules
rules = rules_data
@staticmethod
def get_django_model_config():
"""
        Get the django model configuration
:return:
"""
global django_model_config
return django_model_config
@staticmethod
def set_django_model_config(django_model_config_data):
global django_model_config
django_model_config = django_model_config_data
@staticmethod
def get_key_word_rules():
"""
        Get the keyword rules
:return:
"""
global key_word_rules
return key_word_rules
@staticmethod
def set_key_word_rules(key_word_rule_data):
"""
        Set the keyword rules
:return:
"""
global key_word_rules
key_word_rules = key_word_rule_data
@staticmethod
def set_request_rules(request_rules_data):
global request_rules
request_rules = request_rules_data
@staticmethod
def get_request_rules():
global request_rules
return request_rules
@staticmethod
def set_request_handler(request_handler_data):
global request_handler
request_handler = request_handler_data
@staticmethod
def get_request_handler():
global request_handler
return request_handler
@staticmethod
def set_result_handler(result_handler_data):
global result_handler
result_handler = result_handler_data
@staticmethod
def get_result_handler():
global result_handler
return result_handler
@staticmethod
def set_module_handler(module_handler_data):
global module_handler
module_handler = module_handler_data
@staticmethod
def get_module_handler():
global module_handler
return module_handler
@staticmethod
def set_filter_handler(filter_handler_data):
global filter_handler
filter_handler = filter_handler_data
@staticmethod
def get_filter_handler():
global filter_handler
return filter_handler
@staticmethod
def set_before_plugin(before_plugin_data):
global before_plugin
before_plugin = before_plugin_data
@staticmethod
def get_before_plugin():
global before_plugin
return before_plugin
@staticmethod
def set_after_plugin(after_plugin_data):
global after_plugin
after_plugin = after_plugin_data
@staticmethod
def get_after_plugin():
global after_plugin
return after_plugin
@staticmethod
def set_third_application(third_application_data):
global third_application
third_application = third_application_data
@staticmethod
def get_third_application():
global third_application
return third_application
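# Usage sketch (illustrative only):
#
#   ConfigCacheData.set_sql('user_query', 'select * from users where id = {{ id }}')
#   template = ConfigCacheData.get_sql('user_query')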
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
import string
import random
import os
import argparse
# Generates a file containing the string to inject into the Makefile, under a
# random file name. The file is deleted when the instance is destroyed.
class makefileInjectant:
def __init__(self):
        # Generate a random file name for the injectant file
self.injectant = """print-%:
@echo '$($*)'"""
self.path_to_injectant =\
os.getcwd() + '/.__'+''.join([random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for i in range(10)])\
+ '.mk'
f = open(self.path_to_injectant, 'w')
f.write(self.injectant)
f.close()
def getPathToInjectant(self):
return self.path_to_injectant
def __del__(self):
os.remove(self.path_to_injectant)
# Given a Makefile path and a variable name, return the variable's contents as UTF-8 text
def getMakefileVariableText(path_to_makefile_, variable_):
inj = makefileInjectant()
path_to_injectant = inj.getPathToInjectant()
# print(path_to_injectant)
ord_val = 'print-' + variable_
mkout = subprocess.check_output(['make','-C', os.path.dirname(path_to_makefile_), '-f', path_to_injectant, '-f', path_to_makefile_, ord_val]).decode('utf8')
return mkout
# Given a Makefile path and a variable name, return the variable's contents as a list
def getMakefileVariable(path_to_makefile_, variable_):
val_text = getMakefileVariableText(path_to_makefile_, variable_)
val_text_split = val_text.split(' ')
vals = []
for v in val_text_split:
if((v == '') | (v == '\n')):
continue
vals.append(v.replace('\n', ''))
return vals
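# Example (illustrative): for a Makefile containing `TARGET = my_project`,
# getMakefileVariable('./Makefile', 'TARGET') returns ['my_project']. The
# trick is loading the injected `print-%` pattern rule alongside the real
# Makefile and invoking `make print-TARGET`, which echoes the expanded value.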
def main():
    # Arguments
    # Create the argument parser
psr = argparse.ArgumentParser()
psr.add_argument('--path_to_makefile', '-i', \
default='./Makefile', \
type=str, \
help='Path to CubeMX generated Makefile')
psr.add_argument('--path_to_output_directory', '-o', \
default='.', \
type=str, \
nargs=1, \
help='Path to otuput directory of CMakeists.txt')
arg = psr.parse_args()
############################################
# SET DEFAULF TOOL CHAIN PATH
############################################
path_to_default_toolchain = '/opt/env/gcc-arm-none-eabi-6-2017-q2-update'
path_to_makefile = arg.path_to_makefile
path_to_cmakelists_template = './template/CMakeLists.txt'
path_to_output = arg.path_to_output_directory[0] + '/CMakeLists.txt'
# print(path_to_makefile)
# print(path_to_output)
# path_to_makefile = './Makefile'
# path_to_output = './CMakeLists.txt'
# inj = makefileInjectant()
# mkout = subprocess.check_output(['make', '-f', 'inj.mk', '-f', 'Makefile', 'print-C_SOURCES']).decode('utf8')
# print(mkout)
# print(mkout.replace(' ', '\n'))
# print(mkout.split(' '))
# print(getMakefileVariableText('Makefile', 'C_SOURCES').split(' '))
# print(getMakefileVariable(path_to_makefile, 'OBJECTS'))
# for i in getMakefileVariable('Makefile', 'TARGET'):
# print(i)
root_dir = "${PROJECT_SOURCE_DIR}"
    # Load the CMakeLists.txt template
cmake_template_ = open(path_to_cmakelists_template).read()
cmake_template = cmake_template_
    # Output file handle
f_out = open(path_to_output, 'w')
##############################################
    # Project settings
##############################################
target_name = getMakefileVariable(path_to_makefile, 'TARGET')[0]
set_project = 'PROJECT(' + target_name + ' C CXX ASM)'
    # Substitute into the template
cmake_template = cmake_template.replace("###%PROJECT%###", set_project)
##############################################
    # Collect source and include-directory information
##############################################
    # C sources
set_c_sources = 'SET(C_SOURCES '
c_sources = getMakefileVariable(path_to_makefile, 'C_SOURCES')
for i in c_sources:
set_c_sources = set_c_sources + root_dir + '/' + i + ' '
set_c_sources = set_c_sources + ')'
print('###########################################')
print(set_c_sources)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%C_SOURCES%###', set_c_sources)
    # ASM sources
set_asm_sources = 'SET(ASM_SOURCES '
asm_sources = getMakefileVariable(path_to_makefile, 'ASM_SOURCES')
for i in asm_sources:
set_asm_sources = set_asm_sources + root_dir + '/' + i + ' '
set_asm_sources = set_asm_sources + ')'
print('###########################################')
print(set_asm_sources)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%ASM_SOURCES%###', set_asm_sources)
    # C include dirs
set_c_includes = 'SET(C_INCLUDES '
c_includes = getMakefileVariable(path_to_makefile, 'C_INCLUDES')
for i in c_includes:
inc_d = root_dir + '/' + i.replace('-I', '') + ' '
set_c_includes = set_c_includes + inc_d
set_c_includes = set_c_includes + ')'
    # INCLUDE_DIRECTORIES command
    set_include_directories_c = 'INCLUDE_DIRECTORIES(${C_INCLUDES})'
    print('###########################################')
    print(set_c_includes)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%C_INCLUDES%###', set_c_includes)
    # ASM include dirs
set_as_includes = 'SET(AS_INCLUDES '
as_includes = getMakefileVariable(path_to_makefile, 'AS_INCLUDES')
for i in as_includes:
inc_d = root_dir + '/' + i.replace('-I', '') + ' '
set_as_includes = set_as_includes + inc_d
set_as_includes = set_as_includes + ')'
    # INCLUDE_DIRECTORIES command
    set_include_directories_as = 'INCLUDE_DIRECTORIES(${AS_INCLUDES})'
    print('###########################################')
    print(set_as_includes)
    # Substitute into the template (the %ASM_INCLDUES% spelling matches the template file)
    cmake_template = cmake_template.replace('###%ASM_INCLDUES%###', set_as_includes)
    ##############################################
    # Definitions
##############################################
as_defines = getMakefileVariable(path_to_makefile, 'AS_DEFS')
c_defines = getMakefileVariable(path_to_makefile, 'C_DEFS')
set_add_definitions = ''
for i in as_defines:
set_add_definitions = set_add_definitions + 'add_definitions(' + i + ')\n'
for i in c_defines:
set_add_definitions = set_add_definitions + 'add_definitions(' + i + ')\n'
print('###########################################')
print(set_add_definitions)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%DEFINITIONS%###', set_add_definitions)
    ##############################################
    # Collect compiler flag information
    ##############################################
    # MCU options
set_mcu = 'SET(MCU "'
mcu_options = getMakefileVariable(path_to_makefile, 'MCU')
for i in mcu_options:
set_mcu = set_mcu + i + ' '
set_mcu = set_mcu + '")'
print('###########################################')
print(set_mcu)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%MCU%###', set_mcu)
    # Optimization options
set_opt = 'SET(OPT "'
opt_options = getMakefileVariable(path_to_makefile, 'OPT')
for i in opt_options:
set_opt = set_opt + i + ' '
set_opt = set_opt + '")'
print('###########################################')
print(set_opt)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%OPT%###', set_opt)
    # Assembler flags
set_asflags = 'SET(ASFLAGS "-x assembler-with-cpp ${MCU} ${OPT} -Wall -fdata-sections -ffunction-sections")'
set_cmake_asm_flags = 'SET(CMAKE_ASM_FLAGS ${ASFLAGS})'
print('###########################################')
print(set_asflags)
print(set_cmake_asm_flags)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%ASM_FLAGS%###', set_asflags)
    # C compiler flags
set_cflags = 'SET(CFLAGS "${MCU} ${OPT} -Wall -fdata-sections -ffunction-sections -g -gdwarf-2")'
set_cmake_c_flags = 'SET(CMAKE_C_FLAGS ${CFLAGS})'
print('###########################################')
print(set_cflags)
print(set_cmake_c_flags)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%C_FLAGS%###', set_cflags)
    #### Linker flags ####
    # Linker script path
set_ldscript = 'SET(LDSCRIPT '
ldscripts = getMakefileVariable(path_to_makefile, 'LDSCRIPT')
for i in ldscripts:
p2ldscript = root_dir + '/' + i + ' '
set_ldscript = set_ldscript + p2ldscript
set_ldscript = set_ldscript + ')'
print('###########################################')
print(set_ldscript)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%LDSCRIPTS%###', set_ldscript)
    # Libraries
set_libs = 'SET(LIBS "'
libs = getMakefileVariable(path_to_makefile, 'LIBS')
for i in libs:
set_libs = set_libs + i + ' '
set_libs = set_libs + '")'
print('###########################################')
print(set_libs)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%LIBS%###', set_libs)
    # Linker flags
set_ldflags = 'SET(LDFLAGS "${MCU} -specs=nano.specs -T${LDSCRIPT} ${LIBS} -Wl,-Map=${PROJECT_BINARY_DIR}/${PROJECT_NAME}.map,--cref -Wl,--gc-sections")'
print('###########################################')
print(set_ldflags)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%LDFLAGS%###', set_ldflags)
    # Linker flags command
    set_cmake_exe_linker_flags = 'SET(CMAKE_EXE_LINKER_FLAGS ${LDFLAGS})'
    ##############################################
    # Cross-compiler paths
    ##############################################
path_to_bin_dirs = getMakefileVariable(path_to_makefile, 'BINPATH')
path_to_bin_dir = ''
path_to_bin_dir_cxx = ''
    if(len(path_to_bin_dirs) == 0) :
        # Fall back to the default toolchain. Note the trailing slash: the
        # binary names read from the Makefile are appended directly below.
        path_to_bin_dir = path_to_default_toolchain + "/bin/"
        path_to_bin_dir_cxx = path_to_bin_dir
else :
path_to_bin_dir = ''
path_to_bin_dir_cxx = path_to_bin_dirs[0]
bin_prefix = getMakefileVariable(path_to_makefile, 'PREFIX')[0]
path_to_cc = path_to_bin_dir + getMakefileVariable(path_to_makefile, 'CC')[0]
path_to_cxx = path_to_bin_dir_cxx + '/' + bin_prefix + 'g++'
path_to_as = path_to_cc
path_to_cp = path_to_bin_dir + getMakefileVariable(path_to_makefile, 'CP')[0]
path_to_ar = path_to_bin_dir + getMakefileVariable(path_to_makefile, 'AR')[0]
path_to_sz = path_to_bin_dir + getMakefileVariable(path_to_makefile, 'SZ')[0]
print('###########################################')
print(path_to_bin_dir)
print(bin_prefix)
print(path_to_cc)
print(path_to_cxx)
print(path_to_as)
print(path_to_cp)
print(path_to_ar)
print(path_to_sz)
set_cmake_c_compiler = 'SET(CMAKE_C_COMPILER ' + path_to_cc + ')'
set_cmake_cxx_compiler = 'SET(CMAKE_CXX_COMPILER ' + path_to_cxx + ')'
set_cmake_asm_compiler = 'SET(CMAKE_ASM_COMPILER ' + path_to_as + ')'
set_cp = 'SET(OBJCP ' + path_to_cp + ')'
print('###########################################')
print(set_cmake_asm_compiler)
print(set_cmake_c_compiler)
print(set_cmake_cxx_compiler)
print(set_cp)
    # Substitute into the template
    cmake_template = cmake_template.replace('###%C_COMPILER%###', set_cmake_c_compiler)
    cmake_template = cmake_template.replace('###%CXX_COMPILER%###', set_cmake_cxx_compiler)
    cmake_template = cmake_template.replace('###%ASM_COMPILER%###', set_cmake_asm_compiler)
    cmake_template = cmake_template.replace('###%CP%###', set_cp)
    ##############################################
    # Save the substituted template
##############################################
print('############################################')
print(cmake_template)
f_out.write(cmake_template)
f_out.close()
if __name__ == '__main__': main()
|
"""GARCH LLH benchmark, Tensorflow v1 version."""
import numpy as np
import time
import argparse
import json
import math
from util import benchmark, garch_data
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
parser = argparse.ArgumentParser()
parser.add_argument('-n', metavar='n', default=1000, type=int,
                    help='number of iterations')
parser.add_argument('-mode', metavar='mode', default='cpu',
choices=['cpu', 'gpu'], help='use cpu/gpu')
args = parser.parse_args()
out = {}
ret, x0, val_llh = garch_data()
config = tf.ConfigProto()
if args.mode == 'cpu':
config = tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)
sess = tf.Session(config=config)
ht0 = np.mean(np.square(ret))
y = tf.Variable(ret, name='y', dtype='float32')
params = tf.Variable(x0, name='params')
y2 = tf.square(y)
ht_zero = tf.reduce_mean(y2)
def garch(ht, y):
return params[0] + params[1]*y + params[2]*ht
hts = tf.scan(fn=garch, elems=y2, initializer=ht_zero)
hts = tf.concat(([ht0], hts[:-1]), 0)
tf_llh = (-0.5*(tf.cast(y.shape[0], tf.float32)-1)*tf.log(2*np.pi) -
0.5*tf.reduce_sum(tf.log(hts) + tf.square(y/tf.sqrt(hts))))
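# For reference, the same recursion and log-likelihood in plain NumPy (a
# hedged sketch mirroring the graph above, not part of the benchmark):
#
#   hts = np.empty_like(ret); hts[0] = ht0
#   for t in range(1, len(ret)):
#       hts[t] = x0[0] + x0[1] * ret[t - 1] ** 2 + x0[2] * hts[t - 1]
#   llh = (-0.5 * (len(ret) - 1) * np.log(2 * np.pi)
#          - 0.5 * np.sum(np.log(hts) + (ret / np.sqrt(hts)) ** 2))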
with sess.as_default():
tf.global_variables_initializer().run()
t = benchmark(lambda: sess.run(tf_llh, feed_dict={params: x0}),
args.n,
val_llh)
out['tensorflow-' + args.mode] = t
def unroll(x, h0, n):
h = h0
return tf.stack([h := garch(h, x[t]) for t in range(n)])
tf_unroll0 = unroll(y2, ht0, len(ret) - 1)
hts = tf.concat(([ht0], tf_unroll0), 0)
tf_llh_unroll = (-0.5*(tf.cast(y.shape[0], tf.float32)-1)*tf.log(2*np.pi) -
0.5*tf.reduce_sum(tf.log(hts) + tf.square(y/tf.sqrt(hts))))
with sess.as_default():
tf.global_variables_initializer().run()
t = benchmark(lambda: sess.run(tf_llh_unroll, feed_dict={params: x0}),
args.n,
val_llh)
out['tensorflow-v1-'+args.mode+'-unroll'] = t
print(json.dumps(out))
|
from django.conf import settings
from django.shortcuts import redirect
from django.utils.http import urlquote
from django.contrib.auth import REDIRECT_FIELD_NAME
from impersonate.helpers import get_redir_path, check_allow_impersonate
def allowed_user_required(view_func):
def _checkuser(request, *args, **kwargs):
if not request.user.is_authenticated():
return redirect('%s?%s=%s' % (
settings.LOGIN_URL,
REDIRECT_FIELD_NAME,
urlquote(request.get_full_path()),
))
if getattr(request.user, 'is_impersonate', False):
# Do not allow an impersonated session to use the
# impersonate views.
return redirect(get_redir_path())
if check_allow_impersonate(request):
# user is allowed to impersonate
return view_func(request, *args, **kwargs)
else:
# user is not allowed to impersonate at all
return redirect(get_redir_path())
return _checkuser
|
#!/usr/bin/python
import hashlib, re, sys, urllib2
from optparse import OptionParser
HASH_REGEX = re.compile("([a-fA-F0-9]{32})")
# Gets an HTTP response from a url, returns string
# This spoofs the user agent because Google blocks
# bot requests from urllib usually
def getResponse(url):
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20121207 Firefox/29.0')]
try:
response = opener.open(url).read()
except:
print "Unexpected HTTP Error"
sys.exit(-1)
return response
# h = hash, as a hex string
# wordlist = a set
def dictionary_attack(h, wordlist):
for word in wordlist:
if hashlib.md5(word).hexdigest() == h:
return word
return None
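# e.g. dictionary_attack('5f4dcc3b5aa765d61d8327deb882cf99', set(['password']))
# returns 'password', since that digest is md5('password')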
# h = hash, as a hex string
def format_it(h, plaintext):
return "{myhash}:{myplaintext}".format(myhash = h, myplaintext = plaintext)
def crack_single_hash(h):
URL = "http://www.google.com/search?q={myhash}".format(myhash = h)
response = getResponse(URL)
# getResponse already returns the body as a string, so no .read() here
wordlist = response.replace('.', ' ').replace(
':', ' ').replace('?', '').replace("('", ' ').replace("'", ' ').split(' ')
plaintext = dictionary_attack(h, set(wordlist))
return plaintext
class BozoCrack(object):
def __init__(self, filename, *args, **kwargs):
self.hashes = []
with open(filename, 'r') as f:
hashes = [h.lower() for line in f if HASH_REGEX.match(line)
for h in HASH_REGEX.findall(line.replace('\n', ''))]
self.hashes = sorted(set(hashes))
print "Loaded {count} unique hashes".format(count = len(self.hashes))
self.cache = self.load_cache()
def crack(self):
cracked_hashes = []
for h in self.hashes:
if h in self.cache:
print format_it(h, self.cache[h])
cracked_hashes.append( (h, self.cache[h]) )
continue
plaintext = crack_single_hash(h)
if plaintext:
print format_it(h, plaintext)
self.cache[h] = plaintext
self.append_to_cache(h, plaintext)
cracked_hashes.append( (h, plaintext) )
else:
print "Hash unable to be cracked."
return cracked_hashes
def load_cache(self, filename='cache'):
cache = {}
with open(filename, 'a+') as c:
for line in c:
hash, plaintext = line.replace('\n', '').split(':', 1)
cache[hash] = plaintext
return cache
def append_to_cache(self, h, plaintext, filename='cache'):
with open(filename, 'a+') as c:
c.write(format_it(h, plaintext)+"\n")
def main(): # pragma: no cover
parser = OptionParser()
parser.add_option('-s', '--single', metavar='MD5HASH',
help = 'cracks a single hash', dest='single', default = False)
parser.add_option('-f', '--file', metavar='HASHFILE',
help = 'cracks multiple hashes on a file', dest = 'target',)
options, args = parser.parse_args()
if not options.single and not options.target:
parser.error("please select -s or -f")
elif options.single:
plaintext = crack_single_hash(options.single)
if plaintext:
print format_it(options.single, plaintext)
else:
print "Hash unable to be cracked."
else:
cracked = BozoCrack(options.target).crack()
if not cracked:
print "No hashes were cracked."
if __name__ == '__main__': # pragma: no cover
main()
|
"""
newaliaswindow.py
"""
try:
import pygtk
pygtk.require("2.0")
except:
pass
from gi.repository import Gtk
class NewAliasWindow(object):
"""
Window for creating new aliases in the application.
"""
def __init__(self, application):
builder = Gtk.Builder()
builder.add_from_file("ui/NewAlias.glade")
self.window = builder.get_object("NewAliasWindow")
self.entry_server = builder.get_object("EntryServer")
self.entry_alias = builder.get_object("EntryAlias")
self.entry_authenticate = builder.get_object("EntryAuthenticate")
self.application = application
builder.connect_signals(self)
self.window.set_visible(True)
def create_new_alias(self, element):
server = self.entry_server.get_text()
alias = self.entry_alias.get_text()
authenticate = self.entry_authenticate.get_text()
self.application.add_alias(alias, server, authenticate)  # pass the authenticate entry instead of discarding it
self.window.destroy()
def cancel(self, element):
self.window.destroy()
|
#!/usr/bin/env python
import board
import busio
import adafruit_tca9548a
import adafruit_drv2605
i2c = busio.I2C(board.SCL, board.SDA)
tca = adafruit_tca9548a.TCA9548A(i2c)
drv1 = adafruit_drv2605.DRV2605(tca[0])
drv2 = adafruit_drv2605.DRV2605(tca[1])
while True:
drv1.sequence[0] = adafruit_drv2605.Effect(1)  # built-in effect 1 ("strong click")
drv1.play()
drv2.sequence[0] = adafruit_drv2605.Effect(47)  # built-in effect 47 (a buzz waveform)
drv2.play()
|
description = 'FOV linear axis for the large box (300 x 300)'
group = 'optional'
excludes = ['fov_100x100', 'fov_190x190']
includes = ['frr']
devices = dict(
fov_300_mot = device('nicos.devices.generic.VirtualReferenceMotor',
description = 'FOV motor',
visibility = (),
abslimits = (275, 950),
userlimits = (276, 949),
refswitch = 'low',
refpos = 275,
unit = 'mm',
speed = 5,
),
# fov_300_enc = device('nicos.devices.generic.VirtualCoder',
# description = 'FOV encoder',
# motor = 'fov_300_mot',
# visibility = (),
# ),
fov_300 = device('nicos.devices.generic.Axis',
description = 'FOV linear axis',
pollinterval = 5,
maxage = 10,
precision = 0.1,
fmtstr = '%.2f',
motor = 'fov_300_mot',
# coder = 'fov_300_enc',
),
)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2018/4/26 14:11
# @Author : zhangjun
# Title :
|
"""
Parses the input data and does the following operations
- Loads the CSV dataset
- Geocodes city names to latitude and longitude
- Exports the data as JSON
"""
import argparse
import csv
import json
import urllib.parse
import urllib.request
import os
import time
import tableprint
GEOCODE_API_KEY = os.environ.get('GEOCODE_API_KEY')
if GEOCODE_API_KEY is None:
raise Exception('GEOCODE_API_KEY environment variable not defined')
GEOCODE_URL = 'https://maps.googleapis.com/maps/api/geocode/json?{}'
def read_csv(filename):
""" Reads a CSV file and returns the data entries """
data_entries = []
with open(filename, 'r') as csvfile:
reader = csv.reader(csvfile)
# Skip the header
next(reader)
for row in reader:
entry = {
'city_name': row[0],
'crime_rate': float(row[4])
}
data_entries.append(entry)
return data_entries
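# Note on read_csv above: only columns 0 and 4 are used, so a hypothetical
# input row layout (invented for illustration) could be:
# city_name,state,population,violent_crimes,crime_rate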
def print_info(json_data):
components = json_data['results'][0]['address_components'][0]
location = json_data['results'][0]['geometry']['location']
types = ', '.join(components['types'])
data = [
components['long_name'],
location['lat'],
location['lng']
]
print(tableprint.row(data, width=30))
def geocode_city_name(city_name):
""" Geocodes a city name and returns the address """
params = {
'address': city_name,
'key': GEOCODE_API_KEY
}
encoded_params = urllib.parse.urlencode(params)
url = GEOCODE_URL.format(encoded_params)
coords = ()
try:
with urllib.request.urlopen(url) as f:
json_data = json.loads(f.read())
location = json_data['results'][0]['geometry']['location']
coords = (location['lat'], location['lng'])
print_info(json_data)
except urllib.error.HTTPError as err:
print('Got error code: {}'.format(err.code))
return coords
def geocode_crime_data(data_entries):
""" Geocodes a list of crime data entries """
print(tableprint.header(['City Name', 'Lat', 'Long'], width=30))
for entry in data_entries:
coords = geocode_city_name(entry['city_name'])
entry['coords'] = coords
# Add a delay for API limiting
time.sleep(0.1)
print(tableprint.bottom(3, width=30))
return data_entries
def save_json_file(data, filename):
""" Saves the data as a JSON file """
with open(filename, 'w') as f:
json_data = json.dumps(data)
f.write(json_data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('filename', help='The filename of the data file')
args = parser.parse_args()
data_entries = read_csv(args.filename)
data_entries = geocode_crime_data(data_entries)
save_json_file(data_entries, 'output.json')
|
import sys
if sys.version_info >= (3, 8):
from unittest import IsolatedAsyncioTestCase as TestCase
else:
from unittest import TestCase
import pytest
import jj
from jj.apps import DefaultApp, create_app
from jj.handlers import default_handler
from jj.matchers import PathMatcher
from jj.resolvers import Registry, ReversedResolver
from jj.responses import Response
from .._test_utils import run
class TestDefaultApp(TestCase):
def setUp(self):
self.default_app = create_app()
self.resolver = ReversedResolver(Registry(), self.default_app, default_handler)
def test_default_app_is_singleton(self):
self.assertEqual(DefaultApp(), DefaultApp())
@pytest.mark.asyncio
async def test_default_app_with_handler(self):
path, status, text = "/route", 201, "text"
@PathMatcher(path, resolver=self.resolver)
async def handler(request):
return Response(status=status, text=text)
async with run(self.default_app, self.resolver) as client:
response = await client.get(path)
self.assertEqual(response.status, status)
self.assertEqual(await response.text(), text)
@pytest.mark.asyncio
async def test_default_app_without_handlers(self):
path, status, text = "/route", 201, "text"
class App(jj.App):
resolver = self.resolver
@PathMatcher(path, resolver=resolver)
async def handler(request):
return Response(status=status, text=text)
async with run(self.default_app, self.resolver) as client:
response = await client.get(path)
self.assertEqual(response.status, 404)
@pytest.mark.asyncio
async def test_default_app(self):
path = "/route"
status1, text1 = 201, "text-1"
status2, text2 = 202, "text-2"
@PathMatcher(path, resolver=self.resolver)
async def handler(request):
return Response(status=status1, text=text1)
class App(jj.App):
resolver = self.resolver
@PathMatcher(path, resolver=resolver)
async def handler(request):
return Response(status=status2, text=text2)
async with run(self.default_app, self.resolver) as client:
response = await client.get(path)
self.assertEqual(response.status, status1)
self.assertEqual(await response.text(), text1)
async with run(App(), self.resolver) as client:
response = await client.get(path)
self.assertEqual(response.status, status2)
self.assertEqual(await response.text(), text2)
|
# Floyd–Steinberg dithering with serpentine scanning
# Source: https://en.wikipedia.org/wiki/Floyd–Steinberg_dithering
#
# The pixels argument expects a two-dimensional array of tuples.
# Each tuple holds exactly three color channels.
# The old version expected tuples of three values in the range 0-255
# (and was buggy)
#def find_closest_palette_color( oldpixel, palette ):
# ch = [0,0,0]
# ch[0] = round( oldpixel[0] / 255 * 2**palette )
# ch[1] = round( oldpixel[1] / 255 * 2**palette )
# ch[2] = round( oldpixel[2] / 255 * 2**palette )
# return( ( int( ch[0]*255/2**palette ), int( ch[1]*255/2**palette ), int( ch[2]*255/2**palette ) ) )
#
#
# Expects a single color value as the quotient of the value
# and the color space, e.g. 85 in TrueColor (8 bit per channel) = 85/255 = 1/3 ≈ 0.333
# Invariant: 0 ≤ ColorRatio ≤ 1
# The palette argument describes how many color gradations are
# possible between 0% and 100%, where the argument is the
# exponent to base 2 (3 = 2**3 = 8 levels)
def find_closest_palette_color( ColorRatio: float, palette: int ) -> float:
raum = 2**palette - 1
ret = round( ColorRatio * raum ) / raum
# Make sure the value always stays between 0 and 1
ret = InBetween( ret, 0.0, 1.0 )
return( ret )
# Note: Python's round() rounds halves to the nearest even value (banker's rounding),
# but since the divisor (raum) is always odd, ColorRatio * raum can never land exactly on .5
def InBetween( value: float, minimum: float=0.0 , maximum: float=1.0 ) -> float:
ret = value
ret = max( ret, minimum )
ret = min( ret, maximum )
return( ret )
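# Worked example: with palette=2 there are 2**2 - 1 = 3 steps between 0 and 1,
# so find_closest_palette_color(0.40, 2) == round(0.40 * 3) / 3 == 1/3 ≈ 0.333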
def fsd( PrePic, WIDTH, HEIGHT, palette, DEPTH ):
with open("R:\\Temp\\debug.txt","w+") as f:
#
# DEBUG START
# print( str(WIDTH), str(HEIGHT), str(palette), str(DEPTH), file=f )
# DEBUG ENDE
#
px = PrePic
for y in range( HEIGHT ):
oldpixel = None
newpixel = None
for x in range( WIDTH ):
oldpixel = px[y*WIDTH+x] / (2**DEPTH-1)
newpixel = find_closest_palette_color( oldpixel, palette )
px[y*WIDTH+x] = int( round( newpixel * (2**DEPTH-1) ) )
# If the virtual color depth is greater than the
# actual color depth, i.e. the color space of the
# palette, apply dithering
if DEPTH > palette:
quant_error = (oldpixel - newpixel) * (2**DEPTH-1)
if x < WIDTH-1:
# DEBUG
# print("#1 Rechne: ", px[y*WIDTH+x+1], "+", quant_error * 7/16, "=", int( round( InBetween( (px[y*WIDTH+x+1] + quant_error * 7/16) / (2**DEPTH-1) ) * (2**DEPTH-1) ) ), file=f)
px[y*WIDTH+x+1] = InBetween( (px[y*WIDTH+x+1] + quant_error * 7/16) / (2**DEPTH-1) ) * (2**DEPTH-1)
if x > 0 and y < HEIGHT -1:
# DEBUG
# print("#2 Rechne: ", px[(y+1)*WIDTH+x-1], "+", quant_error * 3/16, "=", int( round( InBetween( (px[(y+1)*WIDTH+x-1] + quant_error * 3/16) / (2**DEPTH-1) ) * (2**DEPTH-1) ) ), file=f)
px[(y+1)*WIDTH+x-1] = InBetween( (px[(y+1)*WIDTH+x-1] + quant_error * 3/16) / (2**DEPTH-1) ) * (2**DEPTH-1)
if y < HEIGHT -1:
# DEBUG
# print("#3 Rechne: ", px[(y+1)*WIDTH+x], "+", quant_error * 5/16, "=", int( round( InBetween( (px[(y+1)*WIDTH+x] + quant_error * 5/16) / (2**DEPTH-1) ) * (2**DEPTH-1) ) ), file=f)
px[(y+1)*WIDTH+x] = InBetween( (px[(y+1)*WIDTH+x] + quant_error * 5/16) / (2**DEPTH-1) ) * (2**DEPTH-1)
if x < WIDTH -1 and y < HEIGHT -1:
# DEBUG
# print("#4 Rechne: ", px[(y+1)*WIDTH+x+1], "+", quant_error * 1/16, "=", int( round( InBetween( (px[(y+1)*WIDTH+x+1] + quant_error * 1/16) / (2**DEPTH-1) ) * (2**DEPTH-1) ) ), file=f)
px[(y+1)*WIDTH+x+1] = InBetween( (px[(y+1)*WIDTH+x+1] + quant_error * 1/16) / (2**DEPTH-1) ) * (2**DEPTH-1)
# DEBUG
# print("Zeile: ", y, "Alt: ", str(oldpixel), "Neu: ", str(newpixel), px[y*WIDTH+x], file=f )
return px
# def fsd( pixels, width, height, palette=1, serpentine=False ):
#
# for y in range( height ):
# for x in range( width ):
#
# # Quality improvement:
# # in serpentine mode the image should produce slightly fewer artifacts
# # if serpentine == True:
# # x = width -1 - x
#
# #
# oldpixel = pixels[x,y]
# newpixel = find_closest_palette_color( oldpixel, palette )
# pixels[x,y] = newpixel
# quant_error = ( oldpixel[0]-newpixel[0], oldpixel[1]-newpixel[1], oldpixel[2]-newpixel[2] )
#
# # push errors to neighbours
# if x < width-1:
# pixels[x+1,y] = (
# int( round( pixels[x+1,y][0] + quant_error[0] * 7/16 ) ),
# int( round( pixels[x+1,y][1] + quant_error[1] * 7/16 ) ),
# int( round( pixels[x+1,y][2] + quant_error[2] * 7/16 ) )
# )
# if x > 0 and y < height -1:
# pixels[x-1,y+1] = (
# int( round( pixels[x-1,y+1][0] + quant_error[0] * 3/16 ) ),
# int( round( pixels[x-1,y+1][1] + quant_error[1] * 3/16 ) ),
# int( round( pixels[x-1,y+1][2] + quant_error[2] * 3/16 ) )
# )
# if y < height -1:
# pixels[x,y+1] = (
# int( round( pixels[x,y+1][0] + quant_error[0] * 5/16 ) ),
# int( round( pixels[x,y+1][1] + quant_error[1] * 5/16 ) ),
# int( round( pixels[x,y+1][2] + quant_error[2] * 5/16 ) )
# )
# if x < width -1 and y < height -1:
# pixels[x+1,y+1] = (
# int( round( pixels[x+1,y+1][0] + quant_error[0] * 1/16 ) ),
# int( round( pixels[x+1,y+1][1] + quant_error[1] * 1/16 ) ),
# int( round( pixels[x+1,y+1][2] + quant_error[2] * 1/16 ) )
# )
#
# #
# pixels[x,y] = ( pixels[x,y][0] << 4,pixels[x,y][1] << 4,pixels[x,y][2] << 4 )
|
'''
while True:
print("Welcome to loops")
print("............................")
'''
'''
import time
while True:
print("MOnitoring file system usage")
time.sleep(1)
'''
'''
value=4
while value<=6789:
print(value)
value=value+456
'''
'''
cnt=1
while cnt <=5:
print("hello")
cnt=cnt+1
'''
|
##
# Copyright (c) 2012 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
from importkit.meta import Object
from importkit.yaml.validator.tests.base import SchemaTest, raises, result
class A(Object):
def __init__(self, test1, test2):
self.test1 = test1
self.test2 = test2
def __eq__(self, other):
return isinstance(other, A) and other.test1 == self.test1 and other.test2 == self.test2
def __sx_setstate__(self, data):
self.test1 = data['test1']
self.test2 = data['test2']
class TestInheritance(SchemaTest):
def setUp(self):
super().setUp()
self.schema = self.get_schema('inheritance.Schema')
@result(expected_result=A(test1=1, test2='str2'))
def test_validator_inheritance1(self):
"""
test1: 1
test2: str2
"""
@raises(Exception, 'expected integer')
def test_validator_inheritance2(self):
"""
test1: wrong
test2: 2
"""
|
# Copyright (c) 2010 Franz Allan Valencia See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot.api import logger
class Query(object):
"""
Query handles all the querying done by the Database Library.
"""
def query(self, selectStatement, sansTran=False, returnAsDict=False):
"""
Uses the input `selectStatement` to query for the values that will be returned as a list of tuples. Set optional
input `sansTran` to True to run command without an explicit transaction commit or rollback.
Set optional input `returnAsDict` to True to return values as a list of dictionaries.
Tip: Unless you want to log all column values of the specified rows,
try specifying the column names in your select statements
as much as possible to prevent any unnecessary surprises with schema
changes and to easily see what your [] indexing is trying to retrieve
(i.e. instead of `"select * from my_table"`, try
`"select id, col_1, col_2 from my_table"`).
For example, given we have a table `person` with the following data:
| id | first_name | last_name |
| 1 | Franz Allan | See |
When you do the following:
| @{queryResults} | Query | SELECT * FROM person |
| Log Many | @{queryResults} |
You will get the following:
[1, 'Franz Allan', 'See']
Also, you can do something like this:
| ${queryResults} | Query | SELECT first_name, last_name FROM person |
| Log | ${queryResults[0][1]}, ${queryResults[0][0]} |
And get the following
See, Franz Allan
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| @{queryResults} | Query | SELECT * FROM person | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Query | %s ' % selectStatement)
self.__execute_sql(cur, selectStatement)
allRows = cur.fetchall()
if returnAsDict:
mappedRows = []
col_names = [c[0] for c in cur.description]
for rowIdx in range(len(allRows)):
d = {}
for colIdx in range(len(allRows[rowIdx])):
d[col_names[colIdx]] = allRows[rowIdx][colIdx]
mappedRows.append(d)
return mappedRows
return allRows
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def row_count(self, selectStatement, sansTran=False):
"""
Uses the input `selectStatement` to query the database and returns the number of rows from the query. Set
optional input `sansTran` to True to run command without an explicit transaction commit or rollback.
For example, given we have a table `person` with the following data:
| id | first_name | last_name |
| 1 | Franz Allan | See |
| 2 | Jerry | Schneider |
When you do the following:
| ${rowCount} | Row Count | SELECT * FROM person |
| Log | ${rowCount} |
You will get the following:
2
Also, you can do something like this:
| ${rowCount} | Row Count | SELECT * FROM person WHERE id = 2 |
| Log | ${rowCount} |
And get the following
1
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| ${rowCount} | Row Count | SELECT * FROM person | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Row Count | %s ' % selectStatement)
self.__execute_sql(cur, selectStatement)
data = cur.fetchall()
if self.db_api_module_name in ["sqlite3", "ibm_db", "ibm_db_dbi", "pyodbc", "jaydebeapi"]:
rowCount = len(data)
else:
rowCount = cur.rowcount
return rowCount
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def description(self, selectStatement, sansTran=False):
"""
Uses the input `selectStatement` to query a table in the db which will be used to determine the description. Set
optional input `sansTran` to True to run command without an explicit transaction commit or rollback.
For example, given we have a table `person` with the following data:
| id | first_name | last_name |
| 1 | Franz Allan | See |
When you do the following:
| @{queryResults} | Description | SELECT * FROM person |
| Log Many | @{queryResults} |
You will get the following:
[Column(name='id', type_code=1043, display_size=None, internal_size=255, precision=None, scale=None, null_ok=None)]
[Column(name='first_name', type_code=1043, display_size=None, internal_size=255, precision=None, scale=None, null_ok=None)]
[Column(name='last_name', type_code=1043, display_size=None, internal_size=255, precision=None, scale=None, null_ok=None)]
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| @{queryResults} | Description | SELECT * FROM person | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Description | %s ' % selectStatement)
self.__execute_sql(cur, selectStatement)
description = list(cur.description)
if sys.version_info[0] < 3:
for row in range(0, len(description)):
description[row] = (description[row][0].encode('utf-8'),) + description[row][1:]
return description
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def delete_all_rows_from_table(self, tableName, sansTran=False):
"""
Delete all the rows within a given table. Set optional input `sansTran` to True to run command without an
explicit transaction commit or rollback.
For example, given we have a table `person` in a database
When you do the following:
| Delete All Rows From Table | person |
If all the rows can be successfully deleted, then you will get:
| Delete All Rows From Table | person | # PASS |
If the table doesn't exist or all the data can't be deleted, then you
will get:
| Delete All Rows From Table | first_name | # FAIL |
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| Delete All Rows From Table | person | True |
"""
cur = None
selectStatement = ("DELETE FROM %s;" % tableName)
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Delete All Rows From Table | %s ' % selectStatement)
result = self.__execute_sql(cur, selectStatement)
if result is not None:
if not sansTran:
self._dbconnection.commit()
return result
if not sansTran:
self._dbconnection.commit()
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def execute_sql_script(self, sqlScriptFileName, sansTran=False):
"""
Executes the content of the `sqlScriptFileName` as SQL commands. Useful for setting the database to a known
state before running your tests, or clearing out your test data after running each test. Set optional input
`sansTran` to True to run command without an explicit transaction commit or rollback.
Sample usage :
| Execute Sql Script | ${EXECDIR}${/}resources${/}DDL-setup.sql |
| Execute Sql Script | ${EXECDIR}${/}resources${/}DML-setup.sql |
| #interesting stuff here |
| Execute Sql Script | ${EXECDIR}${/}resources${/}DML-teardown.sql |
| Execute Sql Script | ${EXECDIR}${/}resources${/}DDL-teardown.sql |
SQL commands are expected to be delimited by a semi-colon (';').
For example:
DELETE FROM person_employee_table;
DELETE FROM person_table;
DELETE FROM employee_table;
Also, the last SQL command can optionally omit its trailing semi-colon.
For example:
DELETE FROM person_employee_table;
DELETE FROM person_table;
DELETE FROM employee_table
Given this, you can spread your SQL commands over several
lines.
For example:
DELETE
FROM person_employee_table;
DELETE
FROM person_table;
DELETE
FROM employee_table
However, any line that starts with a number sign (`#`) is treated as a
comment. Thus, none of the contents of that line will be executed.
For example:
# Delete the bridging table first...
DELETE
FROM person_employee_table;
# ...and then the bridged tables.
DELETE
FROM person_table;
DELETE
FROM employee_table
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| Execute Sql Script | ${EXECDIR}${/}resources${/}DDL-setup.sql | True |
"""
sqlScriptFile = open(sqlScriptFileName, encoding='UTF-8')
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Execute SQL Script | %s ' % sqlScriptFileName)
sqlStatement = ''
for line in sqlScriptFile:
PY3K = sys.version_info >= (3, 0)
if not PY3K:
#spName = spName.encode('ascii', 'ignore')
line = line.strip().decode("utf-8")
if line.startswith('#'):
continue
elif line.startswith('--'):
continue
sqlFragments = line.split(';')
if len(sqlFragments) == 1:
sqlStatement += line + ' '
else:
for sqlFragment in sqlFragments:
sqlFragment = sqlFragment.strip()
if len(sqlFragment) == 0:
continue
sqlStatement += sqlFragment + ' '
self.__execute_sql(cur, sqlStatement)
sqlStatement = ''
sqlStatement = sqlStatement.strip()
if len(sqlStatement) != 0:
self.__execute_sql(cur, sqlStatement)
if not sansTran:
self._dbconnection.commit()
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def execute_sql_string(self, sqlString, sansTran=False):
"""
Executes the sqlString as SQL commands. Useful to pass arguments to your sql. Set optional input `sansTran` to
True to run command without an explicit transaction commit or rollback.
SQL commands are expected to be delimited by a semi-colon (';').
For example:
| Execute Sql String | DELETE FROM person_employee_table; DELETE FROM person_table |
For example with an argument:
| Execute Sql String | SELECT * FROM person WHERE first_name = ${FIRSTNAME} |
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| Execute Sql String | DELETE FROM person_employee_table; DELETE FROM person_table | True |
"""
cur = None
try:
cur = self._dbconnection.cursor()
logger.info('Executing : Execute SQL String | %s ' % sqlString)
self.__execute_sql(cur, sqlString)
if not sansTran:
self._dbconnection.commit()
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def call_stored_procedure(self, spName, spParams=None, sansTran=False):
"""
Uses the inputs of `spName` and 'spParams' to call a stored procedure. Set optional input `sansTran` to
True to run command without an explicit transaction commit or rollback.
spName should be the stored procedure name itself
spParams [Optional] should be a List of the parameters being sent in. The list can be one or multiple items.
The return from this keyword will always be a list.
Example:
| @{ParamList} = | Create List | FirstParam | SecondParam | ThirdParam |
| @{QueryResults} = | Call Stored Procedure | DBName.SchemaName.StoredProcName | List of Parameters |
Example:
| @{ParamList} = | Create List | Testing | LastName |
| Set Test Variable | ${SPName} = | DBTest.DBSchema.MyStoredProc |
| @{QueryResults} = | Call Stored Procedure | ${SPName} | ${ParamList} |
| Log List | @{QueryResults} |
Using optional `sansTran` to run command without an explicit transaction commit or rollback:
| @{QueryResults} = | Call Stored Procedure | DBName.SchemaName.StoredProcName | List of Parameters | True |
"""
if spParams is None:
spParams = []
cur = None
try:
if self.db_api_module_name in ["cx_Oracle"]:
cur = self._dbconnection.cursor()
else:
cur = self._dbconnection.cursor(as_dict=False)
PY3K = sys.version_info >= (3, 0)
if not PY3K:
spName = spName.encode('ascii', 'ignore')
logger.info('Executing : Call Stored Procedure | %s | %s ' % (spName, spParams))
cur.callproc(spName, spParams)
cur.nextset()
retVal = list()
for row in cur:
#logger.info ( ' %s ' % (row))
retVal.append(row)
if not sansTran:
self._dbconnection.commit()
return retVal
finally:
if cur:
if not sansTran:
self._dbconnection.rollback()
def __execute_sql(self, cur, sqlStatement):
return cur.execute(sqlStatement)
|
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from metalsmith import _network_metadata
class TestMetadataAdd(unittest.TestCase):
def test_metadata_add_links(self):
port = mock.Mock()
network = mock.Mock()
port.id = 'port_id'
port.mac_address = 'aa:bb:cc:dd:ee:ff'
network.mtu = 1500
links = []
expected = [{'id': 'port_id',
'type': 'phy',
'mtu': 1500,
'ethernet_mac_address': 'aa:bb:cc:dd:ee:ff'}]
_network_metadata.metadata_add_links(links, port, network)
self.assertEqual(expected, links)
def test_metadata_add_services(self):
subnet_a = mock.Mock()
subnet_b = mock.Mock()
subnet_a.dns_nameservers = ['192.0.2.1', '192.0.2.2']
subnet_b.dns_nameservers = ['192.0.2.11', '192.0.2.22']
subnets = [subnet_a, subnet_b]
services = []
expected = [{'address': '192.0.2.1', 'type': 'dns'},
{'address': '192.0.2.2', 'type': 'dns'},
{'address': '192.0.2.11', 'type': 'dns'},
{'address': '192.0.2.22', 'type': 'dns'}]
_network_metadata.metadata_add_services(services, subnets)
self.assertEqual(expected, services)
def test_metadata_add_network_ipv4_dhcp(self):
idx = 1
fixed_ip = {'ip_address': '192.0.2.100', 'subnet_id': 'subnet_id'}
port = mock.Mock()
port.id = 'port_id'
subnet = mock.Mock()
subnet.cidr = '192.0.2.0/26'
subnet.ip_version = 4
subnet.is_dhcp_enabled = True
subnet.host_routes = [
{'destination': '192.0.2.64/26', 'nexthop': '192.0.2.1'},
{'destination': '192.0.2.128/26', 'nexthop': '192.0.2.1'}
]
subnet.dns_nameservers = ['192.0.2.11', '192.0.2.22']
network = mock.Mock()
network.id = 'network_id'
network.name = 'net_name'
networks = []
expected = [{'id': 'net_name1',
'ip_address': '192.0.2.100',
'link': 'port_id',
'netmask': '255.255.255.192',
'network_id': 'network_id',
'routes': [{'gateway': '192.0.2.1',
'netmask': '255.255.255.192',
'network': '192.0.2.64'},
{'gateway': '192.0.2.1',
'netmask': '255.255.255.192',
'network': '192.0.2.128'}],
'services': [{'address': '192.0.2.11', 'type': 'dns'},
{'address': '192.0.2.22', 'type': 'dns'}],
'type': 'ipv4_dhcp'}]
_network_metadata.metadata_add_network(networks, idx, fixed_ip, port,
network, subnet)
self.assertEqual(expected, networks)
def test_metadata_add_network_ipv6_stateful(self):
idx = 1
fixed_ip = {'ip_address': '2001:db8:1::10', 'subnet_id': 'subnet_id'}
port = mock.Mock()
port.id = 'port_id'
subnet = mock.Mock()
subnet.cidr = '2001:db8:1::/64'
subnet.ip_version = 6
subnet.ipv6_address_mode = 'dhcpv6-stateful'
subnet.host_routes = [
{'destination': '2001:db8:2::/64', 'nexthop': '2001:db8:1::1'},
{'destination': '2001:db8:3::/64', 'nexthop': '2001:db8:1::1'}
]
subnet.dns_nameservers = ['2001:db8:1::ee', '2001:db8:2::ff']
network = mock.Mock()
network.id = 'network_id'
network.name = 'net_name'
networks = []
expected = [
{'id': 'net_name1',
'ip_address': '2001:db8:1::10',
'link': 'port_id',
'netmask': 'ffff:ffff:ffff:ffff::',
'network_id': 'network_id',
'routes': [{'gateway': '2001:db8:1::1',
'netmask': 'ffff:ffff:ffff:ffff::',
'network': '2001:db8:2::'},
{'gateway': '2001:db8:1::1',
'netmask': 'ffff:ffff:ffff:ffff::',
'network': '2001:db8:3::'}],
'services': [{'address': '2001:db8:1::ee', 'type': 'dns'},
{'address': '2001:db8:2::ff', 'type': 'dns'}],
'type': 'ipv6_dhcpv6-stateful'}]
_network_metadata.metadata_add_network(networks, idx, fixed_ip, port,
network, subnet)
self.assertEqual(expected, networks)
|
from enum import IntEnum
from typing import TYPE_CHECKING, List, Optional, Union
import attr
from attr.converters import optional
from dis_snek.const import MISSING, Absent
from dis_snek.models.discord import DiscordObject
from dis_snek.models.snowflake import to_snowflake
from dis_snek.utils.attr_utils import define
from dis_snek.utils.serializer import dict_filter_none
if TYPE_CHECKING:
from dis_snek.models.discord_objects.guild import Guild
from dis_snek.models.discord_objects.user import User
from dis_snek.models.snowflake import Snowflake_Type
class StickerTypes(IntEnum):
"""Types of sticker."""
STANDARD = 1
"""An official sticker in a pack, part of Nitro or in a removed purchasable pack."""
GUILD = 2
"""A sticker uploaded to a Boosted guild for the guild's members."""
class StickerFormatTypes(IntEnum):
"""File formats for stickers."""
PNG = 1
APNG = 2
LOTTIE = 3
@define(kw_only=False)
class StickerItem(DiscordObject):
name: str = attr.ib()
"""Name of the sticker."""
format_type: StickerFormatTypes = attr.ib(converter=StickerFormatTypes)
"""Type of sticker image format."""
@define()
class Sticker(StickerItem):
"""Represents a sticker that can be sent in messages."""
pack_id: Optional["Snowflake_Type"] = attr.ib(default=None, converter=optional(to_snowflake))
"""For standard stickers, id of the pack the sticker is from."""
description: Optional[str] = attr.ib(default=None)
"""Description of the sticker."""
tags: str = attr.ib()
"""autocomplete/suggestion tags for the sticker (max 200 characters)"""
type: Union[StickerTypes, int] = attr.ib(converter=StickerTypes)
"""Type of sticker."""
available: Optional[bool] = attr.ib(default=True)
"""Whether this guild sticker can be used, may be false due to loss of Server Boosts."""
sort_value: Optional[int] = attr.ib(default=None)
"""The standard sticker's sort order within its pack."""
_user_id: Optional["Snowflake_Type"] = attr.ib(default=None, converter=optional(to_snowflake))
_guild_id: Optional["Snowflake_Type"] = attr.ib(default=None, converter=optional(to_snowflake))
async def get_creator(self) -> "User":
"""
Get the user who created this sticker
Returns:
User object
"""
return await self._client.cache.get_user(self._user_id)
async def get_guild(self) -> "Guild":
"""
Get the guild associated with this sticker
Returns:
Guild object
"""
return await self._client.cache.get_guild(self._guild_id)
async def edit(
self,
name: Absent[Optional[str]] = MISSING,
description: Absent[Optional[str]] = MISSING,
tags: Absent[Optional[str]] = MISSING,
reason: Absent[Optional[str]] = MISSING,
) -> "Sticker":
"""
Edit a sticker
Args:
name: New name of the sticker
description: New description of the sticker
tags: New tags of the sticker
reason: Reason for the edit
Returns:
The updated sticker instance
"""
if not self._guild_id:
raise ValueError("You can only edit guild stickers.")
payload = dict_filter_none(dict(name=name, description=description, tags=tags))
sticker_data = await self._client.http.modify_guild_sticker(payload, self._guild_id, self.id, reason)
return self.update_from_dict(sticker_data)
async def delete(self, reason: Optional[str] = MISSING):
"""
Delete a sticker
Args:
reason: Reason for the deletion
Raises:
ValueError: If you attempt to delete a non-guild sticker
"""
if not self._guild_id:
raise ValueError("You can only delete guild stickers.")
await self._client.http.delete_guild_sticker(self._guild_id, self.id, reason)
@define()
class StickerPack(DiscordObject):
"""Represents a pack of standard stickers."""
stickers: List["Sticker"] = attr.ib(factory=list)
"""The stickers in the pack."""
name: str = attr.ib()
"""Name of the sticker pack."""
sku_id: "Snowflake_Type" = attr.ib()
"""id of the pack's SKU."""
cover_sticker_id: Optional["Snowflake_Type"] = attr.ib(default=None)
"""id of a sticker in the pack which is shown as the pack's icon."""
description: str = attr.ib()
"""Description of the sticker pack."""
banner_asset_id: "Snowflake_Type" = attr.ib() # TODO CDN Asset
"""id of the sticker pack's banner image."""
|
""" Copyright (c) 2017-2021 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------
"""
import neoml.PythonWrapper as PythonWrapper
from .Dnn import Layer
from neoml.Utils import check_input_layers
class BertConv(Layer):
"""This layer performs a special convolution used in BERT architecture
This operation extracts the convolution regions as if 1-dimensional kernel of size (kernel size)
would move along (sequence length) padded by ((kernel size) - 1) / 2 zeros from both sides.
Then it applies different kernel values for every position along (sequence length), (batch size) and (num heads).
The only dimension shared between different kernels is (head size).
The kernel values are provided by an additional input.
:param input_layer: The input layer and the number of its output. If no number
is specified, the first output will be connected.
:type input_layer: object, tuple(object, int) of list of them
:param name: The layer name.
:type name: str, default=None
.. rubric:: Layer inputs:
(1) convolution data
- **BatchLength** is equal to (sequence length)
- **BatchWidth** is equal to (batch size)
- **Channels** is equal to (attention heads) * (head size)
- others are equal to 1
(2) convolution kernels
- **BatchLength** is equal to (sequence length)
- **BatchWidth** is equal to (batch size) * (attention heads)
- **Height** is equal to (kernel size)
- others are equal to 1
.. rubric:: Layer outputs:
(1) convolution result
- **BatchLength** is equal to (sequence length)
- **BatchWidth** is equal to (batch size) * (attention heads)
- **Height** is equal to (head size)
- others are equal to 1
"""
def __init__(self, input_layer, name=None):
if type(input_layer) is PythonWrapper.BertConv:
super().__init__(input_layer)
return
layers, outputs = check_input_layers(input_layer, 2)
internal = PythonWrapper.BertConv(str(name), layers, outputs)
super().__init__(internal)
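# A NumPy sketch of the operation described in the docstring above (my reading
# of it, not NeoML's actual implementation; the function name is mine). Shapes
# follow the docstring: data (seq, batch, heads*head_size), kernels
# (seq, batch*heads, kernel_size), output (seq, batch*heads, head_size).
def _bert_conv_reference(data, kernels, heads):
    import numpy as np
    seq_len, batch, hidden = data.shape
    head_size = hidden // heads
    kernel_size = kernels.shape[-1]
    pad = (kernel_size - 1) // 2
    x = data.reshape(seq_len, batch * heads, head_size)
    # zero-pad the sequence dimension on both sides
    x = np.pad(x, ((pad, pad), (0, 0), (0, 0)))
    out = np.zeros((seq_len, batch * heads, head_size), dtype=data.dtype)
    for t in range(seq_len):
        window = x[t:t + kernel_size]  # (kernel_size, batch*heads, head_size)
        # a different kernel for every sequence position / batch / head,
        # shared only over head_size
        out[t] = np.einsum('kbd,bk->bd', window, kernels[t])
    return out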
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import ana_cont
# NOTE: the kernel 'freq_fermionic_phsym' does not work well for insulating systems
# in any case it is not worse to use the usual 'freq_fermionic' kernel
# and make the data ph-symmetric (i.e. set real part to zero)
iwgrid,giw,err=np.loadtxt('testdata/giw.dat').transpose()
truewgrid,truespec=np.loadtxt('testdata/spectrum.dat').transpose()
wgrid=(np.exp(np.linspace(0.,5.,500))-1.)*5./(np.exp(5.)-1.)
#model=np.ones_like(wgrid)
model=wgrid**2*np.exp(-(wgrid))
model=model/np.trapz(model,wgrid)
niw=iwgrid.shape[0]
probl=ana_cont.AnalyticContinuationProblem(im_axis=iwgrid,re_axis=wgrid,im_data=giw,kernel_mode='freq_fermionic_phsym')
sol=probl.solve(method='maxent_svd',model=model,stdev=10*err)
f1=plt.figure(1)
p1=f1.add_subplot(121)
p2=f1.add_subplot(122)
p1.plot(wgrid,sol[0].A_opt)
p1.plot(truewgrid,truespec)
p2.plot(iwgrid,sol[0].backtransform)
p2.plot(iwgrid,giw,marker='x',linestyle='None')
f1.show()
input()  # keep the figure window open (use raw_input() on Python 2)
|
import structure
import util
materials = []
stringer_types = []
def load_wing(file):
f = open("wings/%s.wing" % file, 'r')
lines = f.readlines()
f.close()
return parse_wing(lines)
def parse_wing(lines):
wing = structure.Wing()
engine_lines = []
engine_active = False
fuel_tank_lines = []
fuel_tank_active = False
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if engine_active:
if '}' in line:
engine_active = False
engine_lines.append(line.replace('}', ''))
wing.engine = parse_engine(engine_lines)
else:
engine_lines.append(line)
elif fuel_tank_active:
if '}' in line:
fuel_tank_active = False
fuel_tank_lines.append(line.replace('}', ''))
wing.fuel_tank = parse_fuel_tank(fuel_tank_lines)
else:
fuel_tank_lines.append(line)
elif tokens[0] == "name":
wing.name = tokens[1]
elif tokens[0] == "wing_box":
wing.wing_box = load_wing_box(tokens[1])
elif tokens[0] == "surface_area":
wing.surface_area = float(tokens[1])
elif tokens[0] == "engine":
engine_lines.clear()
engine_lines.append(tokens[1].replace('{', ''))
engine_active = True
elif tokens[0] == "fuel_tank":
fuel_tank_lines.clear()
fuel_tank_lines.append(tokens[1].replace('{', ''))
fuel_tank_active = True
wing.fuel_tank.wing_box = wing.wing_box
return wing
def parse_engine(lines):
engine = structure.Engine()
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if tokens[0] == "position":
pos = tokens[1].split(',')
engine.x = float(pos[0])
engine.y = float(pos[1])
engine.z = float(pos[2])
elif tokens[0] == "thrust":
engine.thrust = float(tokens[1])
elif tokens[0] == "weight":
engine.weight = float(tokens[1])
return engine
def parse_fuel_tank(lines):
fuel_tank = structure.FuelTank()
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if tokens[0] == "range":
fuel_tank_range = tokens[1].split(',')
fuel_tank.start_y = float(fuel_tank_range[0])
fuel_tank.end_y = float(fuel_tank_range[1])
return fuel_tank
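# For illustration, a hypothetical .wing file this parser accepts (field names
# taken from parse_wing/parse_engine/parse_fuel_tank above; values invented):
# name: demo-wing
# wing_box: demo-box
# surface_area: 52.3
# engine: {
#     position: 1.0, 2.5, -0.3
#     thrust: 120000
#     weight: 2300
# }
# fuel_tank: {
#     range: 2.0, 12.0
# }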
def load_wing_box(file):
f = open("wingboxes/%s.wbo" % file, 'r')
lines = f.readlines()
f.close()
return parse_wing_box(lines)
def parse_wing_box(lines):
wing_box = structure.WingBox()
section_lines = []
level = 0
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if level > 0:
if '{' in line:
level += 1
section_lines.append(line)
elif '}' in line:
level -= 1
section_lines.append(line)
if level == 0:
section_lines[-1] = section_lines[-1].replace('}', '')
wing_box.sections.append(parse_wing_box_section(section_lines))
else:
section_lines.append(line)
elif tokens[0] == "name":
wing_box.name = tokens[1]
elif tokens[0] == "range":
wing_box_range = tokens[1].split(',')
wing_box.start_y = float(wing_box_range[0])
wing_box.end_y = float(wing_box_range[1])
elif tokens[0] == "width":
wing_box.width = util.GeometryFunction(tokens[1])
elif tokens[0] == "height":
wing_box.height = util.GeometryFunction(tokens[1])
elif tokens[0] == "material":
wing_box.material = get_material(tokens[1])
elif tokens[0] == "section":
section_lines.clear()
section_lines.append(tokens[1].replace('{', ''))
level += 1
return wing_box
def parse_wing_box_section(lines):
wing_box_section = structure.WingBoxSection()
stringer_set_lines = []
level = 0
corner_set_active = False
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if level > 0:
if '{' in line:
level += 1
elif '}' in line:
level -= 1
stringer_set_lines.append(line)
if level == 0:
stringer_set_lines[-1] = stringer_set_lines[-1].replace('}', '')
if corner_set_active:
corner_set_active = False
stringer_set_lines.extend(["name:L-stringer", "amount:2", "range:0.0,1.0", "surface:bottom"])
wing_box_section.stringer_sets.append(parse_stringer_set(stringer_set_lines))
stringer_set_lines[-1] = stringer_set_lines[-1].replace("bottom", "top")
wing_box_section.stringer_sets.append(parse_stringer_set(stringer_set_lines))
else:
wing_box_section.stringer_sets.append(parse_stringer_set(stringer_set_lines))
else:
stringer_set_lines.append(line)
elif tokens[0] == "range":
section_range = tokens[1].split(',')
wing_box_section.start_y = float(section_range[0])
wing_box_section.end_y = float(section_range[1])
elif tokens[0] == "spar_thickness":
spar_thickness = tokens[1].split(',')
wing_box_section.front_spar_t = float(spar_thickness[0])
wing_box_section.back_spar_t = float(spar_thickness[1])
elif tokens[0] == "panel_thickness":
panel_thickness = tokens[1].split(',')
wing_box_section.top_panel_t = float(panel_thickness[0])
wing_box_section.bottom_panel_t = float(panel_thickness[1])
elif tokens[0] == "corner":
stringer_set_lines.clear()
stringer_set_lines.append(tokens[1].replace('{', ''))
corner_set_active = True
level += 1
elif tokens[0] == "stringer_set":
stringer_set_lines.clear()
stringer_set_lines.append(tokens[1].replace('{', ''))
level += 1
return wing_box_section
def parse_stringer_set(lines):
stringer_set = structure.StringerSet()
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if tokens[0] == "name":
stringer_set.stringer_type = get_stringer_type(tokens[1])
elif tokens[0] == "amount":
stringer_set.amount = int(tokens[1])
elif tokens[0] == "stringer_width":
stringer_set.stringer_width = float(tokens[1])
elif tokens[0] == "stringer_height":
stringer_set.stringer_height = float(tokens[1])
elif tokens[0] == "stringer_thickness":
stringer_set.stringer_thickness = float(tokens[1])
elif tokens[0] == "range":
stringers_range = tokens[1].split(',')
stringer_set.start_x = float(stringers_range[0])
stringer_set.end_x = float(stringers_range[1])
elif tokens[0] == "surface":
stringer_set.surface_top = tokens[1] == "top"
return stringer_set
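# For illustration, a hypothetical .wbo file matching the grammar parsed above
# (values invented; width and height take util.GeometryFunction expressions,
# whose syntax is defined elsewhere):
# name: demo-box
# range: 0.0, 14.0
# width: 2.0
# height: 0.6
# material: aluminium-7075
# section: {
#     range: 0.0, 7.0
#     spar_thickness: 0.004, 0.003
#     panel_thickness: 0.005, 0.004
#     stringer_set: {
#         name: L-stringer
#         amount: 10
#         stringer_width: 0.04
#         stringer_height: 0.04
#         stringer_thickness: 0.003
#         range: 0.1, 0.9
#         surface: top
#     }
# }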
def load_material(file):
f = open("materials/%s.mat" % file, 'r')
lines = f.readlines()
f.close()
return parse_material(lines)
def parse_material(lines):
material = structure.Material()
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if tokens[0] == "name":
material.name = tokens[1]
elif tokens[0] == "e-modulus":
material.e_modulus = float(tokens[1])
elif tokens[0] == "shear_modulus":
material.shear_modulus = float(tokens[1])
elif tokens[0] == "yield_stress":
material.yield_stress = float(tokens[1])
elif tokens[0] == "poisson-factor":
material.poisson_factor = float(tokens[1])
elif tokens[0] == "density":
material.density = float(tokens[1])
return material
def load_load_case(file):
f = open("loadcases/%s.case" % file, 'r')
lines = f.readlines()
f.close()
return parse_load_case(lines)
def parse_load_case(lines):
load_case = structure.LoadCase()
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if tokens[0] == "wing":
load_case.wing = load_wing(tokens[1])
elif tokens[0] == "step":
load_case.step = float(tokens[1])
elif tokens[0] == "load_factor":
load_case.load_factor = float(tokens[1])
elif tokens[0] == "velocity":
load_case.velocity = float(tokens[1])
elif tokens[0] == "air_density":
load_case.density = float(tokens[1])
elif tokens[0] == "aircraft_weight":
load_case.aircraft_weight = float(tokens[1])
elif tokens[0] == "limit_deflection":
load_case.limit_deflection = float(tokens[1])
elif tokens[0] == "limit_twist":
load_case.limit_twist = float(tokens[1])
import numpy
import wingloader
load_case.range = numpy.arange(load_case.wing.wing_box.start_y, load_case.wing.wing_box.end_y, load_case.step)
wingloader.load_wing_properties(load_case, load_case.wing)
return load_case
def load_stringer_type(file):
f = open("stringers/%s.stri" % file, 'r')
lines = f.readlines()
f.close()
return parse_stringer_type(lines)
def parse_stringer_type(lines):
stringer_type = structure.StringerType()
for line in lines:
tokens = [i.strip() for i in line.split(':')]
if tokens[0] == "name":
stringer_type.name = tokens[1]
elif tokens[0] == "area":
stringer_type.area = util.GeometryFunction(tokens[1])
elif tokens[0] == "centroid_x":
stringer_type.centroid_x = util.GeometryFunction(tokens[1])
elif tokens[0] == "centroid_z":
stringer_type.centroid_z = util.GeometryFunction(tokens[1])
elif tokens[0] == "moi_xx":
stringer_type.moi_xx = util.GeometryFunction(tokens[1])
elif tokens[0] == "moi_zz":
stringer_type.moi_zz = util.GeometryFunction(tokens[1])
return stringer_type
def get_stringer_type(name):
if len(stringer_types) == 0:
init_stringer_types()
for stringer_type in stringer_types:
if stringer_type.name == name:
return stringer_type
return None
def init_stringer_types():
import os
for root, dirs, files in os.walk("stringers"):
for file in files:
if file.endswith(".stri"):
stringer_types.append(load_stringer_type(file.split('.')[0]))
def get_material(name):
if len(materials) == 0:
init_material()
for material in materials:
if material.name == name:
return material
return None
def init_material():
import os
for root, dirs, files in os.walk("materials"):
for file in files:
if file.endswith(".mat"):
materials.append(load_material(file.split('.')[0]))
|
from abc import ABC, abstractmethod
from ..timeseries import TimeSeries
from typing import List
class RegressiveModel(ABC):
"""
This is a base class for various implementations of multi-variate models - models predicting time series
from one or several time series. It also allows to do ensembling.
TODO: Extend this to a "DynamicRegressiveModel" class, which acts on List[List[TimeSeries]].
TODO: The first List[] would contain time-sliding lists of time series, letting the model
TODO: be able to learn how to change weights over time. When len() of outer List[] is 0 it's a particular case
"""
@abstractmethod
def __init__(self):
# Stores training date information:
self.train_features: List[TimeSeries] = None
self.train_target: TimeSeries = None
# state
self._fit_called = False
@abstractmethod
def fit(self, train_features: List[TimeSeries], train_target: TimeSeries) -> None:
assert len(train_features) > 0, 'Need at least one feature series'
assert all([s.has_same_time_as(train_target) for s in train_features]), 'All provided time series must ' \
'have the same time index'
self.train_features = train_features
self.train_target = train_target
self._fit_called = True
@abstractmethod
def predict(self, features: List[TimeSeries]) -> TimeSeries:
"""
:return: A TimeSeries containing the prediction obtained from [features], of same length as [features]
"""
assert self._fit_called, 'fit() must be called before predict()'
assert len(features) == len(self.train_features), 'Provided features must have same dimensionality as ' \
'training features. There were {} training features and ' \
'the function has been called with {} features' \
.format(len(self.train_features), len(features))
def residuals(self) -> TimeSeries:
"""
:return: a time series of residuals (absolute errors of the model on the training set)
"""
assert self._fit_called, 'fit() must be called before residuals()'
train_pred = self.predict(self.train_features)
return abs(train_pred - self.train_target)
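# For illustration, a hypothetical minimal concrete subclass (not part of this
# library; the class name is mine) that ensembles its input series by simple
# averaging. It assumes TimeSeries supports + and / with a scalar, the same way
# residuals() above already assumes support for -.
class MeanEnsembleModel(RegressiveModel):
    def __init__(self):
        super().__init__()

    def fit(self, train_features: List[TimeSeries], train_target: TimeSeries) -> None:
        super().fit(train_features, train_target)

    def predict(self, features: List[TimeSeries]) -> TimeSeries:
        super().predict(features)
        # element-wise mean of all feature series
        total = features[0]
        for series in features[1:]:
            total = total + series
        return total / len(features)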
|
'''
Some applications you write may need multi level options
to perform the tasks you want your users to follow.
For instance, what if you want to list certain items:
list cart contents
list products -shampoo
list locations
etc....
These options may or may not take additional arguments.
The solution in this file is only one way to resolve
this type of problem and is not the most complex as all
code is contained in this single file.
It will support the following calls
list cart contents
list products -shampoo
list locations
get price -p product_name
help
quit
'''
'''
Define functions that will be called when the user enters in
the appropriate command.
Note that each function takes in an args argument which will be
any flags that the function would support.
'''
def parse_additional_arguments(args):
'''
Helper function for any exposed functions to parse additional
arguments that may be passed to the core function.
'''
arguments = {}
current_argument = None
argument_list = []
for arg in args:
arg = arg.strip()
if len(arg) == 0:
continue
if arg.startswith('-'):
if len(argument_list) and current_argument:
arguments[current_argument] = " ".join(argument_list)
current_argument = arg.lower()
arguments[current_argument] = None  # key is the lowercased flag
argument_list = []
else:
argument_list.append(arg)
# Make sure we don't miss any....
if len(argument_list) and current_argument:
arguments[current_argument] = " ".join(argument_list)
return arguments
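# For example, a worked call of the parser above:
#   parse_additional_arguments(['-p', 'kids', 'shampoo'])
# returns {'-p': 'kids shampoo'}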
def list_cart_contents(args):
parsed_arguments = parse_additional_arguments(args)
print("list_cart_contents args = ", parsed_arguments)
def list_products(args):
parsed_arguments = parse_additional_arguments(args)
print("list_products args = ", parsed_arguments)
def list_locations(args):
parsed_arguments = parse_additional_arguments(args)
print("list_locations args = ", parsed_arguments)
def get_price(args):
parsed_arguments = parse_additional_arguments(args)
print("get_price args = ", parsed_arguments)
'''
The next two sections are
1. Functions to print out a menu which is declared as a
dictionary with leaf nodes being actual functions.
2. The menu dictionary itself.
'''
''' ### Section 1 : Display Dictionary as a menu ### '''
def display_menu_help_sub(dictionary, indent):
'''
Recursive helper called by display_menu_help when a
dictionary value is itself a dictionary.
'''
indent_spaces = ' ' * indent
for sub_command in dictionary:
print(indent_spaces + sub_command)
if isinstance(dictionary[sub_command], dict):
display_menu_help_sub(dictionary[sub_command], indent + 1)
def display_menu_help(menu_dictionary, args):
'''
This function starts the process of iterating over the
dictionary menu. A dictionary can only contain
1. Keys that identify another dictionary
2. Keys that identify actual functions.
'''
print("Shopping Cart Help:")
for command in menu_dictionary.keys():
print(command)
if not callable(menu_dictionary[command]):
display_menu_help_sub(menu_dictionary[command], 1)
print('')
'''
### Section 2 : Define dictionary menu ###
Set up your program options as a dictionary with leaf nodes
being functions to call.
'''
menu_options = {
"list": {
"cart": {
"contents": list_cart_contents
},
"products": list_products,
"locations": list_locations
} ,
"get": {
"price": get_price
},
"help": display_menu_help,
"quit": quit
}
'''
Finally, we can put a program loop in place that no matter what
is defined above, will work as expected.
Note that both help and quit are special cases
1. Help needs the menu passed along
2. Quit should have NO arguments passed along
Run the program and try different things
list cart contents
get price -p kids shampoo
help
quit
'''
while True:
user_input = input("What would you like to do : > ")
inputs = user_input.split(' ')
if len(inputs) > 0 and inputs[0] in menu_options.keys():
current_action = menu_options
last_command_index = 0
for next_input in inputs:
next_input = next_input.strip()
if len(next_input) == 0 :
continue
last_command_index += 1
if next_input not in current_action.keys():
print("Invalid Command : ", " ".join(inputs))
display_menu_help(menu_options, None)
# Hit something we don't recognize, get out of this loop
break
elif callable(current_action[next_input]):
if next_input == 'help':
# Special case is help where we need to pass in our menu
# dictionary, all others expect just the remainder of the
# arguments provided.
current_action[next_input](menu_options, inputs[last_command_index:])
elif next_input == 'quit':
# Second special case, quit will print out whatever we
# pass in, but we really just want to quit.
current_action[next_input]()
else:
current_action[next_input](inputs[last_command_index:])
# Don't iterate more, the remainder must be arguments
break
else:
current_action = current_action[next_input]
else:
print("Invalid Command : ", " ".join(inputs))
display_menu_help(menu_options, None)
|
from micro_services.site1.server import start_server
start_server()
|
from .build import *
from .default_trainer import *
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.Jackpot.as_view(), name='jackpot'),
]
|
from sb3_contrib.qrdqn.policies import CnnPolicy, MlpPolicy
from sb3_contrib.qrdqn.qrdqn import QRDQN
|
# coding: utf-8
import os
import uvicorn
from shared_code.app import app  # NOTE: rebound by the import below
from shared_code.app_timer import app
port_http = os.getenv("PORT_HTTP", "8000")
if __name__ == "__main__":
uvicorn.run(app, host="127.0.0.1", port=int(port_http))
|
# Scraper for Kansas Supreme Court
# CourtID: kan_p
from juriscraper.AbstractSite import logger
from juriscraper.OpinionSiteLinearWebDriven import OpinionSiteLinearWebDriven
class Site(OpinionSiteLinearWebDriven):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.court = "Supreme Court"
self.court_id = self.__module__
self.pages = []
self.pages_to_process = 5
self.status = "Published"
self.url = "https://www.kscourts.org/Cases-Opinions/Opinions.aspx"
def _download(self, request_dict={}):
if self.test_mode_enabled():
test_page = super()._download(request_dict)
self.pages.append(test_page)
return
self.initiate_webdriven_session()
self.click_through_filter_form()
self.paginate_through_search_results()
def _process_html(self):
path_non_header_rows = "//table[@class='info-table']//tr[position()>1]"
for page in self.pages:
for row in page.xpath(path_non_header_rows):
self.cases.append(
{
"date": self.get_cell_text(row, 1),
"docket": self.get_cell_text(row, 2),
"name": self.get_cell_text(row, 3),
"url": self.get_cell_link(row, 6),
}
)
def click_through_filter_form(self):
"""Fill out and the submit the form
There is some bug that is preventing the standard webdriver
functions from working on this website. The form elements,
despite appearing proper in a screen shot, are recognized
by the webdriver has have hugely negative X coordinates,
which prevents us from being able to click them. I've tried
an ungodly amount of solution to scroll to the element, or
move it, before clicking, but simply couldn't get it to work.
So instead, we are just executing jQuery scripts on the page
to unselect and select form options before submitting.
"""
# build and execute jQuery to manipulate form
id_status = self.get_form_id("Published")
id_court = self.get_form_id("Court")
jquery_remove = "$('#%s option').each(function(){$(this).removeAttr('selected');});"
jquery_select = (
"$('#%s option[value=\"%s\"]').attr('selected','selected');"
)
jquery = ";".join(
[
jquery_remove % id_status,
jquery_remove % id_court,
jquery_select % (id_status, self.status),
jquery_select % (id_court, self.court),
]
)
self.webdriver.execute_script(jquery)
# submit the form and wait to load
id_submit = self.get_form_id("Filter", "btn")
self.find_element_by_id(id_submit).click()
def get_cell_link(self, row, index):
"""Return the first anchor href in cell [index] in row"""
return row.xpath(".//td[%d]//a/@href" % index)[0]
def get_cell_text(self, row, index):
"""Return the text of cell [index] in row"""
return row.xpath(".//td[%d]" % index)[0].text_content().strip()
def paginate_through_search_results(self):
"""Click through search results pagination and store each page"""
page_numbers_to_process = list(range(2, self.pages_to_process + 1))
logger.info("Adding first result page")
page_current = self.get_page()
self.pages.append(page_current)
for page_number in page_numbers_to_process:
path = "//*[@id='pagination1']//li/a[text()=%d]" % page_number
if not page_current.xpath(path):
logger.info("Done paginating, no more results")
break
logger.info("Adding search result page %d" % page_number)
pagination_link = self.find_element_by_xpath(path)
pagination_link.click()
page_current = self.get_page()
self.pages.append(page_current)
def get_form_id(self, field, field_type="drp"):
"""Return the werid ASPX id attribute for [field] of [field_type]"""
return (
"p_lt_zonePagePlaceholder_pageplaceholder_p_lt_ctl02_"
"OpinionFilter1_filterControl_" + field_type + field
)
def get_dropdown_path(self, id, value):
"""Return xpath for select form's dropdown element [id] with [value]"""
format = "//*[@id='{id}']/option[@value='{value}']"
return format.format(id=id, value=value)
|
"""
Created on Sat Nov 18 23:12:08 2017
@author: Utku Ozbulak - github.com/utkuozbulak
Modified by WooJu Lee
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from torchvision import models
from .misc_functions import preprocess_image, recreate_image, save_image
from PIL import Image
from PIL import ImageFilter
__all__ = [
"CNNLayerVisualization",
]
class CNNLayerVisualization():
"""
    Produces an image that maximizes the activation of a specific
    convolution filter (by gradient descent on the negative mean activation)
cnn_layer = "layer1"
block_pos = 0
sub_layer = "conv1"
filter_pos = 5
"""
def __init__(self, model, selected_layer, selected_block, selected_sublayer, selected_filter):
self.model = model
self.model.eval()
self.model_name = model.__class__.__name__
self.selected_layer = selected_layer
self.selected_block = selected_block
self.selected_sublayer = selected_sublayer
self.selected_filter = selected_filter
self.conv_output = 0
self.size = 96
self.initial_learning_rate = 100
self.upscaling_factor = 1.2
self.upscaling_steps = 12
self.iteration_steps = 20
self.blur = True
        # Create the folder to export images into if it does not exist
        # (frames are saved under '../generated/layer/' below)
        if not os.path.exists('../generated/layer'):
            os.makedirs('../generated/layer')
    def hook_layer(self):
        def hook_function(module, inputs, outputs):
            # A forward hook receives (module, inputs, outputs); keep the
            # selected filter's activation map from this layer's output
            self.conv_output = outputs[0, self.selected_filter]
# Hook the selected layer
for n, m in self.model.named_modules():
if n == str(self.selected_layer):
for i, j in m[self.selected_block].named_modules():
if i == str(self.selected_sublayer):
j.register_forward_hook(hook_function)
# self.model[self.selected_layer].register_forward_hook(hook_function)
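        # e.g. with selected_layer="layer1", selected_block=0 and
        # selected_sublayer="conv1", the hook lands on model.layer1[0].conv1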
def visualise_layer_with_hooks(self):
# Hook the selected layer
self.hook_layer()
# Generate a random image
sz = self.size
self.created_image = np.uint8(np.random.uniform(150, 180, (sz, sz, 3)))
for i in range(self.upscaling_steps):
# Process image and return variable
self.processed_image = preprocess_image(self.created_image, False)
# Define optimizer for the image
optimizer = SGD([self.processed_image], lr=self.initial_learning_rate)
for j in range(self.iteration_steps):
optimizer.zero_grad()
# Assign create image to a variable to move forward in the model
output = self.model(self.processed_image)
                # The loss is the negative mean of the selected filter's
                # output, so minimizing it maximizes that filter's activation
                loss = -torch.mean(self.conv_output)
print('Upscaling:', str(i+1), 'Iteration:', str(j+1), 'Loss:', "{0:.2f}".format(loss.data.numpy()))
# Backward
loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
# Upscale the image
sz = int(sz * self.upscaling_factor)
self.created_image = Image.fromarray(self.created_image)
self.created_image = self.created_image.resize((sz, sz), resample=Image.BICUBIC)
            # Apply optional blur between upscaling steps (honor self.blur)
            if self.blur:
                self.created_image = self.created_image.filter(ImageFilter.BoxBlur(radius=1))
# Save image
if (i+1) % 6 == 0:
# Save image
im_path = '../generated/layer/' + str(self.model_name) + '_'+ str(self.selected_layer) + '_b' + \
str(self.selected_block) + '_' + str(self.selected_sublayer) + '_f' + \
str(self.selected_filter) + '_size' + str(self.size) + '_up' + str(i + 1) + '_blur' + '_lr' + \
str(self.initial_learning_rate) + '.jpg'
save_image(self.created_image, im_path)
if __name__ == '__main__':
# ResNet architecture
# ResNet(
# (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
# (layer1): Sequential(
# (0): BasicBlock(
# (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (1): BasicBlock(
# (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (2): BasicBlock(
# (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (layer2): Sequential(
# (0): BasicBlock(
# (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (downsample): Sequential(
# (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
# (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (1): BasicBlock(
# (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (2): BasicBlock(
# (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (3): BasicBlock(
# (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (layer3): Sequential(
# (0): BasicBlock(
# (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (downsample): Sequential(
# (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
# (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (1): BasicBlock(
# (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (2): BasicBlock(
# (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (3): BasicBlock(
# (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (4): BasicBlock(
# (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (5): BasicBlock(
# (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (layer4): Sequential(
# (0): BasicBlock(
# (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (downsample): Sequential(
# (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
# (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (1): BasicBlock(
# (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# (2): BasicBlock(
# (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (relu): ReLU(inplace=True)
# (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
# (fc): Linear(in_features=512, out_features=1000, bias=True)
# )
# ResNet34 architecture
# (conv1)-(bn1)-(relu)-(maxpool)-(layer1)-(layer2)-(layer3)-(layer4)
#
cnn_layer = "layer4"
block_pos = 2
sub_layer = "conv2"
filter_pos = 5
# Fully connected layer is not needed
pretrained_model = models.resnet34(pretrained=True).eval()
# res_layers = list(pretrained_model.children())
layer_vis = CNNLayerVisualization(pretrained_model, cnn_layer, block_pos, sub_layer, filter_pos)
# Layer visualization with pytorch hooks
layer_vis.visualise_layer_with_hooks()
# Layer visualization without pytorch hooks
# layer_vis.visualise_layer_without_hooks()
|
from rest_framework import serializers
from core.models_charity import CharityModel, DonationModel
class CharityModelSerializer(serializers.ModelSerializer):
"""Serializer for new charity event"""
class Meta:
model = CharityModel
fields = '__all__'
read_only_fields = ('id', 'donation')
|
""" OpenstackDriver for Network
based on BaseDriver
"""
from keystoneauth1.identity import v3
from keystoneauth1 import session
from neutronclient.v2_0 import client
from calplus.v1.network.drivers.base import BaseDriver, BaseQuota
PROVIDER = "OPENSTACK"
class NeutronDriver(BaseDriver):
"""docstring for OpenstackDriver"""
def __init__(self, cloud_config):
super(NeutronDriver, self).__init__()
self.auth_url = cloud_config['os_auth_url']
self.project_name = cloud_config['os_project_name']
self.username = cloud_config['os_username']
self.password = cloud_config['os_password']
        self.user_domain_name = \
            cloud_config.get('os_user_domain_name', 'default')
        self.project_domain_name = \
            cloud_config.get('os_project_domain_name', 'default')
self.driver_name = \
cloud_config.get('driver_name', 'default')
self.tenant_id = cloud_config.get('tenant_id', None)
self.limit = cloud_config.get('limit', None)
self._setup()
def _setup(self):
auth = v3.Password(auth_url=self.auth_url,
user_domain_name=self.user_domain_name,
username=self.username,
password=self.password,
project_domain_name=self.project_domain_name,
project_name=self.project_name)
sess = session.Session(auth=auth)
self.client = client.Client(session=sess)
self.network_quota = OpenstackQuota(
self.client, self.tenant_id, self.limit)
def _check_external_network(self):
networks = self.client.list_networks().get('networks')
for network in networks:
external = network.get('provider:physical_network')
if external is not None:
return network.get('id')
return None
def _check_router_external_gateway(self):
routers = self.client.list_routers().get('routers')
for router in routers:
external = router.get('external_gateway_info')
if external is not None:
return router.get('id')
return None
def create(self, name, cidr, **kargs):
admin_state_up = kargs.pop('admin_state_up', True)
ip_version = kargs.pop('ip_version', 4)
# step1: create network with empty name and admin_state_up
network = {'name': '',
'admin_state_up': admin_state_up}
net = self.client.create_network({'network': network}).get('network')
network_id = net['id']
# step 2: create subnet
sub = {"network_id": network_id,
"ip_version": ip_version,
"cidr": cidr,
"name": name}
subnet = self.client.create_subnet({'subnet': sub}).get('subnet')
result = {'name': subnet['name'],
'description': None,
'id': subnet['id'],
'cidr': subnet['cidr'],
'cloud': PROVIDER,
'gateway_ip': subnet['gateway_ip'],
'security_group': None,
'allocation_pools': subnet['allocation_pools'],
'dns_nameservers': subnet['dns_nameservers']
}
return result
def show(self, subnet_id):
subnet = self.client.show_subnet(subnet_id).get('subnet')
result = {'name': subnet['name'],
'description': None,
'id': subnet['id'],
'cidr': subnet['cidr'],
'cloud': PROVIDER,
'gateway': subnet['gateway_ip'],
'security_group': None,
'allocation_pools': subnet['allocation_pools'],
'dns_nameservers': subnet['dns_nameservers']
}
return result
def list(self, **search_opts):
subnets = self.client.list_subnets(**search_opts).get('subnets')
result = []
for subnet in subnets:
sub = {'name': subnet['name'],
'description': None,
'id': subnet['id'],
'cidr': subnet['cidr'],
'cloud': PROVIDER,
'gateway': subnet['gateway_ip'],
'security_group': None,
'allocation_pools': subnet['allocation_pools'],
'dns_nameservers': subnet['dns_nameservers']
}
result.append(sub)
return result
    def update(self, network_id, network):
        # Updating a network is not supported yet; placeholder for future work
        return None
def delete(self, network_id):
return self.client.delete_network(network_id)
    def connect_external_net(self, subnet_id):
        router_id = self._check_router_external_gateway()
        if router_id is None:
            network_id = self._check_external_network()
            if network_id is None:
                raise Exception("No external network available")
            router = {
                "name": "default",
                "external_gateway_info": {
                    "network_id": "{}".format(network_id)
                }
            }
            # Remember the id of the newly created router so the subnet
            # interface below is attached to it
            router = self.client.create_router({'router': router})
            router_id = router['router']['id']
        body = {
            "subnet_id": "{}".format(subnet_id)
        }
        return self.client.add_interface_router(router_id, body)
    def disconnect_external_net(self, network_id):
        # Just detach every interface connecting this network to the router
        # that has an external gateway (not implemented yet)
        pass
def allocate_public_ip(self):
external_net = self._check_external_network()
if external_net:
create_dict = {'floating_network_id': external_net,
'tenant_id': self.network_quota.tenant_id}
self.client.create_floatingip({'floatingip': create_dict})
else:
return False
return True
def list_public_ip(self, **search_opts):
"""
:param search_opts:
:return: list PublicIP
"""
result = self.client.list_floatingips(**search_opts)
ips = result.get('floatingips')
return_format = []
for ip in ips:
return_format.append({
'public_ip': ip.get('floating_ip_address'),
'id': ip.get('id')
})
return return_format
def release_public_ip(self, public_ip_id):
self.client.delete_floatingip(public_ip_id)
return True
class OpenstackQuota(BaseQuota):
"""docstring for OpenstackQuota"""
def __init__(self, client, tenant_id=None, limit=None):
super(OpenstackQuota, self).__init__()
self.client = client
self.tenant_id = tenant_id
self.limit = limit
self._setup()
def _setup(self):
if self.tenant_id is None:
self.tenant_id = \
self.client.get_quotas_tenant().get('tenant')['tenant_id']
if self.limit is None:
self.limit = self.client.show_quota(self.tenant_id).get('quota')
def get_networks(self):
subnets = self.client.list_subnets().get('subnets')
list_cidrs = []
for subnet in subnets:
list_cidrs.append({
"net_id": subnet['id'],
"cidr": "{}".format(subnet['cidr']),
"allocation_pools": subnet['allocation_pools']
})
networks = {
"max": self.limit['network'],
"used": len(list_cidrs),
"list_cidrs": list_cidrs,
"VPCs": None
}
return networks
def get_security_groups(self):
list_security_groups = self.client.list_security_groups(
tenant_id=self.tenant_id).get('security_groups')
list_scgs = []
for scg in list_security_groups:
list_scgs.append({
"security_group_id": scg['id'],
"rules_max": self.limit['security_group_rule'],
"rules_used": len(scg['security_group_rules']),
"list_rules": scg['security_group_rules']
})
security_groups = {
"max": self.limit['security_group'],
"used": len(list_security_groups),
"list_security_groups": list_scgs
}
return security_groups
def get_floating_ips(self):
ips = self.client.list_floatingips().get('floatingips')
list_ips = []
for ip in ips:
list_ips.append(ip['floating_ip_address'])
        floating_ips = {
            "max": self.limit['floatingip'],  # quota key for floating IPs
            "used": len(list_ips),
            "list_floating_ips": list_ips
        }
return floating_ips
def get_routers(self):
rts = self.client.list_routers().get('routers')
list_routers = []
for router in rts:
list_routers.append({
"router_id": router['id'],
"is_gateway": True
})
routers = {
"max": self.limit['router'],
"used": len(list_routers),
"list_routers": list_routers
}
return routers
def get_internet_gateways(self):
routers = self.client.list_routers().get('routers')
internet_gateways = []
for router in routers:
egi = router.get('external_gateway_info', None)
if egi is not None:
internet_gateways.append({
'internet_gateway_id': router['id']
})
return internet_gateways
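# A minimal usage sketch (the endpoint and credentials below are made-up
# placeholders; the keys mirror the ones read in NeutronDriver.__init__):
#
#     driver = NeutronDriver({
#         'os_auth_url': 'http://controller:5000/v3',
#         'os_project_name': 'demo',
#         'os_username': 'demo',
#         'os_password': 'secret',
#     })
#     subnet = driver.create('demo-subnet', '10.0.0.0/24')
#     print(driver.list())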
|
from setuptools import setup
from setuptools import find_packages
setup(
name='packaging_project', # project name
version='0.1.0',
description="Brando's sample packaging tutorial",
#url
author='Brando Miranda',
author_email='miranda9@illinois.edu',
license='MIT',
    packages=find_packages(),  # default: find_packages(where='.', exclude=())
    install_requires=['torch', 'numpy', 'scikit-learn', 'scipy', 'matplotlib',
                      'pyyaml', 'torchviz', 'tensorboard', 'graphviz', 'torchvision']
)
|
from util.util import base
from util import grid
import numpy as np
class solve_day(base):
def __init__(self, type='data'):
super().__init__(type=type)
self.data = grid.parse_as_grid(self.data)
def part1(self):
path, runs = grid.path_of_least_resistance(self.data)
return sum([self.data[p[1]][p[0]] for p in path][1:])
def part2(self):
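        # Tile the grid 5x in each direction; each tile step to the right or
        # downward adds 1 to every cell, wrapping values above 9 back to 1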
data = self.data.copy()
starting_grid = data.copy()
for i in range(1,5):
new_data = starting_grid+1
new_data = np.where(new_data>9, 1, new_data)
starting_grid = new_data.copy()
data = np.append(data, new_data, axis=1)
starting_grid = data.copy()
for i in range(1,5):
new_data = starting_grid+1
new_data = np.where(new_data>9, 1, new_data)
starting_grid = new_data.copy()
data = np.append(data, new_data, axis=0)
path, runs = grid.path_of_least_resistance(data)
return sum([data[p[1]][p[0]] for p in path][1:])
if __name__ == '__main__':
s = solve_day('lines')
s.sub(s.part1(), part='a')
s.sub(s.part2(), part='b')
|
import warnings
from abc import ABC
from typing import Optional, Union, List
import deprecation
from py4j.java_gateway import JavaObject
from .local_predictor import LocalPredictable
from .mixins import HasLazyPrintTransformInfo
from ..batch.base import BatchOperator, BatchOperatorWrapper
from ..common.types.bases.model_stream_scan_params import ModelStreamScanParams
from ..common.types.bases.params import Params
from ..common.types.bases.with_params import WithParams
from ..common.types.conversion.type_converters import py_list_to_j_array
from ..common.types.file_system.file_system import FilePath
from ..py4j_util import get_java_gateway, get_java_class
from ..stream.base import StreamOperatorWrapper, StreamOperator
LAZY_PRINT_TRANSFORM_DATA_ENABLED = "lazyPrintTransformDataEnabled"
LAZY_PRINT_TRANSFORM_DATA_TITLE = "lazyPrintTransformDataTitle"
LAZY_PRINT_TRANSFORM_DATA_NUM = "lazyPrintTransformDataNum"
LAZY_PRINT_TRANSFORM_STAT_ENABLED = "lazyPrintTransformStatEnabled"
LAZY_PRINT_TRANSFORM_STAT_TITLE = "lazyPrintTransformStatTitle"
LAZY_PRINT_TRAIN_INFO_ENABLED = "lazyPrintTrainInfoEnabled"
LAZY_PRINT_TRAIN_INFO_TITLE = "lazyPrintTrainInfoTitle"
LAZY_PRINT_MODEL_INFO_ENABLED = "lazyPrintModelInfoEnabled"
LAZY_PRINT_MODEL_INFO_TITLE = "lazyPrintModelInfoTitle"
__all__ = ['Transformer', 'Model', 'PipelineModel', 'Estimator', 'Pipeline', 'TuningEvaluator']
class Transformer(WithParams, HasLazyPrintTransformInfo):
"""
The base class of all :py:class:`Transformer` s.
Its instance wraps a Java object of type `Transformer`.
"""
def __init__(self, j_transformer: Optional[JavaObject] = None, *args, **kwargs):
"""
Construct a Java object, then set its parameters with a :py:class:`Params` instance.
The :py:class:`Params` instance is constructed from `args` and `kwargs` using :py:func:`Params.from_args`.
        The following ways of construction are supported:
#. if `j_transformer` is not `None`, directly wrap it;
#. if `j_transformer` is `None`, construct a Java object of class `cls_name` with empty constructor (`cls_name` is obtained from `kwargs` with key `CLS_NAME`);
`name` and `OP_TYPE` are optionally extracted from `kwargs` with key `name` and `OP_TYPE` respectively.
:param j_transformer: a Java `Transformer` object.
:param args: arguments.
:param kwargs: keyword arguments.
"""
super().__init__(*args, **kwargs)
clsName = kwargs.pop('CLS_NAME', None)
self.opType = kwargs.pop('OP_TYPE', 'FUNCTION')
if j_transformer is None:
self.j_transformer = get_java_gateway().jvm.__getattr__(clsName)()
else:
self.j_transformer = j_transformer
def get_j_obj(self):
return self.j_transformer
def transform(self, op: Union[BatchOperator, StreamOperator]) -> Union[BatchOperator, StreamOperator]:
"""
Apply transformation on operator `op`.
:param op: the operator for the transformation to be applied.
:return: the transformation result.
"""
if isinstance(op, BatchOperator):
return BatchOperatorWrapper(self.get_j_obj().transform(op.get_j_obj()))
else:
return StreamOperatorWrapper(self.get_j_obj().transform(op.get_j_obj()))
@staticmethod
def wrap_transformer(j_transformer: JavaObject):
"""
Wrap a Java instance of `Transformer` to a Python object of corresponding class.
:param j_transformer: a Java instance of `Transformer`.
:return: a Python object with corresponding class.
"""
model_name = j_transformer.getClass().getSimpleName()
import importlib
model_cls = getattr(importlib.import_module("pyalink.alink.pipeline"), model_name)
return model_cls(j_transformer=j_transformer)
class TransformerWrapper(Transformer):
def __init__(self, j_transformer):
super(TransformerWrapper, self).__init__(j_transformer=j_transformer)
class Model(Transformer):
"""
The base class of all :py:class:`Model` s.
Its instance wraps a Java object of type `Model`.
"""
def __init__(self, j_model: Optional[JavaObject] = None, *args, **kwargs):
"""
Construct a Java object, then set its parameters with a :py:class:`Params` instance.
The :py:class:`Params` instance is constructed from `args` and `kwargs` using :py:func:`Params.from_args`.
        The following ways of construction are supported:
#. if `j_model` is not `None`, directly wrap it;
#. if `j_model` is `None`, construct a Java object of class `cls_name` with empty constructor (`cls_name` is obtained from `kwargs` with key `CLS_NAME`);
`name` and `OP_TYPE` are optionally extracted from `kwargs` with key `name` and `OP_TYPE` respectively.
:param j_model: a Java `Model` object.
:param args: arguments.
:param kwargs: keyword arguments.
"""
super(Model, self).__init__(j_transformer=j_model, *args, **kwargs)
def getModelData(self) -> BatchOperator:
"""
Get the model data.
:return: model data.
"""
return BatchOperatorWrapper(self.get_j_obj().getModelData())
def setModelData(self, model_data: BatchOperator):
"""
Set the model data.
:param model_data: model data.
:return: `self`.
"""
self.get_j_obj().setModelData(model_data.get_j_obj())
return self
@staticmethod
def wrap_model(j_model: JavaObject):
"""
Wrap a Java instance of `Model` to a Python object of corresponding class.
:param j_model: a Java instance of `Model`.
:return: a Python object with corresponding class.
"""
model_name = j_model.getClass().getSimpleName()
import importlib
model_cls = getattr(importlib.import_module("pyalink.alink.pipeline"), model_name)
return model_cls(j_model=j_model)
class PipelineModel(Model, ModelStreamScanParams, LocalPredictable):
"""
A Python equivalent to Java `PipelineModel`.
"""
def __init__(self, *args, **kwargs):
"""
Construct a Java object of `PipelineModel` and wrap it.
        The following ways of construction are supported:
#. if `args` has only 1 entry and the entry is an instance of `JavaObject`, directly wrap it;
#. Otherwise, construct a Java object of `PipelineModel` with `args` as stages.
:param args: arguments.
:param kwargs: keyword arguments.
"""
if len(args) == 1 and isinstance(args[0], JavaObject):
j_model = args[0]
else:
j_pipeline_model_cls = get_java_class("com.alibaba.alink.pipeline.PipelineModel")
j_transformer_base_cls = get_java_class("com.alibaba.alink.pipeline.TransformerBase")
j_transformers = list(map(lambda d: d.get_j_obj(), args))
j_transformer_arr = py_list_to_j_array(j_transformer_base_cls, len(args), j_transformers)
j_model = j_pipeline_model_cls(j_transformer_arr)
super().__init__(j_model, **kwargs)
def getTransformers(self) -> List[Union[Transformer, Model]]:
"""
Get all stages in this pipeline model.
:return: all stages.
"""
j_op_type_util_cls = get_java_class("com.alibaba.alink.python.utils.OpTypeUtil")
j_transformers = self.get_j_obj().getTransformers()
transformers = []
for j_transformer in j_transformers:
if j_op_type_util_cls.isModelBase(j_transformer):
transformer = Model.wrap_model(j_transformer)
elif j_op_type_util_cls.isTransformerBase(j_transformer):
transformer = Transformer.wrap_transformer(j_transformer)
else:
raise ValueError("Invalid transformer: {}".format(j_transformer))
transformers.append(transformer)
return transformers
def save(self, file_path: Optional[Union[str, FilePath]] = None, overwrite: bool = False, numFiles: int = 1):
"""
        Save the data in the pipeline model.
#. if `file_path` is not set, the data is returned as a :py:class:`BatchOperator`;
#. if `file_path` is set, the data operator is linked to a :py:class:`AkSinkBatchOp` with parameters `overwrite` and `numFiles`.
:param file_path: if set, file path to be written.
        :param overwrite: whether to overwrite the file path.
:param numFiles: number of files to be written.
:return: data operator when `file_path` not set.
"""
if file_path is None:
return BatchOperatorWrapper(self.get_j_obj().save())
elif isinstance(file_path, str):
self.get_j_obj().save(file_path, overwrite)
elif isinstance(file_path, FilePath):
self.get_j_obj().save(file_path.get_j_obj(), overwrite, numFiles)
else:
raise ValueError("file_path must be str or FilePath")
@staticmethod
def collectLoad(op: BatchOperator) -> 'PipelineModel':
"""
Trigger the program execution like :py:func:`BatchOperator.execute`, and load data stored in `op`
to a pipeline model.
:param op: a batch operator storing the data.
:return: a pipeline model.
"""
_j_pipeline_model_cls = get_java_class("com.alibaba.alink.pipeline.PipelineModel")
j_pipeline_model = _j_pipeline_model_cls.collectLoad(op.get_j_obj())
PipelineModel._check_lazy_params(j_pipeline_model)
return PipelineModel(j_pipeline_model)
@staticmethod
def load(file_path: Union[str, FilePath]) -> 'PipelineModel':
"""
Load data stored in `file_path` to a pipeline model.
The `file_path` must be a valid Ak file or directory, i.e. written by :py:class:`AkSinkBatchOp`.
No program execution is triggered.
:param file_path: a valid Ak file or directory storing the data.
:return: a pipeline model.
"""
_j_pipeline_model_cls = get_java_class("com.alibaba.alink.pipeline.PipelineModel")
if isinstance(file_path, (str,)):
path = file_path
j_pipeline_model = _j_pipeline_model_cls.load(path)
elif isinstance(file_path, FilePath):
operator = file_path
j_pipeline_model = _j_pipeline_model_cls.load(operator.get_j_obj())
else:
raise ValueError("file_path must be str or FilePath")
PipelineModel._check_lazy_params(j_pipeline_model)
return PipelineModel(j_pipeline_model)
@staticmethod
def _check_lazy_params(pipeline_model):
for j_transformer in pipeline_model.getTransformers():
params = Params.fromJson(j_transformer.getParams().toJson())
transformer = TransformerWrapper(j_transformer)
if params.get(LAZY_PRINT_TRANSFORM_STAT_ENABLED, False):
transformer.enableLazyPrintTransformStat(params.get(LAZY_PRINT_TRANSFORM_STAT_TITLE))
if params.get(LAZY_PRINT_TRANSFORM_DATA_ENABLED, False):
transformer.enableLazyPrintTransformData(
params.get(LAZY_PRINT_TRANSFORM_DATA_NUM),
params.get(LAZY_PRINT_TRANSFORM_DATA_TITLE))
class Estimator(WithParams, HasLazyPrintTransformInfo):
"""
The base class of all :py:class:`Estimator` s.
Its instance wraps a Java object of type `Estimator`.
"""
def __init__(self, j_estimator: Optional[JavaObject] = None, *args, **kwargs):
"""
Construct a Java object, then set its parameters with a :py:class:`Params` instance.
The :py:class:`Params` instance is constructed from `args` and `kwargs` using :py:func:`Params.from_args`.
        The following ways of construction are supported:
#. if `j_estimator` is not `None`, directly wrap it;
#. if `j_estimator` is `None`, construct a Java object of class `cls_name` with empty constructor (`cls_name` is obtained from `kwargs` with key `CLS_NAME`);
`name` and `OP_TYPE` are optionally extracted from `kwargs` with key `name` and `OP_TYPE` respectively.
        :param j_estimator: a Java `Estimator` object.
:param args: arguments.
:param kwargs: keyword arguments.
"""
super().__init__(*args, **kwargs)
clsName = kwargs.pop('CLS_NAME', None)
self.opType = kwargs.pop('OP_TYPE', 'FUNCTION')
if j_estimator is None:
self._j_estimator = get_java_gateway().jvm.__getattr__(clsName)()
else:
self._j_estimator = j_estimator
def get_j_obj(self):
return self._j_estimator
def fit(self, op: BatchOperator):
"""
Fit a model on given data `op`.
:param op: data.
:return: the fitted model.
"""
j_model = self.get_j_obj().fit(op.get_j_obj())
return Model.wrap_model(j_model)
def fitAndTransform(self, op):
"""
Fit a model and then apply model on the data `op`.
:param op: data.
:return: the transformation result.
"""
if isinstance(op, BatchOperator):
return BatchOperatorWrapper(self.get_j_obj().fitAndTransform(op.get_j_obj()))
else:
return StreamOperatorWrapper(self.get_j_obj().fitAndTransform(op.get_j_obj()))
@staticmethod
def wrap_estimator(j_estimator: JavaObject):
"""
Wrap a Java instance of `Estimator` to a Python object of corresponding class.
:param j_estimator: a Java instance of `Estimator`.
:return: a Python object with corresponding class.
"""
model_name = j_estimator.getClass().getSimpleName()
import importlib
model_cls = getattr(importlib.import_module("pyalink.alink.pipeline"), model_name)
return model_cls(j_estimator=j_estimator)
class EstimatorWrapper(Estimator):
def __init__(self, j_estimator):
super(EstimatorWrapper, self).__init__(j_estimator=j_estimator)
class Pipeline(Estimator):
"""
A Python equivalent to Java `Pipeline`.
"""
def __init__(self, *stages: Union[Estimator, Transformer], j_pipeline: Optional[JavaObject] = None):
"""
Construct a Java object of `Pipeline` and wrap it.
        The following ways of construction are supported:
#. if `j_pipeline` is not `None`, directly wrap it;
#. Otherwise, construct a Java object of `Pipeline` with `stages`.
:param stages: stages.
:param j_pipeline: a Java object of `Pipeline`.
"""
super(Estimator, self).__init__()
j_pipeline_cls = get_java_gateway().jvm.com.alibaba.alink.pipeline.Pipeline
j_pipeline_stage_base = get_java_gateway().jvm.com.alibaba.alink.pipeline.PipelineStageBase
self._stages = list(stages)
if j_pipeline is None:
num = len(stages)
args = get_java_gateway().new_array(j_pipeline_stage_base, num)
for i, stage in enumerate(stages):
args[i] = stage.get_j_obj()
self._j_pipeline = j_pipeline_cls(args)
else:
self._j_pipeline = j_pipeline
def get_j_obj(self):
return self._j_pipeline
def add(self, *args: Union[Estimator, Transformer]):
"""
Append a stage to the end of this pipeline.
Note: there is a deprecated usage which accepts `(index, stage)` as arguments. `Deprecated since version 1.3.0.`
:param args: a single stage.
:return: `self`.
"""
if len(args) == 1:
index = self.size()
stage = args[0]
else:
index = args[0]
stage = args[1]
warnings.warn("usage of add(index, stage) is deprecated", DeprecationWarning, stacklevel=2)
self.get_j_obj().add(index, stage.get_j_obj())
self._stages.insert(index, stage)
return self
@deprecation.deprecated("1.3.0")
def remove(self, index):
"""
Remove stage by `index`.
:param index: the index of stage to be removed.
:return: the removed stage.
"""
self._j_pipeline.remove(index)
return self._stages.pop(index)
def get(self, index):
"""
Get stage by `index`.
:param index: the index of stage.
:return: the stage.
"""
return self._stages[index]
def size(self):
"""
Get the number of stages.
:return: the number of stages.
"""
return len(self._stages)
def fit(self, op: BatchOperator) -> PipelineModel:
"""
Fit on the data operator `op`.
:param op: the data operator.
:return: a pipeline model.
"""
return PipelineModel(self._j_pipeline.fit(op.get_j_obj()))
def save(self, file_path: Optional[Union[str, FilePath]] = None, overwrite: bool = False, numFiles: int = 1):
"""
        Save the data in the pipeline.
#. if `file_path` is not set, the data is returned as a :py:class:`BatchOperator`;
#. if `file_path` is set, the data operator is linked to a :py:class:`AkSinkBatchOp` with parameters `overwrite` and `numFiles`.
:param file_path: if set, file path to be written.
        :param overwrite: whether to overwrite the file path.
:param numFiles: number of files to be written.
:return: data operator when `file_path` not set.
"""
if file_path is None:
return BatchOperatorWrapper(self.get_j_obj().save())
elif isinstance(file_path, str):
            self.get_j_obj().save(file_path, overwrite)
elif isinstance(file_path, FilePath):
self.get_j_obj().save(file_path.get_j_obj(), overwrite, numFiles)
else:
raise ValueError("file_path must be str or FilePath")
@staticmethod
def collectLoad(op: BatchOperator) -> 'Pipeline':
"""
Trigger the program execution like :py:func:`BatchOperator.execute`, and load data stored in `op`
to a pipeline.
:param op: a batch operator storing the data.
:return: a pipeline.
"""
_j_pipeline_cls = get_java_class("com.alibaba.alink.pipeline.Pipeline")
j_pipeline = _j_pipeline_cls.collectLoad(op.get_j_obj())
stages = Pipeline._check_lazy_params(j_pipeline)
return Pipeline(*stages, j_pipeline=j_pipeline)
@staticmethod
def load(file_path: Union[str, FilePath]):
"""
Load data stored in `file_path` to a pipeline.
The `file_path` must be a valid Ak file or directory, i.e. written by :py:class:`AkSinkBatchOp`.
No program execution is triggered.
:param file_path: a valid Ak file or directory storing the data.
:return: a pipeline.
"""
_j_pipeline_cls = get_java_class("com.alibaba.alink.pipeline.Pipeline")
if isinstance(file_path, (str,)):
path = file_path
j_pipeline = _j_pipeline_cls.load(path)
elif isinstance(file_path, FilePath):
operator = file_path
j_pipeline = _j_pipeline_cls.load(operator.get_j_obj())
else:
raise ValueError("file_path must be str or FilePath")
stages = Pipeline._check_lazy_params(j_pipeline)
return Pipeline(*stages, j_pipeline=j_pipeline)
@staticmethod
def _check_lazy_params(j_pipeline):
j_op_type_util_cls = get_java_class("com.alibaba.alink.python.utils.OpTypeUtil")
stages = []
for i in range(j_pipeline.size()):
j_stage = j_pipeline.get(i)
params = Params.fromJson(j_stage.getParams().toJson())
if j_op_type_util_cls.isEstimatorBase(j_stage):
stage = Estimator.wrap_estimator(j_stage)
if params.get(LAZY_PRINT_MODEL_INFO_ENABLED, False):
stage.enableLazyPrintModelInfo(params.get(LAZY_PRINT_MODEL_INFO_TITLE))
if params.get(LAZY_PRINT_TRAIN_INFO_ENABLED, False):
stage.enableLazyPrintTrainInfo(params.get(LAZY_PRINT_TRAIN_INFO_TITLE))
elif j_op_type_util_cls.isModelBase(j_stage):
stage = Model.wrap_model(j_stage)
elif j_op_type_util_cls.isTransformerBase(j_stage):
stage = Transformer.wrap_transformer(j_stage)
else:
raise ValueError("stages are not correct.")
if params.get(LAZY_PRINT_TRANSFORM_STAT_ENABLED, False):
stage.enableLazyPrintTransformStat(params.get(LAZY_PRINT_TRANSFORM_STAT_TITLE))
if params.get(LAZY_PRINT_TRANSFORM_DATA_ENABLED, False):
stage.enableLazyPrintTransformData(
params.get(LAZY_PRINT_TRANSFORM_DATA_NUM),
params.get(LAZY_PRINT_TRANSFORM_DATA_TITLE))
stages.append(stage)
return stages
class PipelineWrapper(Pipeline):
def __init__(self, j_pipeline):
super(Estimator, self).__init__()
self.op = j_pipeline
class TuningEvaluator(WithParams, ABC):
"""
Evaluator in parameter tuning operators.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
cls_name = kwargs.pop('CLS_NAME', None)
self.evaluator = get_java_class(cls_name)()
def get_j_obj(self):
return self.evaluator
def evaluate(self, op: BatchOperator) -> float:
"""
Obtain evaluation metric from prediction result `op`.
:param op: prediction result.
:return: evaluation metric.
"""
        # Unwrap to the Java operator before calling the Java-side evaluator
        return self.get_j_obj().evaluate(op.get_j_obj())
def isLargerBetter(self) -> bool:
"""
Indicate whether larger evaluation metric is better or not.
:return: whether larger evaluation metric is better.
"""
return self.get_j_obj().isLargerBetter()
def getMetricParamInfo(self) -> str:
"""
Name of evaluation metric.
:return: name of evaluation metric.
"""
return self.get_j_obj().getMetricParamInfo().getName()
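# A minimal usage sketch (the stage objects are illustrative placeholders;
# any Estimator/Transformer subclasses generated by pyalink can be used):
#
#     pipeline = Pipeline(stage_a, stage_b)   # stages defined elsewhere
#     model = pipeline.fit(train_op)          # train_op is a BatchOperator
#     result = model.transform(test_op)
#     model.save("/tmp/model.ak", overwrite=True)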
|
# Embedded file name: c:\Jenkins\live\output\win_32_static\Release\midi-remote-scripts\Push\GlobalPadParameters.py
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from . import Sysex
class GlobalPadParameters(ControlSurfaceComponent):
def __init__(self, aftertouch_threshold = 0, *a, **k):
super(GlobalPadParameters, self).__init__(*a, **k)
self._pad_parameter_element = None
self._aftertouch_threshold = aftertouch_threshold
return
def _get_aftertouch_threshold(self):
return self._aftertouch_threshold
def _set_aftertouch_threshold(self, value):
self._aftertouch_threshold = value
self._update_pad_parameter_element()
aftertouch_threshold = property(_get_aftertouch_threshold, _set_aftertouch_threshold)
def set_pad_parameter(self, element):
self._pad_parameter_element = element
self._update_pad_parameter_element()
def _update_pad_parameter_element(self):
if self._pad_parameter_element:
self._pad_parameter_element.send_value(Sysex.make_pad_parameter_message(aftertouch_threshold=self._aftertouch_threshold))
def update(self):
super(GlobalPadParameters, self).update()
if self.is_enabled():
self._update_pad_parameter_element()
|
version = '0.8.1-dev'
|
from math import floor
# Import random package to generate random values and initialize number
# generator
from random import seed, randint
seed()
# Import os package to clear screen
from os import system, name
################################################
# Declaration of Variables body section
################################################
# Declaring variable that will hold an empty position of the board
empty_pos = []
# List of size one
empty_pos.append(0)
################################################
# Function body section
################################################
# This function will clear the console
def cls():
# Windows OS
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
# This function initializes an empty board of sudoku
def init_empty(sudoku_board):
sudoku_board.clear()
for i in range(0,81):
sudoku_board.append(0)
find_an_empty_square(sudoku_board)
# This function will return true if the board is not yet filled up, note that
# if it is the case, it will record the first free space
# from the beginning.
# Else it will return false
def find_an_empty_square(sudoku_board):
#Board is empty, return False
if len(sudoku_board) == 0:
return False
for i in range(0,81):
if sudoku_board[i] == 0:
empty_pos[0] = i
return True
return False
# This function checks whether the tentative move is valid at the given
# position, all according to traditional sudoku rules
def isValidMove(sudoku_board, pos, move):
# Defining in which row the position is from, Note that I am dividing by
# 9 because there are 9 rows
row = floor(pos / 9)
# Defining in which column the position is from, Note that I am doing a
# modulus by
# 9 because there are 9 columns
col = pos % 9
#Divide the board into 9 chunks, each of 3x3. I will number them 1 to 9,
#from left to right, top to bottom.
#Determining which square position belongs to
square = get_pos_square(pos,row,col)
#Check row
check_row = isRowValid(sudoku_board,row,move)
#Check column
check_col = isColValid(sudoku_board,col,move)
#Check respective 3x3 square
check_square = isSquareValid(sudoku_board,square,move)
# Move is valid, only if the three previous conditions are true
check_move = check_row and check_col and check_square
return check_move
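# Worked example: pos = 40 gives row = floor(40 / 9) = 4 and col = 40 % 9 = 4,
# which get_pos_square below maps to square 5 (the centre 3x3 block).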
# This function will return in which 'square' the position belongs to, see
# comments in function for more details
def get_pos_square(pos, row, col):
    # Guard: row/col must lie in 0..8 for a 9x9 board; exit immediately
    # otherwise. This branch should never run, but it keeps the function
    # robust and is useful for testing/debugging
if (row < 0 or col < 0 or row > 8 or col > 8):
print("ERROR: row is " + str(row) + ", col is " + str(col))
exit()
# Obtain square or sector of position, for reference:
# 1|2|3
# 4|5|6
# 7|8|9
if row < 3:
if col < 3:
square = 1
elif col < 6:
square = 2
elif col < 9:
square = 3
elif row < 6:
if col < 3:
square = 4
elif col < 6:
square = 5
elif col < 9:
square = 6
elif row < 9:
if col < 3:
square = 7
elif col < 6:
square = 8
elif col < 9:
square = 9
return square
# This function verifies if the move is valid in its respective row,
# according to traditional sudoku rules
def isRowValid(sudoku_board,row, move):
for entry in sudoku_board[row * 9:(row * 9 + 9)]:
if entry == move:
return False
return True
# This function verifies if the move is valid in its respective column,
# according to traditional sudoku rules
def isColValid(sudoku_board,col, move):
for newRow in range(0,9):
if sudoku_board[col + newRow * 9] == move:
return False
return True
# This function verifies if the move is valid in its respective square,
# according to traditional sudoku rules
def isSquareValid(sudoku_board,square, move):
# Set offset depending on the square
if square < 4:
rowSquareInitial = 0
elif square < 7:
rowSquareInitial = 27
else:
rowSquareInitial = 54
for col in range(0,3):
for row in range(0,3):
# rowSquareInitial is the offset
# ((square-1)%3)*3 is the 'square column' between the 9 squares
# col*9 is the iteration between columns in respective square and
# board
# row is the iteration between row in respective square and board
if sudoku_board[rowSquareInitial + ((square - 1) % 3) * 3 + col * 9 + row] == move:
return False
return True
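# Worked example for the index arithmetic above: square = 5 gives
# rowSquareInitial = 27 and ((5 - 1) % 3) * 3 = 3, so the scanned indices are
# 30..32, 39..41 and 48..50 -- that is, board rows 3-5, columns 3-5.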
# This function will update the board
def update_board(sudoku_board,pos,move):
sudoku_board[pos] = move
# This function prints a dashed line to separate between each row
def print_line():
print("\n ------------------------------------- ")
#This function prints the state of the sudoku board
def print_sudoku_board(sudoku_board):
# Horizontal bar
print_line()
# Prints row
for entry, count in zip(sudoku_board, range(1,len(sudoku_board) + 1)):
if count % 9 == 1:
print(end = " | ")
print(str(entry), end=" | ")
# Start new row
if (count % 9) == 0:
print_line()
print()
# This function will solve any solvable sudoku using an algorithm called
# backtracking
# Variables for when a board takes too long to solve, likely because it is not
# solvable
debug_counter = []
debug_counter.append(0)
def solve_sudoku(sudoku_board):
#Given an empty board, return false
if len(sudoku_board) == 0:
return False
#Base case: Stop recursing when board is filled
if not find_an_empty_square(sudoku_board):
return True
#Save current empty position
pos = empty_pos[0]
#Increase recursion iteration
debug_counter[0]+=1
#Recursive cases:
# Iterate from 1 to 9 inclusive because these are the only move permitted
# in traditional Sudoku
for move in range(1,10):
#If temptitive move is not allowed, try the next move
#else enter statement
if isValidMove(sudoku_board,pos,move):
update_board(sudoku_board,pos,move)
if solve_sudoku(sudoku_board):
return True
#Limit the amount of moves to 15000, if passed, took too long to
#solve, likely is not a valid board
elif debug_counter[0] > 15000:
debug_counter[0] = 0
return False
# Wrong move, try the next one
update_board(sudoku_board,pos,0)
# Backtracking occurs
return False
# This function will randomly generate a solvable sudoku board
# How to proceed:
# The user enters a difficulty among "easy", "normal" and "hard"; the number
# of slots filled changes with difficulty -> see method "difficulty_multiplier"
# Moves are generated randomly on an empty board; once all moves are placed
# and the board is valid, return the board
# Else restart
def generate_board(sudoku_board, difficulty = "easy"):
# Initialize empty board
init_empty(sudoku_board)
game_board = []
# Get difficulty multiplier and save a backup
temp_multiplier = difficulty_multiplier(difficulty)
# Loop until the board is fully generated
while True:
#Randomly generate positions and moves and test them, if it works
#update
#board
while True:
#Randomly generate a position and a move to play
pos = position_generator()
move = move_generator()
#Check if randomly generated move is valid, if so update board
if sudoku_board[pos] == 0 and isValidMove(sudoku_board,pos,move):
update_board(sudoku_board,pos,move)
break
# Decrease remaining moves to put on board counter
temp_multiplier -= 1
#Once all moves are on the board, this changes according to difficulty
# check if board is valid, and if so return board
# Else, restart from scratch
if temp_multiplier < 1:
game_board = sudoku_board.copy()
return game_board
# This function will verify whether a sudoku board has a solution
def is_valid_board(sudoku_board, temp_board):
if solve_sudoku(temp_board):
return True
else:
        # The sudoku board does NOT have a solution, clear the board
sudoku_board.clear()
temp_board.clear()
return False
# This function will return randomly a value between 0 and 80 inclusive
# which is the size of a 9x9 sudoku
def position_generator():
return randint(0,80)
# This function will return randomly a value between 1 and 9 inclusive
# which are the moves permitted in a 9x9 sudoku
def move_generator():
return randint(1,9)
# This function returns the number of sudoku moves a board will start with
# depending on difficulty passed
def difficulty_multiplier(difficulty):
if difficulty.lower() == "easy":
return 18
elif difficulty.lower() == "normal":
return 14
elif difficulty.lower() == "hard":
return 11
else:
print("ERROR: Difficulty parameter is not within permitted choices")
exit()
# This function returns a sudoku board with its solution.
# It will first try to generate a brand new sudoku, if after 2 attempts it fails, the function
# will return a premade board stored in a file respective to the difficulty. If a new board
# is created, store that board and its solution in its file.
#
# generating is used for the script "script.py" when it is wanted to
# store several sudokus with solution.
def init_board(sudo=None, sol=None, lvl="easy", generating=False):
    #Use None instead of mutable default arguments (a classic Python pitfall)
    if sudo is None:
        sudo = []
    if sol is None:
        sol = []
    #Initialize sudoku board and its solution, by first randomly generating a
    #solvable sudoku
    print("Loading, generating a sudoku board")
    #Declare and initialize Sudoku
    sol = generate_board(sol, lvl)
sudo = sol.copy()
    #Declare and initialize attempts
#Note: There are 3 total attempts
#Once attempts have run out, retrieve a pre-generated board
attempts = 1
while attempts <= 2 or generating:
if is_valid_board(sol,sudo):
#A valid board was generated, store it in file and return board and solution as a tuple
game_board = hole_in_board(sol,sudo)
filename = lvl.capitalize() + "Sudoku.txt"
sudokuFile = open(filename, "a")
sudokuFile.write(toString_sudoku(game_board))
sudokuFile.write(toString_sudoku(sudo))
sudokuFile.write("\n")
sudokuFile.close()
#Note that game_board HERE and ONLY HERE is the game board
#and sudo it the solution
return game_board,sudo
else:
#Unsuccessful attempt re-init board
print("Try again, Attempt #" + str(attempts))
attempts +=1
sol = generate_board(sol, lvl)
sudo = sol.copy()
    #If it reaches here, it means that a board took too long to generate,
    #so a board will be retrieved from a file where boards are stored per difficulty
    #The board is selected randomly from the file
#Open file stream
filename = lvl.capitalize() + "Sudoku.txt"
sudokuFile = open(filename, "r")
#Reset file pointer to the beginning of the file
sudokuFile.seek(0,0)
#Count number of lines in file
num_lines = 0
for line in sudokuFile:
num_lines+=1
#Reset file pointer to the beginning of the file
sudokuFile.seek(0,0)
#Number of games
num_game = int(num_lines/3)
    #Randomly select one board from the file: games are indexed 0..num_game-1,
    #each occupying 3 lines (board, solution, blank separator)
    chosen_game = randint(0, num_game - 1) * 3 + 1
chosen_sol = chosen_game + 1
#Move file pointer to desired sudoku
for line in range(0,chosen_game-1):
sudokuFile.readline()
#Store game board and solution as strings
tempsudo = sudokuFile.readline()
tempsol = sudokuFile.readline()
#Convert string from file to a Sudoku board
sudo = convert_string_to_list(tempsudo)
sol = convert_string_to_list(tempsol)
#Return pre-generated sudoku
sudokuFile.close()
return sudo,sol
# This function converts the sudoku as a string delimited by commas
# EX. [1,2,3,4,5] -> "1,2,3,4,5"
# Here it is used to store a sudoku into a file
def toString_sudoku(sudoku_board):
temp = ""
# Prints row
for index in range(0,len(sudoku_board)):
if index == 0:
temp = temp + str(sudoku_board[index])
else:
temp = (temp + "," + str(sudoku_board[index]))
temp = temp + "\n"
return temp
# This function converts a string into a list
# Here it is used to retrieve a sudoku or a solution from a file
def convert_string_to_list(string):
list = []
temp = string.split(",")
for i in temp:
list.append(int(i))
return list
# This function helps make sudoku generation faster
# Idea: take the solved board and copy a number of its cells into the puzzle
# (despite the name, it fills empty cells rather than punching holes)
# Why: building a board from scratch has a high chance of generating sudokus
# with no solution
# Returns a sudoku board
def hole_in_board(sudoku,sol):
limit_tries = 200
    number_of_holes = 25
while number_of_holes != 0:
#Randomly generate a position and a move to play
pos = position_generator()
move = move_generator()
if sudoku[pos] == 0:
sudoku[pos] = sol[pos]
number_of_holes -=1
limit_tries -=1
if limit_tries < 0:
break
return sudoku
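# A minimal demo sketch (assumes an "EasySudoku.txt" file exists or can be
# created in the working directory):
#
#     board, solution = init_board(lvl="easy")
#     print_sudoku_board(board)
#     print_sudoku_board(solution)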
|
import cv2
from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize
import os
from PIL import Image
from encode1 import QR_code
import sys
img_root = os.getcwd()  # get the current working directory
def Video2Pic(videopath):
    videoPath = img_root + '/' + videopath  # full path of the input video
    folder_name = 'output'  # name of the output folder to create
    # Create the output folder only if it does not exist yet
    if not os.path.isdir(os.path.join(img_root, folder_name)):
        os.mkdir(os.path.join(img_root, folder_name))
    imgPath = img_root + "/output/"  # where extracted frames are saved
    cap = cv2.VideoCapture(videoPath)
    fps = cap.get(cv2.CAP_PROP_FPS)  # frame rate
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # frame width
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # frame height
    if not cap.isOpened():  # check whether the video opened successfully
        print("Please check the path.")
    frame_interval = 9  # currently unused
    frame_count = 0
    cnt = 0
    while 1:
        suc, frame = cap.read()
        cnt += 1
        frame_count += 1
        if not suc:
            break
        cv2.imwrite(imgPath + "%02d.png" % frame_count, frame)
        cv2.waitKey(1)
    print("Video-to-image conversion succeeded")
    cap.release()
if __name__ == '__main__':
a = QR_code()
    # Video2Pic requires the path of the input video; take it from argv
    Video2Pic(sys.argv[1])
|
'''
Author: hiocde
Email: hiocde@gmail.com
Start: 1.16.17
Completion:
Original: I found a fourth way of reading data: not the placeholder,
    queue-pipeline, or constant approaches described by the official docs, but
    one that uses the tf.pack API to build my own automatic input pipeline.
    I also used pure TensorFlow ops for image processing instead of OpenCV.
Domain: main_dir{sub_dir{same class raw images}...}
'''
import os
import numpy as np
import tensorflow as tf
# Global constants describing the Paris Dataset.
# A epoch is one cycle which train the whole training set.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 6412
class ImageSet:
def __init__(self, img_main_dir, generalized=True):
self.examples= self.create_examples(img_main_dir)
np.random.shuffle(self.examples)
        self.images_nm, self.labels= zip(*self.examples) # separate image names from labels
self.num_exps= len(self.examples)
self.num_class= len(set(self.labels))
self.pointer= 0 #pointer points next example
        self.generalized= generalized # whether to apply random distortions for generalization
def has_next_exp(self):
return self.pointer< self.num_exps
def next_exp(self):
if not self.has_next_exp():
np.random.shuffle(self.examples)
self.pointer=0
self.images_nm, self.labels= zip(*self.examples)
label= self.labels[self.pointer]
image= self.img_read(self.images_nm[self.pointer]) #3d-tensor
distorted_image= self.distort_image(image)
self.pointer+=1
return distorted_image, label
def next_batch(self, batch_size):
'''return images, labels and ids(path) as batch'''
batch=[]
for i in range(batch_size):
exp= self.next_exp()
batch.append(exp)
images,labels= zip(*batch)
ids= self.images_nm[self.pointer-batch_size : self.pointer]
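        # Note: tf.pack was renamed tf.stack in TensorFlow 1.0, and tf.read_file
        # (used in img_read below) later moved to tf.io.read_file, so these
        # calls would need the renamed APIs on newer TF versions.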
return tf.pack(images), tf.pack(labels), ids #pack to get a 4d-tensor input for inference and 1d-tensor labels for loss
def create_examples(self, img_main_dir):
'''Args:
img_main_dir: includes sub_dirs, each sub_dir is a class of images
Return:
all images path and their labels(0-n-1),a list of tuple
'''
examples=[]
for sub_dir in os.listdir(img_main_dir):
class_index= int(sub_dir.split('#')[-2]) #because I appended class index to sub_dir
for img_name in os.listdir(os.path.join(img_main_dir, sub_dir)):
examples.append((os.path.join(img_main_dir,sub_dir,img_name), class_index))
return examples
def img_read(self, img_path):
'''Brief:
Directly use tf's op to read and change img not opencv
Return:
a 3d-tensor of img, dtype=uint8, shape: [h,w,d]
'''
print(img_path)
return tf.image.decode_jpeg(tf.read_file(img_path),3)
def distort_image(self, img):
'''Imitate tf's cifar10 sample'''
distorted_image = tf.cast(img, tf.float32)
		#Fun fact: AlexNet's paper states a 224*224 input size, but it is actually 227*227 ((224 - 11)/4 + 1 is quite clearly not an integer)
distorted_image = tf.image.resize_images(distorted_image, [227, 227])
if self.generalized:
distorted_image = tf.image.random_flip_left_right(distorted_image)
distorted_image = tf.image.random_brightness(distorted_image,max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,lower=0.2,upper=1.8)
return tf.image.per_image_standardization(distorted_image)
### Test
# trainset= ImageSet('/mnt/g/machine_learning/dataset/Alexnet_tf/paris')
# x,y=trainset.next_batch(32)
# print(x)
# print(y)
# #print:
# # Tensor("pack_32:0", shape=(32, 227, 227, 3), dtype=float32)
# # Tensor("pack_33:0", shape=(32,), dtype=int32)
# #good job! A auto input pipeline
|
#!/usr/bin/env python
# Looking into different sampling schemes to give "sparse scaling"
# (which, paradoxically, is better for small subnetwork inference).
# Daniel Klein, 5/1/2013
import numpy as np
from Network import network_from_edges
from Experiment import RandomSubnetworks, Results, add_array_stats
# Parameters
params = { 'N': 400,
'D': 5,
'num_reps': 5,
'sub_sizes': np.arange(10, 110, 10, dtype = np.int),
'sampling_methods': ['random_node', 'random_edge',
'link_trace', 'link_trace_f'],
'plot_network': True }
# Set random seed for reproducible output
np.random.seed(137)
# Initialize full network
blocks = params['N'] // params['D']
edges = []
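# Each block's D nodes all link to the D nodes of the following block (mod N),
# giving a block-circulant graph with constant out-degree D.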
for block in range(blocks):
for i in range(params['D']):
v_1 = 'n_%d' % (block * params['D'] + i)
for j in range(params['D']):
v_2 = 'n_%d' % (((block + 1) * params['D'] + j) % params['N'])
edges.append((v_1, v_2))
net = network_from_edges(edges)
# Set up recording of results from experiment
results_by_method = { }
for method_name in params['sampling_methods']:
results = Results(params['sub_sizes'], params['sub_sizes'],
params['num_reps'], title = method_name)
add_array_stats(results, network = True)
results.new('# Active', 'n',
lambda n: np.isfinite(n.offset.matrix()).sum())
results_by_method[method_name] = results
for sub_size in params['sub_sizes']:
size = (sub_size, sub_size)
print 'subnetwork size = %s' % str(size)
generators = \
{ 'random_node': RandomSubnetworks(net, size, method = 'node'),
'random_edge': RandomSubnetworks(net, size, method = 'edge'),
'link_trace': RandomSubnetworks(net, size, method = 'link'),
'link_trace_f': RandomSubnetworks(net, size, method = 'link_f') }
for generator in generators:
    if generator not in params['sampling_methods']: continue
print generator
for rep in range(params['num_reps']):
subnet = generators[generator].sample(as_network = True)
subnet.offset_extremes()
results_by_method[generator].record(size, rep, subnet)
# Output results
print
for method_name in params['sampling_methods']:
print method_name
results = results_by_method[method_name]
results.summary()
if params['plot_network']:
results.plot([('Density', {'ymin': 0, 'plot_mean': True}),
(['Out-degree', 'Max row-sum', 'Min row-sum'],
{'ymin': 0, 'plot_mean': True}),
(['In-degree', 'Max col-sum', 'Min col-sum'],
{'ymin': 0, 'plot_mean': True}),
('Self-loop density', {'ymin': 0, 'plot_mean': True}),
('# Active', {'ymin': 0 })])
print
# Report parameters for the run
print 'Parameters:'
for field in params:
print '%s: %s' % (field, str(params[field]))
|
import Utils_Intersect as Intersect
import Entities_Marchand as Marchand
import Entities_Player as Player
import Entities_Tile as Tile
import Entities_Item as Item
import Utils_Position as Position
import Utils_Size as Size
import Entities_Inventory as Inventory
import pygame
class ScriptMarchand:
def __init__(self):
self.scenes = []
self.items = None
self.pognon = None
self.joueur = None
self.uiTexture = None
self.assetTexture = None
self.__ActifUI = 0
def update(self):
for scene in self.scenes:
            if scene.state:
for marchand in scene.marchands:
if Intersect.intersectXY(self.joueur.position, self.joueur.colBox, marchand.pos, marchand.colBox):
                        # Open the selling menu
if self.joueur.state == Player.PlayerState.PLANTATION:
if marchand.etat == Marchand.EtatMarchand.INACTIF:
self.joueur.state = Player.PlayerState.DISCUTION
marchand.etat = Marchand.EtatMarchand.ACTIF
ui = Item.Item(self.uiTexture)
ui.position = Position.Position(32, 32)
ui.size = Size.Size(96, 64)
ui.quantity = 0
scene.ui.append(ui)
produit0 = Item.Item(self.assetTexture)
produit0.position = Position.Position(48, 48)
produit0.texPos = Position.Position(224, 16)
produit0.quantity = 5
                                self.__ActifUI = 0  # fixed: was assigning a local variable instead of the instance attribute
scene.ui.append(produit0)
produit1 = Item.Item(self.assetTexture)
produit1.position = Position.Position(48, 64)
produit1.texPos = Position.Position(240, 0)
produit1.quantity = 1
scene.ui.append(produit1)
                        # Close the selling menu
elif self.joueur.state == Player.PlayerState.SWITCH_ITEM:
if marchand.etat == Marchand.EtatMarchand.ACTIF:
marchand.etat = Marchand.EtatMarchand.INACTIF
scene.ui.clear()
                        # Selling menu actions
if marchand.etat == Marchand.EtatMarchand.ACTIF:
                            # Move down in the menu
if marchand.inventaireMarchand == Marchand.InventaireMarchand.INVENTAIRE_BAS:
self.__ActifUI = 1
scene.ui[2].texPos.y = 16
scene.ui[1].texPos.y = 0
marchand.inventaireMarchand = Marchand.InventaireMarchand.INVENTAIRE_RIEN
                            # Move up in the menu
if marchand.inventaireMarchand == Marchand.InventaireMarchand.INVENTAIRE_HAUT:
self.__ActifUI = 0
scene.ui[2].texPos.y = 0
scene.ui[1].texPos.y = 16
marchand.inventaireMarchand = Marchand.InventaireMarchand.INVENTAIRE_RIEN
                            # Purchase from the menu
if marchand.inventaireMarchand == Marchand.InventaireMarchand.INVENTAIRE_ACHAT:
                                if self.__ActifUI == 0:  # watermelon
for item in self.items.itemGraines:
if item.Item == Item.Items.GRN_PASTEQUE:
if self.pognon.pognon >= 5:
item.quantity += 1
self.pognon.pognon -= 5
                                if self.__ActifUI == 1:  # chili pepper
for item in self.items.itemGraines:
if item.Item == Item.Items.GRN_PIMENT:
if self.pognon.pognon >= 1:
item.quantity += 1
self.pognon.pognon -= 1
marchand.inventaireMarchand = Marchand.InventaireMarchand.INVENTAIRE_RIEN
|
from nodes import ImageNode, MixNode, OutputNode
from renderer import Renderer
# Init renderer
renderer = Renderer(None)
# Init nodes with ids
img_nd1 = ImageNode(1)
img_nd2 = ImageNode(2)
mix_nd = MixNode(3)
output_nd = OutputNode(4)
# Create a dict of nodes which the renderer accepts
nodes = {
"img_nd1": img_nd1,
"img_nd2": img_nd2,
"mix_nd": mix_nd,
"output_nd": output_nd
}
# Edit the values (properties) and connections (parameters) of the nodes
img_nd1.EditProperty("file_path", "./test1.jpg")
img_nd2.EditProperty("file_path", "./test2.jpg")
mix_nd.EditParameter("image_1", img_nd1)
mix_nd.EditParameter("image_2", img_nd2)
output_nd.EditParameter("image", mix_nd)
# Render 1
mix_nd._dirty_flag = True
mix_nd.EditProperty("blend_mode", 1)
print("Render result 1: ", renderer.Render(nodes))
# Render 2
mix_nd._dirty_flag = True
mix_nd.EditProperty("blend_mode", 10)
print("Render result 2: ", renderer.Render(nodes))
# Render 3
mix_nd._dirty_flag = True
mix_nd.EditProperty("blend_mode", 1)
print("Render result 3: ", renderer.Render(nodes))
|
""" Contains a class for logic of the evoked topomap dialog.
"""
from PyQt5 import QtWidgets
from meggie.actions.evoked_plot_topomap.dialogs.evokedTopomapDialogUi import Ui_evokedTopomapDialog
from meggie.utilities.messaging import exc_messagebox
class EvokedTopomapDialog(QtWidgets.QDialog):
""" Contains logic for the evoked topomap dialog.
"""
def __init__(self, parent, evoked, handler):
QtWidgets.QDialog.__init__(self, parent)
self.ui = Ui_evokedTopomapDialog()
self.ui.setupUi(self)
self.handler = handler
self.evoked = evoked
times = list(evoked.content.values())[0].times
tmin, tmax = times[0], times[-1]
self.ui.doubleSpinBoxStart.setValue(tmin)
self.ui.doubleSpinBoxStart.setMinimum(tmin)
self.ui.doubleSpinBoxStart.setMaximum(tmax)
self.ui.doubleSpinBoxEnd.setMinimum(tmin)
self.ui.doubleSpinBoxEnd.setMaximum(tmax)
self.ui.doubleSpinBoxEnd.setValue(tmax)
def accept(self):
tmin = self.ui.doubleSpinBoxStart.value()
tmax = self.ui.doubleSpinBoxEnd.value()
step = self.ui.doubleSpinBoxStep.value()
radius = None
if self.ui.checkBoxRadius.isChecked():
radius = self.ui.doubleSpinBoxRadius.value()
try:
self.handler(tmin, tmax, step, radius)
except Exception as exc:
            exc_messagebox(self.parent(), exc)  # parent is a QWidget method, not an attribute
self.close()
|
import numpy as np
import os
import matplotlib.pyplot as plt
from print_values import *
from plot_data_all_phonemes import *
from plot_data import *
import random
from sklearn.preprocessing import normalize
from get_predictions import *
from plot_gaussians import *
# File that contains the data
data_npy_file = 'data/PB_data.npy'
# Loading data from .npy file
data = np.load(data_npy_file, allow_pickle=True)
data = np.ndarray.tolist(data)
# Make a folder to save the figures
figures_folder = os.path.join(os.getcwd(), 'figures')
os.makedirs(figures_folder, exist_ok=True)  # exist_ok makes a separate existence check unnecessary
# Array that contains the phoneme ID (1-10) of each sample
phoneme_id = data['phoneme_id']
# frequencies f1 and f2
f1 = data['f1']
f2 = data['f2']
# Initialize array containing f1 & f2, of all phonemes.
X_full = np.zeros((len(f1), 2))
#########################################
# Write your code here
# Store f1 in the first column of X_full, and f2 in the second column of X_full
X_full[:, 0] = f1
X_full[:, 1] = f2
########################################/
X_full = X_full.astype(np.float32)
# number of GMM components
k = 3
#########################################
# Write your code here
# Create an array named "X_phonemes_1_2", containing only samples that belong to phoneme 1 and samples that belong to phoneme 2.
# The shape of X_phonemes_1_2 will be two-dimensional. Each row will represent a sample of the dataset, and each column will represent a feature (e.g. f1 or f2)
# Fill X_phonemes_1_2 with the samples of X_full that belong to the chosen phonemes
# To fill X_phonemes_1_2, you can leverage the phoneme_id array, that contains the ID of each sample of X_full
# X_phonemes_1_2 = ...
# y is the target phoneme value
y = np.array([])
X_phonemes_1_2 = np.empty((0, 2))
for i, phoneme in enumerate(phoneme_id):
if phoneme == 1 or phoneme == 2:
X_phonemes_1_2 = np.append(X_phonemes_1_2, [X_full[i]], axis=0)
y = np.append(y, phoneme)
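# An equivalent vectorized construction (a sketch, assuming phoneme_id is a
# NumPy array; it produces the same result as the loop above):
# mask = (phoneme_id == 1) | (phoneme_id == 2)
# X_phonemes_1_2 = X_full[mask]
# y = phoneme_id[mask].astype(float)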
########################################/
# Plot array containing the chosen phonemes
# Create a figure and a subplot
fig, ax1 = plt.subplots()
title_string = 'Phoneme 1 & 2'
# plot the samples of the dataset, belonging to the chosen phoneme (f1 & f2, phoneme 1 & 2)
plot_data(X=X_phonemes_1_2, title_string=title_string, ax=ax1)
# save the plotted points of phoneme 1 as a figure
plot_filename = os.path.join(os.getcwd(), 'figures', 'dataset_phonemes_1_2.png')
plt.savefig(plot_filename)
#########################################
# Write your code here
# Get predictions on samples from both phonemes 1 and 2, from a GMM with k components, pretrained on phoneme 1
# Get predictions on samples from both phonemes 1 and 2, from a GMM with k components, pretrained on phoneme 2
# Compare these predictions for each sample of the dataset, and calculate the accuracy, and store it in a scalar variable named "accuracy"
# as dataset X, we will use only the samples of the chosen phoneme
X = X_phonemes_1_2.copy()
# get number of samples
N = X.shape[0]
# change k here
k = 3
GMM_file_01 = 'data/GMM_params_phoneme_01_k_{:02}.npy'.format(k)
# Loading data from .npy file
GMM_parameters_1 = np.load(GMM_file_01, allow_pickle=True)
GMM_parameters_1 = GMM_parameters_1.item()
mu_1 = GMM_parameters_1['mu']
s_1 = GMM_parameters_1['s']
p_1 = GMM_parameters_1['p']
# Initialize array Z that will get the predictions of each Gaussian on each sample
Z_1 = np.zeros((N,k))
Z_1 = get_predictions(mu_1, s_1, p_1, X)
sum1 = Z_1.sum(axis=1)
GMM_file_02 = 'data/GMM_params_phoneme_02_k_{:02}.npy'.format(k)
# Loading data from .npy file
GMM_parameters_2 = np.load(GMM_file_02, allow_pickle=True)
GMM_parameters_2 = GMM_parameters_2.item()
mu_2 = GMM_parameters_2['mu']
s_2 = GMM_parameters_2['s']
p_2 = GMM_parameters_2['p']
# Initialize array Z that will get the predictions of each Gaussian on each sample
Z_2 = np.zeros((N,k))
Z_2 = get_predictions(mu_2, s_2, p_2, X)
sum2 = Z_2.sum(axis=1)
prediction = np.array([])
for i in range(y.size):
if sum1[i] > sum2[i]:
prediction = np.append(prediction, 1)
else:
prediction = np.append(prediction, 2)
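# Equivalent vectorized form of the decision rule above (a sketch):
# prediction = np.where(sum1 > sum2, 1, 2)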
accuracy = (sum(np.equal(prediction, y)) / y.size) * 100
########################################/
print('Accuracy using GMMs with {} components: {:.2f}%'.format(k, accuracy))
################################################
# turn matplotlib's interactive mode off so plt.show() blocks and keeps the figures open
plt.ioff()
plt.show()
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
# Check for presence of harfbuzz-icu library, use it if present.
'harfbuzz_libraries':
'<!(python <(DEPTH)/tools/compile_test/compile_test.py '
'--code "int main() { return 0; }" '
'--run-linker '
'--on-success "harfbuzz harfbuzz-icu" '
'--on-failure "harfbuzz" '
'-- -lharfbuzz-icu)',
},
'targets': [
{
'target_name': 'harfbuzz-ng',
'type': 'none',
'cflags': [
'<!@(pkg-config --cflags <(harfbuzz_libraries))',
],
'direct_dependent_settings': {
'cflags': [
'<!@(pkg-config --cflags <(harfbuzz_libraries))',
],
},
'link_settings': {
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other <(harfbuzz_libraries))',
],
'libraries': [
'<!@(pkg-config --libs-only-l <(harfbuzz_libraries))',
],
},
'variables': {
'headers_root_path': 'src',
'header_filenames': [
'hb.h',
],
},
'includes': [
'../../build/shim_headers.gypi',
],
},
],
}
|
"""
Mike Kroutikov, (c) 2014
Test QuickBot motors.
Run this code on the BeagleBone side.
It will run wheel motors in a simple pattern:
1. Left motor runs forward for 5 seconds, then stops
2. Left motor runs in reverse for about 5 seconds, then stops
3. Right motor runs forward for 5 secs, then stops
4. Right motor runs in reverse for 5 secs, then stops.
"""
import contextlib
import time
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
import Adafruit_BBIO.ADC as ADC
@contextlib.contextmanager
def motor_setup(dir1_pin, dir2_pin, pwm_pin):
"""
Sets up context for operating a motor.
"""
# Initialize GPIO pins
GPIO.setup(dir1_pin, GPIO.OUT)
GPIO.setup(dir2_pin, GPIO.OUT)
# Initialize PWM pins: PWM.start(channel, duty, freq=2000, polarity=0)
PWM.start(pwm_pin, 0)
def run_motor(speed):
if speed > 100:
speed = 100
elif speed < -100:
speed = -100
if speed > 0:
GPIO.output(dir1_pin, GPIO.LOW)
GPIO.output(dir2_pin, GPIO.HIGH)
PWM.set_duty_cycle(pwm_pin, abs(speed))
elif speed < 0:
GPIO.output(dir1_pin, GPIO.HIGH)
GPIO.output(dir2_pin, GPIO.LOW)
PWM.set_duty_cycle(pwm_pin, abs(speed))
else:
GPIO.output(dir1_pin, GPIO.LOW)
GPIO.output(dir2_pin, GPIO.LOW)
PWM.set_duty_cycle(pwm_pin, 0)
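    # run_motor maps a speed in [-100, 100] onto the H-bridge: the sign picks
    # the direction pins, the magnitude becomes the PWM duty cycle, and 0 stops.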
    try:
        yield run_motor
    finally:
        # always release GPIO/PWM resources, even if the caller's block raises
        GPIO.cleanup()
        PWM.cleanup()
if __name__ == '__main__':
import config
print '====== Testing Quick Bot ======='
print
print 'Left motor should follow commands'
print
with motor_setup(*config.LEFT_MOTOR_PINS) as run:
print 'Left motor: Run forward'
run(50)
time.sleep(5)
print 'Left motor: Stop'
run(0)
time.sleep(2)
print 'Left motor: Reverse'
run(-50)
time.sleep(5)
print 'Left motor: Stop'
run(0)
print
print 'Right motor should follow commands'
print
with motor_setup(*config.RIGHT_MOTOR_PINS) as run:
print 'Right motor: Run forward'
run(50)
time.sleep(5)
print 'Right motor: Stop'
run(0)
time.sleep(2)
print 'Right motor: Reverse'
run(-50)
time.sleep(5)
print 'Right motor: Stop'
run(0)
|
#!/usr/bin/env python
import numpy as np
Configs = {}
class CanonicalConfig:
def __init__(self):
self.width = 368
self.height = 368
self.stride = 8
self.parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri", "Rhip", "Rkne", "Rank", "Lhip", "Lkne", "Lank", "Reye", "Leye", "Rear", "Lear"]
self.num_parts = len(self.parts)
self.parts_dict = dict(zip(self.parts, range(self.num_parts)))
self.parts += ["background"]
self.num_parts_with_background = len(self.parts)
leftParts, rightParts = CanonicalConfig.ltr_parts(self.parts_dict)
self.leftParts = leftParts
self.rightParts = rightParts
        # these numbers were probably copied from Matlab; they are 1-based, not 0-based
self.limb_from = ['neck', 'Rhip', 'Rkne', 'neck', 'Lhip', 'Lkne', 'neck', 'Rsho', 'Relb', 'Rsho', 'neck', 'Lsho', 'Lelb', 'Lsho',
'neck', 'nose', 'nose', 'Reye', 'Leye']
self.limb_to = ['Rhip', 'Rkne', 'Rank', 'Lhip', 'Lkne', 'Lank', 'Rsho', 'Relb', 'Rwri', 'Rear', 'Lsho', 'Lelb', 'Lwri', 'Lear',
'nose', 'Reye', 'Leye', 'Rear', 'Lear']
self.limb_from = [ self.parts_dict[n] for n in self.limb_from ]
self.limb_to = [ self.parts_dict[n] for n in self.limb_to ]
assert self.limb_from == [x-1 for x in [2, 9, 10, 2, 12, 13, 2, 3, 4, 3, 2, 6, 7, 6, 2, 1, 1, 15, 16]]
assert self.limb_to == [x-1 for x in [9, 10, 11, 12, 13, 14, 3, 4, 5, 17, 6, 7, 8, 18, 1, 15, 16, 17, 18]]
self.limbs_conn = list(zip(self.limb_from, self.limb_to))
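        # each limb connection contributes two PAF (part affinity field)
        # layers, an x component and a y component, hence the factor of 2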
self.paf_layers = 2*len(self.limbs_conn)
self.heat_layers = self.num_parts
self.num_layers = self.paf_layers + self.heat_layers + 1
self.paf_start = 0
self.heat_start = self.paf_layers
self.bkg_start = self.paf_layers + self.heat_layers
#self.data_shape = (self.height, self.width, 3) # 368, 368, 3
self.mask_shape = (self.height//self.stride, self.width//self.stride) # 46, 46
self.parts_shape = (self.height//self.stride, self.width//self.stride, self.num_layers) # 46, 46, 57
class TransformationParams:
def __init__(self):
            self.target_dist = 0.6
            self.scale_prob = 1  # TODO: this is actually scale unprobability, i.e. 1 = off, 0 = always; not sure if it is a bug or not
            self.scale_min = 0.5
            self.scale_max = 1.1
self.max_rotate_degree = 40.
self.center_perterb_max = 40.
self.flip_prob = 0.5
self.sigma = 7.
self.paf_thre = 8. # it is original 1.0 * stride in this program
self.transform_params = TransformationParams()
@staticmethod
def ltr_parts(parts_dict):
        # when we flip an image, left parts become right parts and vice versa; these are the lists of parts to exchange with each other
leftParts = [ parts_dict[p] for p in ["Lsho", "Lelb", "Lwri", "Lhip", "Lkne", "Lank", "Leye", "Lear"] ]
rightParts = [ parts_dict[p] for p in ["Rsho", "Relb", "Rwri", "Rhip", "Rkne", "Rank", "Reye", "Rear"] ]
return leftParts,rightParts
class COCOSourceConfig:
def __init__(self, hdf5_source):
self.hdf5_source = hdf5_source
self.parts = ['nose', 'Leye', 'Reye', 'Lear', 'Rear', 'Lsho', 'Rsho', 'Lelb',
'Relb', 'Lwri', 'Rwri', 'Lhip', 'Rhip', 'Lkne', 'Rkne', 'Lank',
'Rank']
self.num_parts = len(self.parts)
        # for COCO the neck is calculated as the mean of the 2 shoulders
self.parts_dict = dict(zip(self.parts, range(self.num_parts)))
def convert(self, meta, global_config):
joints = np.array(meta['joints'])
assert joints.shape[1] == len(self.parts)
result = np.zeros((joints.shape[0], global_config.num_parts, 3), dtype=np.float)
        result[:,:,2]=3.  # OURS: 3 = never marked up in this dataset, 2 = not marked up for this person, 1 = marked and visible, 0 = marked but invisible
for p in self.parts:
coco_id = self.parts_dict[p]
if p in global_config.parts_dict:
global_id = global_config.parts_dict[p]
assert global_id!=1, "neck shouldn't be known yet"
result[:,global_id,:]=joints[:,coco_id,:]
if 'neck' in global_config.parts_dict:
neckG = global_config.parts_dict['neck']
RshoC = self.parts_dict['Rsho']
LshoC = self.parts_dict['Lsho']
            # there is no neck in the COCO database, so we calculate it as the average of the shoulders
            # TODO: we use 0 = hidden, 1 = visible, 2 = absent; these are not the raw COCO values, they were processed by generate_hdf5
both_shoulders_known = (joints[:, LshoC, 2]<2) & (joints[:, RshoC, 2] < 2)
result[~both_shoulders_known, neckG, 2] = 2. # otherwise they will be 3. aka 'never marked in this dataset'
result[both_shoulders_known, neckG, 0:2] = (joints[both_shoulders_known, RshoC, 0:2] +
joints[both_shoulders_known, LshoC, 0:2]) / 2
result[both_shoulders_known, neckG, 2] = np.minimum(joints[both_shoulders_known, RshoC, 2],
joints[both_shoulders_known, LshoC, 2])
meta['joints'] = result
return meta
def convert_mask(self, mask, global_config, joints = None):
mask = np.repeat(mask[:,:,np.newaxis], global_config.num_layers, axis=2)
return mask
def source(self):
return self.hdf5_source
# more information on keypoints mapping is here
# https://github.com/ZheC/Realtime_Multi-Person_Pose_Estimation/issues/7
Configs["Canonical"] = CanonicalConfig
def GetConfig(config_name):
config = Configs[config_name]()
dct = config.parts[:]
dct = [None]*(config.num_layers-len(dct)) + dct
for (i,(fr,to)) in enumerate(config.limbs_conn):
name = "%s->%s" % (config.parts[fr], config.parts[to])
print(i, name)
x = i*2
y = i*2+1
assert dct[x] is None
dct[x] = name + ":x"
assert dct[y] is None
dct[y] = name + ":y"
from pprint import pprint
pprint(dict(zip(range(len(dct)), dct)))
return config
if __name__ == "__main__":
# test it
foo = GetConfig("Canonical")
print(foo.paf_layers, foo.heat_layers)
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint for a cron job to run bisects integration tests."""
import datetime
import time
from google.appengine.api import mail
from dashboard import auto_bisect
from dashboard import request_handler
from dashboard import start_try_job
from dashboard.common import datastore_hooks
from dashboard.common import stored_object
from dashboard.common import utils
from dashboard.models import try_job
_BISECT_FYI_CONFIGS_KEY = 'bisect_fyi_config_map'
_TEST_FAILURE_TEMPLATE = """
Test Name: %(test_name)s
Error:%(error)s
Bisect Config: %(info)s
"""
class BisectFYIHandler(request_handler.RequestHandler):
"""URL endpoint for a cron job to run bisects integration tests."""
def get(self):
"""A get request is the same a post request for this endpoint."""
self.post()
def post(self):
"""Runs auto bisects."""
datastore_hooks.SetPrivilegedRequest()
_RunBisectIntegrationTests()
def _RunBisectIntegrationTests():
"""Runs bisect jobs with pre determined configs."""
errors_list = {}
bisect_fyi_configs = stored_object.Get(_BISECT_FYI_CONFIGS_KEY)
for test_name, config in bisect_fyi_configs.iteritems():
if config.get('bisect_config'):
results = _StartBisectFYIJob(test_name, config.get('bisect_config'))
if 'error' in results:
errors_list[test_name] = {
'error': results['error'],
'info': config.get('bisect_config')}
else:
errors_list[test_name] = {'error': 'Missing bisect config.'}
if errors_list:
_SendEmailAlert(errors_list)
def _StartBisectFYIJob(test_name, bisect_config):
"""Re-starts a bisect-job after modifying it's config based on run count.
Args:
test_name: Name of the test case.
    bisect_config: A dictionary of parameters for the bisect job.
Returns:
If successful, a dict containing "issue_id" and "issue_url" for the
bisect job. Otherwise, a dict containing "error", with some description
of the reason why a job wasn't started.
"""
try:
bisect_job = _MakeBisectFYITryJob(test_name, bisect_config)
except auto_bisect.NotBisectableError as e:
return {'error': e.message}
try:
bisect_result = start_try_job.PerformBisect(bisect_job)
except request_handler.InvalidInputError as e:
bisect_result = {'error': e.message}
if 'error' in bisect_result:
if bisect_job.key:
bisect_job.key.delete()
return bisect_result
def _MakeBisectFYITryJob(test_name, bisect_config):
"""Creates a TryJob entity with the bisect config.
Args:
test_name: Name of the test case.
bisect_config: A dictionary of parameters for a bisect job.
Returns:
A TryJob entity, which has not yet been put in the datastore.
Raises:
NotBisectableError: A valid bisect config could not be created.
"""
bisect_bot = bisect_config.get('recipe_tester_name')
if not bisect_bot:
raise auto_bisect.NotBisectableError('Could not select a bisect bot.')
config_python_string = utils.BisectConfigPythonString(bisect_config)
bisect_job = try_job.TryJob(
bot=bisect_bot,
config=config_python_string,
bug_id=bisect_config.get('bug_id', -1),
master_name=bisect_config.get('master_name', 'ChromiumPerf'),
job_type='bisect-fyi',
job_name=test_name)
return bisect_job
def VerifyBisectFYIResults(job):
"""Verifies the bisect results against expected results in test config.
Args:
job: TryJob entity.
Returns:
A message with the missing properties, otherwise returns an empty string.
"""
expected_results = _GetBisectConfig(job).get('expected_results')
try:
utils.Validate(expected_results, job.results_data)
except ValueError as e:
return 'Bisect result is not as expected: %s.' % e
return ''
def IsBugUpdated(job, issue_tracker):
"""Verifies whether bug is updated with the bisect results."""
comment_info = issue_tracker.GetLastBugCommentsAndTimestamp(job.bug_id)
if not comment_info:
return False
last_comment_timestamp = datetime.datetime.strptime(
comment_info['timestamp'], '%Y-%m-%dT%H:%M:%S')
bug_update_timestamp = time.mktime(last_comment_timestamp.timetuple())
try_job_timestamp = time.mktime(job.last_ran_timestamp.timetuple())
if bug_update_timestamp <= try_job_timestamp:
return False
return True
def _TextBody(errors_list):
"""Returns the text body for an email about test failures."""
test_alerts = []
for test_name, data in errors_list.iteritems():
test_alerts.append(
_TEST_FAILURE_TEMPLATE % {
'test_name': test_name,
'error': data.get('error'),
'info': data.get('info', '')
}
)
return '.\n'.join(test_alerts)
def _SendEmailAlert(errors_list):
"""Sends email alert about bisect integration tests failures."""
mail.send_mail(
sender='gasper-alerts@google.com',
to='chrome-performance-monitoring-alerts@google.com',
subject='[Bisect FYI Alert]Failed to run bisect integration tests.',
body=_TextBody(errors_list))
def _GetBisectConfig(job):
bisect_fyi_configs = stored_object.Get(_BISECT_FYI_CONFIGS_KEY)
for test_name, config in bisect_fyi_configs.iteritems():
if job.job_name == test_name:
return config
return {}
|
"""PHI5 index of filename-authorname. This should be redone as for TLG."""
PHI5_INDEX = {'LAT0926.TXT': 'Marcus Manilius, Manilius', 'LAT0076.TXT': 'Gaius Cassius Hemina', 'LAT0500.TXT': 'Lucius Licinius Crassus', 'LAT0406.TXT': 'Publius Alfenus Varus', 'LAT0512.TXT': 'Marcus Duronius', 'LAT1351.TXT': 'Cornelius Tacitus', 'LAT0518.TXT': 'Aulus Furius Antias', 'LAT0546.TXT': 'Gaius Licinius Mucianus', 'LAT0552.TXT': 'Quintus Lutatius Catulus', 'LAT1020.TXT': 'Publius Papinius Statius', 'LAT2003.TXT': 'Caelius Apicius', 'LAT0031.TXT': 'Cornelia, mater Gracchorum', 'LAT0149.TXT': 'Carmen Arvale', 'LAT0426.TXT': 'Bellum Africum [Anonymous]', 'LAT0079.TXT': 'Hostius', 'LAT0678.TXT': 'Quintus Valerius Soranus', 'LAT0448.TXT': 'Gaius Iulius Caesar, Caesar', 'LAT0594.TXT': 'Lucius Novius', 'LAT0470.TXT': 'Marcus Porcius Cato Uticensis', 'LAT1056.TXT': 'Vitruvius', 'LAT0586.TXT': 'Mummius', 'LAT0005.TXT': 'Aquilius, comoed.', 'LAT0932.TXT': 'M. Valerius Messalla Corvinus', 'LAT0709.TXT': 'Domitius Marsus', 'LAT0558.TXT': 'Gaius Cilnius Maecenas', 'LAT0446.TXT': 'Quintus Servilius Caepio', 'LAT1248.TXT': 'Marcus Cornelius Fronto', 'LAT0436.TXT': 'Marcus Iunius Brutus [tyr.]', 'LAT0027.TXT': 'Lucius Cincius Alimentus', 'LAT1260.TXT': 'Hadrianus', 'LAT0016.TXT': 'Lucius Calpurnius Piso Frugi', 'LAT1339.TXT': 'Septimius Serenus', 'LAT0568.TXT': 'Gnaeus Matius', 'LAT0532.TXT': 'Quintus Hortensius Hortalus', 'LAT1306.TXT': 'Lucius Neratius Priscus', 'LAT0064.TXT': 'Gaius Fannius', 'LAT2028.TXT': 'Chalcidius', 'LAT0905.TXT': 'Marcus Antistius Labeo', 'LAT0917.TXT': 'Marcus Annaeus Lucanus', 'LAT0007.TXT': 'Atilius', 'LAT1032.TXT': 'Vagellius', 'LAT0969.TXT': 'Aulus Persius Flaccus', 'LAT1908.TXT': 'Gallus Antipater', 'LAT0664.TXT': 'Gaius Trebatius Testa', 'LAT0025.TXT': 'Marcus Porcius Cato M.f.M.n.', 'LAT0536.TXT': 'Decimus Laberius', 'LAT1224.TXT': 'Marcus Aurelius', 'LAT0445.TXT': 'Gaius vel Lucius Caepasius', 'LAT1235.TXT': 'Didascaliae et Per. in Terentium', 'LAT0454.TXT': 'Marcus Calidius', 'LAT0112.TXT': 'Gnaeus Naevius', 'LAT0676.TXT': 'Valerius Antias', 'LAT0408.TXT': 'Marcus Antonius triumvir', 'LAT1279.TXT': 'Laelius Felix', 'LAT0405.TXT': 'Clodius Tuscus', 'LAT0893.TXT': 'Quintus Horatius Flaccus, Horace', 'LAT1672.TXT': 'Iulius Valerius', 'LAT0824.TXT': 'Cn. Arulenus Caelius Sabinus', 'LAT2002.TXT': 'Albinus, poet.', 'LAT0640.TXT': 'Marcus Aemilius Scaurus', 'LAT2349.TXT': 'Maurus Servius Honoratus, Servius', 'LAT0306.TXT': 'Carmen Devotionis', 'LAT0082.TXT': 'Decimus Iunius Silanus', 'LAT1327.TXT': 'Sabidius', 'LAT0402.TXT': 'Valerius Aedituus', 'LAT0809.TXT': 'Aufidius Bassus', 'LAT0724.TXT': 'Cloatius Verus', 'LAT1357.TXT': 'Imp. Marcus Ulpius Traianus, Trajan', 'LAT0061.TXT': 'Fabius Pictor', 'LAT0836.TXT': 'Aulus Cornelius Celsus', 'LAT0496.TXT': 'Commentarius Anquisit. Sergii', 'LAT9254.TXT': 'Titius, gram.', 'LAT0680.TXT': 'Gaius Valgius Rufus', 'LAT1500.TXT': 'Altercatio Hadr. et Epicteti', 'LAT0878.TXT': 'Gaius Asinius Gallus', 'LAT0460.TXT': 'Gaius Papirius Carbo Arvina', 'LAT1251.TXT': 'Gaius, iur., Gaius', 'LAT2000.TXT': 'Ablabius', 'LAT1257.TXT': 'Granius Licinianus', 'LAT0661.TXT': 'Ticidas', 'LAT0401.TXT': 'Aufustius', 'LAT0537.TXT': 'Titus Labienus', 'LAT0515.TXT': 'Sextus (vel Spurius) Ennius', 'LAT0103.TXT': 'Gnaeus Marcius vates', 'LAT1282.TXT': 'Lentulus, mimus', 'LAT0451.TXT': 'Sinnius Capito', 'LAT0116.TXT': 'Marcus Pacuvius', 'LAT0963.TXT': 'Quintus Remmius Palaemon', 'LAT0002.TXT': 'Titus Annius Luscus', 'LAT0686.TXT': 'P. 
Terentius Varro Atacinus', 'LAT1203.TXT': 'Alfius Avitus', 'LAT0514.TXT': 'Egnatius', 'LAT0418.TXT': 'Titus Quinctius Atta', 'LAT0541.TXT': 'Cn. Cornel. Lentulus Marcell.', 'LAT0727.TXT': 'Cornificius Longus', 'LAT0522.TXT': 'Gaius Aelius Gallus', 'LAT0450.TXT': 'Lucius Iulius Caesar', 'LAT0800.TXT': 'Albinovanus Pedo', 'LAT0614.TXT': 'Q. Pompeius Q.f.A.n. Rufus', 'LAT0134.TXT': 'Publius Terentius Afer, Terence', 'LAT0128.TXT': 'P. Cornel. Scipio Aem. Afr.', 'LAT0455.TXT': 'Gaius Calpurnius Piso', 'LAT0564.TXT': 'Manius Manilius', 'LAT0587.TXT': 'Naevius, iunior', 'LAT0452.TXT': 'Gaius Iulius Caesar Strabo', 'LAT1294.TXT': 'Marcus Valerius Martialis', 'LAT0875.TXT': 'Cn. Cornel. Lentulus Gaetulicus', 'LAT1041.TXT': 'Pseudo-Varro', 'LAT1100.TXT': 'Calpurnius Flaccus', 'LAT0650.TXT': 'Sueius', 'LAT9221.TXT': 'Paulus Quaestor', 'LAT1023.TXT': 'Sulpicia, Caleni uxor', 'LAT0692.TXT': 'Appendix Vergiliana', 'LAT0815.TXT': 'Bruttedius Niger', 'LAT1234.TXT': 'Didascaliae et Argum. in Plautum', 'LAT1005.TXT': 'Rabirius', 'LAT0592.TXT': 'Novius, comoed.', 'LAT0419.TXT': 'Lucius Orbilius Pupillus', 'LAT1318.TXT': 'C. Plinius Caecilius Secundus, Pliny', 'LAT0458.TXT': 'Publius Cannutius', 'LAT0993.TXT': 'Precatio Terrae', 'LAT0412.TXT': 'Gaius Aquilius Gallus', 'LAT0130.TXT': 'P. Cornel. Scipio Nasica Ser.', 'LAT1506.TXT': 'Anonymi Fragmenta de Iure Fisci', 'LAT1011.TXT': 'Scribonius Largus', 'LAT0416.TXT': 'Lucius Ateius Praetextatus', 'LAT0400.TXT': 'Lucius Accius', 'LAT0966.TXT': 'Passienus Crispus', 'LAT0987.TXT': 'Publius Pomponius Secundus', 'LAT0137.TXT': 'Titinius', 'LAT1380.TXT': 'Philumenus medicus', 'LAT0010.TXT': 'Marcus Iunius Brutus [iur.]', 'LAT0118.TXT': 'L. Aemilius L.f.M.n. Paulus', 'LAT1297.TXT': 'Marullus', 'LAT0670.TXT': 'Quintus Aelius Tubero', 'LAT0527.TXT': 'Gannius', 'LAT0143.TXT': 'Trabea', 'LAT9510.TXT': 'Anonymi Grammatici', 'LAT0827.TXT': 'Caesellius Vindex', 'LAT0410.TXT': 'Aprissius (?)', 'LAT0703.TXT': 'Arbonius Silo', 'LAT0420.TXT': 'Publius Aufidius Namusa', 'LAT0674.TXT': 'Valerius, comoed.', 'LAT0469.TXT': 'Lucius Cassius Longinus', 'LAT0660.TXT': 'Albius Tibullus', 'LAT0321.TXT': 'Porcius Licinus', 'LAT0606.TXT': 'Lucius Marcius Philippus', 'LAT0562.TXT': 'Manilius, poet.', 'LAT0690.TXT': 'Publius Vergilius Maro, Virgil, Vergil', 'LAT2097.TXT': 'Sextus Paconianus', 'LAT0842.TXT': 'Gaius Clodius Licinus', 'LAT1512.TXT': 'Pomponius Porphyrio', 'LAT0984.TXT': 'Pompeius Trogus', 'LAT0821.TXT': 'Bucolica Einsidlensia', 'LAT0104.TXT': 'Gaius Memmius', 'LAT0622.TXT': 'Publilius Syrus', 'LAT0013.TXT': 'Caecilius Statius', 'LAT1291.TXT': 'Marianus', 'LAT0560.TXT': 'Helvius Mancia', 'LAT0908.TXT': 'Attius Labeo', 'LAT1377.TXT': 'Fragmenta Bobiensia', 'LAT2335.TXT': 'Anonymi de Differentiis [Fronto]', 'LAT1209.TXT': 'Annianus', 'LAT0635.TXT': 'Publius Saturius', 'LAT0127.TXT': 'P. Cornel. Scipio Afr. ma.', 'LAT2305.TXT': 'Caelius Aurelianus', 'LAT0935.TXT': 'Iulius Modestus', 'LAT0425.TXT': 'Publius Rutilius Lupus', 'LAT0959.TXT': 'Publius Ovidius Naso', 'LAT0484.TXT': 'Lucius Cincius', 'LAT0806.TXT': 'Gaius Ateius Capito', 'LAT0119.TXT': 'Titus Maccius Plautus', 'LAT0656.TXT': 'Servius Sulpicius Rufus', 'LAT2301.TXT': 'Q. 
Aurelius Memmius Symmachus', 'LAT0091.TXT': 'Licinius Imbrex', 'LAT0899.TXT': 'Hyginus Astronomus', 'LAT0923.TXT': 'Aemilius Macer', 'LAT0624.TXT': 'Quintus Claudius Quadrigarius', 'LAT0530.TXT': 'Aulus Hirtius', 'LAT0534.TXT': 'Iuventius, comoed.', 'LAT0466.TXT': 'Aulus Cascellius', 'LAT0140.TXT': 'Gaius Titius', 'LAT0034.TXT': 'Gaius Scribonius Curio avus', 'LAT1050.TXT': 'Lucius Verginius Rufus', 'LAT0538.TXT': 'Laevius', 'LAT0492.TXT': 'Commentarii Augurum', 'LAT1370.TXT': 'Quintus Terentius Scaurus', 'LAT0854.TXT': 'Cornificius Gallus', 'LAT0085.TXT': 'Gaius Laelius Sapiens', 'LAT0444.TXT': 'Marcus Caelius Rufus', 'LAT0535.TXT': 'Marcus Iuventius Laterensis', 'LAT0533.TXT': 'Gaius Iulius Hyginus', 'LAT1212.TXT': 'Apuleius Madaurensis', 'LAT0851.TXT': 'Cornelius Severus', 'LAT0556.TXT': 'Gaius Licinius Macer', 'LAT0300.TXT': 'Sempronius Asellio', 'LAT0658.TXT': 'Tabulae Censoriae', 'LAT0869.TXT': 'Marcus Verrius Flaccus', 'LAT0615.TXT': 'Q. Pompeius Q.f.Q.n. Rufus', 'LAT0682.TXT': 'Lucius Varius Rufus', 'LAT1342.TXT': 'Siculus Flaccus', 'LAT0524.TXT': 'Gaius Cornelius Gallus, Gallus', 'LAT0881.TXT': 'Claudius Caesar Germanicus', 'LAT0486.TXT': 'Gaius Helvius Cinna', 'LAT0125.TXT': 'Publius Mucius Scaevola', 'LAT1206.TXT': 'Lucius Ampelius', 'LAT0646.TXT': 'Lucius Cornelius Sisenna', 'LAT0978.TXT': 'Gaius Plinius Secundus, Pliny', 'LAT0088.TXT': 'M. Aemilius Lepidus Porcina', 'LAT0301.TXT': 'Gnaeus Domitius Ahenobarbus', 'LAT0117.TXT': 'Papinius, epigram.', 'LAT0502.TXT': 'Aulus Cremutius Cordus', 'LAT0902.TXT': 'Iulius Africanus', 'LAT1266.TXT': 'Hyginus Gromaticus', 'LAT1276.TXT': 'Decimus Iunius Iuvenalis, Juvenal', 'LAT0914.TXT': 'Titus Livius, Livy', 'LAT0004.TXT': 'Appius Claudius Caecus', 'LAT0473.TXT': 'Q. Lutatius Catulus iunior', 'LAT0414.TXT': 'Lucius Arruntius', 'LAT0866.TXT': 'Fenestella', 'LAT0636.TXT': 'Quintus Mucius Scaevola', 'LAT0625.TXT': 'Lucius Quinctius', 'LAT0146.TXT': 'Sextus Turpilius', 'LAT0730.TXT': 'Tarquitius Priscus', 'LAT1254.TXT': 'Aulus Gellius', 'LAT0642.TXT': 'Sevius Nicanor', 'LAT0442.TXT': 'Aulus Caecina', 'LAT0303.TXT': 'Aurelius Opillus', 'LAT1336.TXT': 'Scaevus Memor', 'LAT0590.TXT': 'Publius Nigidius Figulus', 'LAT0432.TXT': 'Marcus Furius Bibaculus', 'LAT0860.TXT': 'Quintus Curtius Rufus', 'LAT0929.TXT': 'Pomponius Mela', 'LAT1044.TXT': 'Velleius Paterculus', 'LAT0478.TXT': 'Quintus Tullius Cicero', 'LAT1038.TXT': 'Valerius Maximus', 'LAT0920.TXT': 'Lucilius iunior', 'LAT0413.TXT': 'Gavius Bassus', 'LAT0990.TXT': 'Precatio Omnium Herbarum', 'LAT0315.TXT': 'Marcus Iunius Gracchanus', 'LAT1014.TXT': 'Lucius Annaeus Seneca senior', 'LAT0073.TXT': 'Gaius Sempronius Gracchus', 'LAT0330.TXT': 'Volcacius Sedigitus', 'LAT0620.TXT': 'Sextus Propertius', 'LAT0591.TXT': 'Ninnius Crassus', 'LAT0644.TXT': 'Sextilius Ena', 'LAT0526.TXT': 'Gaius Servilius Glaucia', 'LAT0618.TXT': 'Lucius Pomponius Bononiensis', 'LAT1053.TXT': 'Vibius Crispus', 'LAT0430.TXT': 'Bellum Hispaniense [Anonymous]', 'LAT0094.TXT': 'Lucius Livius Andronicus', 'LAT0706.TXT': 'Carmen de Bello Aegyptiaco', 'LAT1518.TXT': 'Terentianus Maurus', 'LAT1321.TXT': 'Sextus Pomponius', 'LAT0309.TXT': 'Carmen Evocationis', 'LAT2300.TXT': 'Aemilius Sura', 'LAT0122.TXT': 'Aulus Postumius Albinus', 'LAT0694.TXT': 'Volumnius', 'LAT1604.TXT': 'Iulius Atherianus', 'LAT0600.TXT': 'Gaius Oppius', 'LAT3211.TXT': 'Argum. Aen. 
et Tetrast.', 'LAT0672.TXT': 'Turranius Niger', 'LAT0070.TXT': 'Gnaeus Gellius', 'LAT1515.TXT': 'Quintus Serenus (Sammonicus)', 'LAT0474.TXT': 'Marcus Tullius Cicero, Cicero, Tully', 'LAT1345.TXT': 'Silius Italicus', 'LAT1374.TXT': 'Velius Longus', 'LAT0830.TXT': 'Titus Calpurnius Siculus', 'LAT0857.TXT': 'Lucius Annaeus Cornutus', 'LAT2806.TXT': 'Iustinianus, Justinian, Digest', 'LAT0884.TXT': 'Gracchus, trag.', 'LAT1348.TXT': 'Gaius Suetonius Tranquillus', 'LAT0938.TXT': 'Iulius Montanus', 'LAT0721.TXT': 'Antonius Panurgus', 'LAT0944.TXT': 'Imperator Nero', 'LAT0582.TXT': 'Q. Caecilius Metellus Numid.', 'LAT0648.TXT': 'Staberius Eros', 'LAT0981.TXT': 'Gaius Asinius Pollio', 'LAT0638.TXT': 'Q. Mucius Scaevola [pontifex]', 'LAT0100.TXT': 'Luscius Lanuvinus', 'LAT0312.TXT': 'Fabius Dossennus', 'LAT2302.TXT': 'L. Aurel. Avianius Symmachus', 'LAT0996.TXT': 'Marcus Valerius Probus', 'LAT0019.TXT': 'Gaius Papirius Carbo', 'LAT0588.TXT': 'Cornelius Nepos', 'LAT0631.TXT': 'Gaius Sallustius Crispus', 'LAT1285.TXT': 'Lucius Volusius Maecianus', 'LAT2123.TXT': 'Publilius Optatianus Porfyrius', 'LAT1029.TXT': 'Turnus', 'LAT1227.TXT': 'Balbus, grom.', 'LAT0404.TXT': 'Lucius Afranius', 'LAT0628.TXT': 'Publius Rutilius Rufus', 'LAT0550.TXT': 'Titus Lucretius Carus', 'LAT0037.TXT': 'Gaius Scribonius Curio pater', 'LAT0058.TXT': 'Q. Fabius Maximus Servilianus', 'LAT0456.TXT': 'Gaius Licinius Macer Calvus', 'LAT0423.TXT': 'Lucius Herennius Balbus', 'LAT0516.TXT': 'Gaius Erucius', 'LAT1218.TXT': 'Sentius Augurinus', 'LAT0067.TXT': 'Favorinus', 'LAT0043.TXT': 'Quintus Ennius', 'LAT9969.TXT': 'Vita Iuvenalis', 'LAT0634.TXT': 'Santra', 'LAT1035.TXT': 'Gaius Valerius Flaccus', 'LAT0528.TXT': 'Granius Flaccus', 'LAT0911.TXT': 'Laus Pisonis', 'LAT0652.TXT': 'Lucius Cornelius Sulla', 'LAT2456.TXT': 'Parthenius Presbyter', 'LAT1263.TXT': 'Hyginus, myth.', 'LAT0494.TXT': 'Commentarii Consulares', 'LAT0498.TXT': 'Gaius Aurelius Cotta', 'LAT1229.TXT': 'Flavius Caper', 'LAT0109.TXT': 'Q. Caecilius Metellus Maced.', 'LAT0630.TXT': 'Sacra Argeorum', 'LAT9505.TXT': 'Anonymi Comici et Tragici', 'LAT0662.TXT': 'Marcus Tullius Tiro', 'LAT0487.TXT': 'Publius Clodius Pulcher', 'LAT1000.TXT': 'Pupius (?)', 'LAT2468.TXT': 'Aurelius Augustinus', 'LAT0668.TXT': 'Gnaeus Tremelius Scrofa', 'LAT1363.TXT': 'Aemilius Asper', 'LAT0890.TXT': 'Homerus Latinus', 'LAT1103.TXT': 'Priapea', 'LAT0327.TXT': 'L. Aelius Praeconinus Stilo', 'LAT2331.TXT': 'Scriptores Historiae Augustae', 'LAT0812.TXT': 'Gaius Caesius Bassus', 'LAT9500.TXT': 'Anonymi Epici et Lyrici', 'LAT0576.TXT': 'M. Valerius Messalla Rufus', 'LAT0596.TXT': 'Numitorius', 'LAT0863.TXT': 'Dorcatius', 'LAT0106.TXT': 'Caecilius Metellus', 'LAT0584.TXT': 'Mimi Poetarum Incertorum', 'LAT0488.TXT': 'Servius Clodius', 'LAT0887.TXT': 'Grattius', 'LAT0845.TXT': 'L. Iunius Moderatus Columella', 'LAT0022.TXT': 'Marcus Porcius Cato, Cato', 'LAT1002.TXT': 'Marcus Fabius Quintilianus', 'LAT1242.TXT': 'Annius Florus', 'LAT0409.TXT': 'Quintus Cornificius', 'LAT0540.TXT': 'Tullius Laurea', 'LAT0972.TXT': 'Petronius', 'LAT0975.TXT': 'Phaedrus', 'LAT0803.TXT': 'Quintus Asconius Pedianus', 'LAT0428.TXT': 'Bellum Alexandrinum [Anonymous]', 'LAT0574.TXT': 'Gaius Memmius L. 
f.', 'LAT1236.TXT': 'Sextus Pompeius Festus', 'LAT0472.TXT': 'Gaius Valerius Catullus', 'LAT0097.TXT': 'Gaius Lucilius', 'LAT0510.TXT': 'Publius Cornelius Dolabella', 'LAT2150.TXT': 'Zeno of Verona', 'LAT1017.TXT': 'Lucius Annaeus Seneca iunior', 'LAT0028.TXT': 'Lucius Coelius Antipater', 'LAT0046.TXT': 'Cornelius Epicadus', 'LAT0324.TXT': 'Saserna', 'LAT0490.TXT': 'Publius Cominius', 'LAT1245.TXT': 'Sextus Iulius Frontinus', 'LAT2434.TXT': 'Hilarius Arelatensis', 'LAT0616.TXT': 'Pompilius', 'LAT1221.TXT': 'C. Iul. Caes. Augustus Octavianus', 'LAT1047.TXT': 'Veranius', 'LAT0302.TXT': 'Marcus Antonius', 'LAT0684.TXT': 'Marcus Terentius Varro, Varro'}
|
from .token import Token
def combine_tokens(*tokens):
"""
Combine multiple tokens into one new Token, so that one single usage can be given
for these tokens.
"""
res = []
for token in tokens:
children = token.children
res.extend(children)
return Token(res)
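
# Illustrative use (hypothetical tokens): the children of every input token
# are flattened into one new Token, e.g.
#   combined = combine_tokens(token_a, token_b)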
|
import logger
import time
import unittest
import os
import subprocess
import types
import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
from threading import Thread
import configparser
from TestInput import TestInputSingleton
from security.rbac_base import RbacBase
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
"""
*** IMPORTANT! NEED TO READ BEFORE RUN UI TEST ***
- Server that is used as host UI slave must be in uiconf session (ask IT to get it)
ini file format must follow format below [uiconf]
- Jenkins slave must install python selenium as we import selenium above
#### ini file start here
[global]
username:xxxx
password:xxxx
#ssh_key=/home/xxxx
port:8091
[servers]
1:xxx.xxx.xxx.xxx
2:xxx.xx.xxx.xx
[membase]
rest_username:Administrator
rest_password:xxxxxxx
[uiconf]
browser:chrome
chrome_path:path_to_chrome_driver
selenium_path:path_to_selenium_standalone_server
selenium_ip:UI_slave_IP
selenium_port:4444
selenium_user:username_used_to_login_to_UI_slave
selenium_password:password_used_to_login_to_UI_slave
screenshots:logs/screens
### ini file end here
"""
class BaseUITestCase(unittest.TestCase):
# selenium thread
def _start_selenium(self):
host = self.machine.ip
if host in ['localhost', '127.0.0.1']:
os.system("java -jar -Dwebdriver.chrome.driver=%s "
"%sselenium-server-standalone*.jar > /tmp/selenium.log 2>&1"
% (self.input.ui_conf['chrome_path'],
self.input.ui_conf['selenium_path']))
else:
""" go to remote server with better video driver to display browser """
self.shell.execute_command('{0}start-selenium.bat > /tmp/selenium.log 2>&1 &' \
.format(self.input.ui_conf['selenium_path']))
def _kill_old_drivers(self):
if self.shell.extract_remote_info().type.lower() == 'windows':
self.shell.execute_command('taskkill /F /IM chromedriver.exe')
self.shell.execute_command('taskkill /F /IM chrome.exe')
    def _wait_for_selenium_is_started(self, timeout=10):
        if self.machine.ip in ['localhost', '127.0.0.1']:
            start_time = time.time()
            while (time.time() - start_time) < timeout:
                # read the log once per pass; calling read() twice on the same
                # handle returned '' for the second pattern and leaked the handle
                with open("/tmp/selenium.log") as log:
                    data = log.read()
                if data.find('Started org.openqa.jetty.jetty.Server') > -1 or \
                        data.find('Selenium Server is up and running') > -1:
                    if self._is_selenium_running():
                        time.sleep(1)
                        return
                time.sleep(1)
        else:
            time.sleep(timeout)
def _start_selenium_thread(self):
self.t = Thread(target=self._start_selenium,
name="selenium",
args=())
self.t.start()
def _is_selenium_running(self):
self.log.info("check if selenium is running")
host = self.machine.ip
if host in ['localhost', '127.0.0.1']:
cmd = 'ps -ef|grep selenium-server'
output = subprocess.getstatusoutput(cmd)
if str(output).find('selenium-server-standalone') > -1:
self.log.info("selenium is running")
return True
else:
"""need to add code to go either windows or linux """
# cmd = "ssh {0}@{1} 'bash -s' < 'tasklist |grep selenium-server'"
# .format(self.input.servers[0].ssh_username,
# host)
cmd = "tasklist | grep java"
o, r = self.shell.execute_command(cmd)
# cmd = "ssh {0}@{1} 'bash -s' < 'ps -ef|grep selenium-server'"
if str(o).find('java') > -1:
self.log.info("selenium is running")
return True
return False
def add_built_in_server_user(self, testuser=None, rolelist=None, node=None):
"""
From spock, couchbase server is built with some users that handles
some specific task such as:
cbadminbucket
Default added user is cbadminbucket with admin role
"""
rest = RestConnection(self.master)
versions = rest.get_nodes_versions()
for version in versions:
if "5" > version:
self.log.info("Atleast one of the nodes in the cluster is "
"pre 5.0 version. Hence not creating rbac user "
"for the cluster. RBAC is a 5.0 feature.")
return
if testuser is None:
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'password': 'password'}]
if rolelist is None:
rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
'roles': 'admin'}]
if node is None:
node = self.master
self.log.info("**** add built-in '%s' user to node %s ****" % (testuser[0]["name"],
node.ip))
RbacBase().create_user_source(testuser, 'builtin', node)
self.log.info("**** add '%s' role to '%s' user ****" % (rolelist[0]["roles"],
testuser[0]["name"]))
status = RbacBase().add_user_role(rolelist, RestConnection(node), 'builtin')
return status
def setUp(self):
try:
self.log = logger.Logger.get_logger()
self.input = TestInputSingleton.input
self.servers = self.input.servers
self.browser = self.input.ui_conf['browser']
self.replica = self.input.param("replica", 1)
self.case_number = self.input.param("case_number", 0)
self.machine = self.input.ui_conf['server']
self.driver = None
self.shell = RemoteMachineShellConnection(self.machine)
            # avoid cleanup if the previous test has already been torn down
if not self.input.param("skip_cleanup", True) \
or self.case_number == 1:
self.tearDown()
self._log_start(self)
self._kill_old_drivers()
# thread for selenium server
if not self._is_selenium_running():
self.log.info('start selenium')
self._start_selenium_thread()
self._wait_for_selenium_is_started()
self.log.info('start selenium session')
if self.browser == 'firefox':
self.log.info("Test Couchbase Server UI in Firefox")
self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
.format(self.machine.ip,
self.machine.port),
desired_capabilities=DesiredCapabilities.FIREFOX)
elif self.browser == 'chrome':
self.log.info("Test Couchbase Server UI in Chrome")
self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
.format(self.machine.ip,
self.machine.port),
desired_capabilities=DesiredCapabilities.CHROME)
""" need to add support test on Internet Explorer """
self.log.info('*** selenium started ***')
self.driver.get("http://" + self.servers[0].ip + ":8091")
self.username = self.input.membase_settings.rest_username
self.password = self.input.membase_settings.rest_password
            ### temporary workaround: maximize_window is buggy
self.driver.set_window_size(2048, 1200)
###
self.driver.maximize_window()
except Exception as ex:
self.input.test_params["stop-on-failure"] = True
self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
self.fail(ex)
@staticmethod
def _log_start(self):
try:
msg = "{0} : {1} started ".format(datetime.datetime.now(),
self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
@staticmethod
def _log_finish(self):
try:
msg = "{0} : {1} finished ".format(datetime.datetime.now(),
self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
def tearDown(self):
try:
test_failed = len(self._resultForDoCleanups.errors)
if self.driver and test_failed:
BaseHelper(self).create_screenshot()
if self.driver:
self.driver.close()
if test_failed and TestInputSingleton.input.param("stop-on-failure", False):
print("test fails, teardown will be skipped!!!")
return
rest = RestConnection(self.servers[0])
try:
reb_status = rest._rebalance_progress_status()
except ValueError as e:
if str(e) == 'No JSON object could be decoded':
print("cluster not initialized!!!")
return
if reb_status == 'running':
stopped = rest.stop_rebalance()
self.assertTrue(stopped, msg="unable to stop rebalance")
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
for server in self.servers:
ClusterOperationHelper.cleanup_cluster([server])
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
except Exception as e:
raise e
finally:
if self.driver:
self.shell.disconnect()
def sleep(self, timeout=1, message=""):
self.log.info("sleep for {0} secs. {1} ...".format(timeout, message))
time.sleep(timeout)
class Control:
def __init__(self, selenium, by=None, web_element=None):
self.selenium = selenium
self.by = by
if by:
try:
self.web_element = self.selenium.find_element_by_xpath(by)
self.present = True
if self.web_element is None:
self.present = False
except NoSuchElementException as ex:
self.present = False
else:
self.web_element = web_element
self.present = True
def highlightElement(self):
if self.by:
print(("document.evaluate(\"{0}\", document, null, XPathResult.ANY_TYPE, null).iterateNext().setAttribute('style','background-color:yellow');".format(self.by)))
self.selenium.execute_script("document.evaluate(\"{0}\",document, null, XPathResult.ANY_TYPE, null).iterateNext().setAttribute('style','background-color:yellow');".format(self.by))
def type_native(self, text):
# In OS X, Ctrl-A doesn't work to select all, instead Command+A has to be used.
key = Keys.CONTROL
if self.selenium.desired_capabilities.get('platform').lower() == 'mac':
key = Keys.COMMAND
ActionChains(self.selenium).click(self.web_element).perform()
ActionChains(self.selenium).key_down(key).perform()
ActionChains(self.selenium).send_keys('a').perform()
ActionChains(self.selenium).key_up(key).perform()
ActionChains(self.selenium).send_keys(Keys.DELETE).perform()
ActionChains(self.selenium).send_keys(text).perform()
def click(self, highlight=True):
if highlight:
self.highlightElement()
self.web_element.click()
def click_native(self):
ActionChains(self.selenium).move_to_element(self.web_element).perform()
ActionChains(self.selenium).click(self.web_element).perform()
def click_with_mouse_over(self):
ActionChains(self.selenium).move_to_element(self.web_element).perform()
ActionChains(self.selenium).click(self.web_element).perform()
ActionChains(self.selenium).key_down(Keys.ENTER).perform()
ActionChains(self.selenium).key_up(Keys.ENTER).perform()
def type(self, message, is_pwd=False):
if message:
self.highlightElement()
if not is_pwd:
self.web_element.clear()
            # check for str (not bytes): bytes.find would require a bytes
            # pattern, and send_keys text arrives as str
            if isinstance(message, str) and message.find('\\') > -1:
for symb in list(message):
if symb == '\\':
self.web_element.send_keys(Keys.DIVIDE)
else:
self.web_element.send_keys(symb)
else:
self.web_element.send_keys(message)
def check(self, setTrue=True):
if setTrue:
if not self.is_checked():
self.click()
else:
if self.is_checked():
self.click()
def is_present(self):
return self.present
def is_displayed(self):
return self.present and self.web_element.is_displayed()
def is_checked(self):
checked = self.web_element.get_attribute("checked")
return checked is not None
def get_text(self):
self.highlightElement()
return self.web_element.text
def get_attribute(self, atr):
return self.web_element.get_attribute(atr)
def select(self, label=None, value=None):
element = Select(self.web_element)
if label:
element.select_by_visible_text(label)
return
if value:
element.select_by_value(value)
return
def mouse_over(self):
ActionChains(self.selenium).move_to_element(self.web_element).perform()
class ControlsHelper():
def __init__(self, driver):
self.driver = driver
file = "pytests/ui/uilocators-spock.conf"
config = configparser.ConfigParser()
config.read(file)
self.locators = config
def find_control(self, section, locator, parent_locator=None, text=None):
by = self._find_by(section, locator, parent_locator)
if text:
by = by.format(text)
return Control(self.driver, by=by)
def find_controls(self, section, locator, parent_locator=None):
by = self._find_by(section, locator, parent_locator)
controls = []
elements = self.driver.find_elements_by_xpath(by)
for element in elements:
controls.append(Control(self.driver, web_element=element))
return controls
def find_first_visible(self, section, locator, parent_locator=None, text=None):
by = self._find_by(section, locator, parent_locator)
if text:
by = by.format(text)
elements = self.driver.find_elements_by_xpath(by)
for element in elements:
try:
if element.is_displayed():
return Control(self.driver, web_element=element)
except StaleElementReferenceException:
pass
return None
def _find_by(self, section, locator, parent_locator=None):
if parent_locator:
return self.locators.get(section, parent_locator) + \
self.locators.get(section, locator)
else:
return self.locators.get(section, locator)
class BaseHelperControls:
def __init__(self, driver):
helper = ControlsHelper(driver)
self._user_field = helper.find_control('login', 'user_field')
self._user_password = helper.find_control('login', 'password_field')
self._login_btn = helper.find_control('login', 'login_btn')
self._logout_btn = helper.find_control('login', 'logout_btn')
self._user_menu_show = helper.find_control('login', 'user_menu_show')
self.error = helper.find_control('login', 'error')
class BaseHelper:
def __init__(self, tc):
self.tc = tc
self.controls = BaseHelperControls(self.tc.driver)
self.wait = WebDriverWait(tc.driver, timeout=100)
def wait_ajax_loaded(self):
try:
pass
# self.wait.until_not(lambda fn: self.controls.ajax_spinner.is_displayed(),
# "Page is still loaded")
except StaleElementReferenceException:
pass
def create_screenshot(self):
path_screen = self.tc.input.ui_conf['screenshots'] or 'logs/screens'
full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
self.tc.log.info('screenshot is available: %s' % full_path)
if not os.path.exists(path_screen):
os.mkdir(path_screen)
self.tc.driver.get_screenshot_as_file(os.path.abspath(full_path))
def login(self, user=None, password=None):
self.tc.log.info("Try to login to Couchbase Server in browser")
if not user:
user = self.tc.input.membase_settings.rest_username
if not password:
password = self.tc.input.membase_settings.rest_password
self.wait.until(lambda fn: self.controls._user_field.is_displayed(),
"Username field is not displayed in %d sec" % (self.wait._timeout))
self.controls._user_field.type(user)
self.wait.until(lambda fn: self.controls._user_password.is_displayed(),
"Password field is not displayed in %d sec" % (self.wait._timeout))
self.controls._user_password.type(password, is_pwd=True)
self.wait.until(lambda fn: self.controls._login_btn.is_displayed(),
"Login Button is not displayed in %d sec" % (self.wait._timeout))
self.controls._login_btn.click()
self.tc.log.info("user %s is logged in" % user)
def logout(self):
self.tc.log.info("Try to logout")
self.controls._user_menu_show.click()
# self.wait.until(lambda fn: self.controls._logout_btn.is_displayed(),
# "Logout Button is not displayed in %d sec" % (self.wait._timeout))
self.controls._logout_btn.click()
time.sleep(3)
self.tc.log.info("You are logged out")
def is_logged_in(self):
self.wait.until(lambda fn: self.controls._logout_btn.is_displayed(),
"Logout Button is not displayed in %d sec" % (self.wait._timeout))
return self.controls._logout_btn.is_displayed()
def wait_for_login_page(self):
count = 0
while not (self.controls._user_field.is_displayed() or count >= 6):
self.tc.log.info("Login page not yet displayed.. sleeping for 10 secs")
time.sleep(10)
count += 1
def loadSampleBucket(self, node, bucketName):
self.tc.log.info("Loading sample bucket %s", bucketName)
shell = RemoteMachineShellConnection(node)
username = self.tc.input.membase_settings.rest_username
password = self.tc.input.membase_settings.rest_password
sample_bucket_path = "/opt/couchbase/samples/%s-sample.zip" % bucketName
command = '/opt/couchbase/bin/cbdocloader -n ' + node.ip + ':' + \
node.port + ' -u ' + username + ' -p ' + password + \
' -b ' + bucketName + ' -s 100 ' + sample_bucket_path
self.tc.log.info('Command: %s ', command)
o, r = shell.execute_command(command)
shell.log_command_output(o, r)
self.tc.log.info("Done loading sample bucket %s", bucketName)
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import argparse, os
import pickle
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import multi_gpu_model
def autoencoder_model(input_dims):
"""
    Defines a Keras model for performing anomaly detection.
    This model is based on a simple dense autoencoder.
    PARAMS
    ======
        input_dims (integer) - number of dimensions of the input features
RETURN
======
Model (tf.keras.models.Model) - the Keras model of our autoencoder
"""
# Autoencoder definition:
inputLayer = Input(shape=(input_dims,))
h = Dense(64, activation="relu")(inputLayer)
h = Dense(64, activation="relu")(h)
h = Dense(8, activation="relu")(h)
h = Dense(64, activation="relu")(h)
h = Dense(64, activation="relu")(h)
h = Dense(input_dims, activation=None)(h)
return Model(inputs=inputLayer, outputs=h)
def parse_arguments():
"""
Parse the command line arguments passed when running this training script
RETURN
======
        args (argparse.Namespace) - the parsed command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--n_mels', type=int, default=64)
parser.add_argument('--frame', type=int, default=5)
parser.add_argument('--learning-rate', type=float, default=0.01)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--gpu-count', type=int, default=os.environ['SM_NUM_GPUS'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--training', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
args, _ = parser.parse_known_args()
return args
def train(training_dir, model_dir, n_mels, frame, lr, batch_size, epochs, gpu_count):
"""
Main training function.
PARAMS
======
training_dir (string) - location where the training data are
model_dir (string) - location where to store the model artifacts
n_mels (integer) - number of Mel buckets to build the spectrograms
        frame (integer) - number of sliding windows to use to slice the Mel spectrogram
lr (float) - learning rate
batch_size (integer) - batch size
epochs (integer) - number of epochs
        gpu_count (integer) - number of GPUs to distribute the job across
"""
# Load training data:
train_data_file = os.path.join(training_dir, 'train_data.pkl')
with open(train_data_file, 'rb') as f:
train_data = pickle.load(f)
# Builds the model:
model = autoencoder_model(n_mels * frame)
print(model.summary())
if gpu_count > 1:
model = multi_gpu_model(model, gpus=gpu_count)
# Model preparation:
model.compile(
loss='mean_squared_error',
optimizer=Adam(learning_rate=lr),
metrics=['accuracy']
)
# Model training: this is an autoencoder, we
# use the same data for training and validation:
history = model.fit(
train_data,
train_data,
batch_size=batch_size,
validation_split=0.1,
epochs=epochs,
shuffle=True,
verbose=2
)
# Save the trained model:
os.makedirs(os.path.join(model_dir, 'model/1'), exist_ok=True)
model.save(os.path.join(model_dir, 'model/1'))
if __name__ == '__main__':
# Initialization:
tf.random.set_seed(42)
# Parsing command line arguments:
args = parse_arguments()
epochs = args.epochs
n_mels = args.n_mels
frame = args.frame
lr = args.learning_rate
batch_size = args.batch_size
gpu_count = args.gpu_count
model_dir = args.model_dir
training_dir = args.training
# Launch the training:
train(training_dir, model_dir, n_mels, frame, lr, batch_size, epochs, gpu_count)
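# Hedged local-test note (not part of the SageMaker entry point): outside
# SageMaker the SM_* environment variables read by parse_arguments() are
# undefined and raise KeyError, so they must be set first. The script name
# and paths below are illustrative assumptions:
#   SM_NUM_GPUS=0 SM_MODEL_DIR=/tmp/model SM_CHANNEL_TRAINING=/tmp/data \
#       python train.py --epochs 2 --batch-size 32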
|
import sys
import os
from lxml import etree
"""
Description:
This script modifies UPS files in the current directory from which you run it,
either a single named file or all files with a given extension. It updates the
<MomentumSolver> node in the ups file to be compatible with the latest code
(if it currently is not) by adding a <wall_closure> node with a default model
(constant coefficient).
Note that your original ups file is stored with a *.orig_ups extension.
Usage:
python update_table_entry.py file_name
file_name: modify the file with this name to update the UPS file.
example: python update_table_entry.py myinput.ups
--do_all_ups_files: do all files in this directory with an .ups extension
--do_all_xml_files: do all files in this directory with an .xml extension
--do_all_ups_ask_permission : do all files in this directory with an .ups extension but ask permission first per file.
[--help, -help, -h]: print this message
"""
def fix_ups(filename):
parser = etree.XMLParser(remove_comments=False, remove_blank_text=True)
tree = etree.parse(filename, parser)
root = tree.getroot()
CFD = root.find('CFD')
Arches = CFD.find('ARCHES')
ExplicitSolver = Arches.find('ExplicitSolver')
MomentumSolver = ExplicitSolver.find('MomentumSolver')
newEntry = etree.Element('wall_closure')
newEntry.attrib['type'] = 'constant_coefficient'
MomentumSolver.insert(0,newEntry)
ConvScheme = MomentumSolver.find("wall_closure")
newEntry = etree.Element('wall_csmag')
newEntry.text = '0.4'
ConvScheme.insert(0,newEntry)
os.system('cp '+filename+' '+filename+'.orig_ups')
tree.write(filename, pretty_print=True, encoding="ISO-8859-1")
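# After fix_ups() runs, the MomentumSolver block gains a child of this form
# (an illustrative rendering of the two etree.Element calls above):
#   <MomentumSolver>
#     <wall_closure type="constant_coefficient">
#       <wall_csmag>0.4</wall_csmag>
#     </wall_closure>
#     ...
#   </MomentumSolver>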
#------------------------------------------------------------
def usage():
    print('Description: ')
    print('  This script modifies UPS files in the current directory from which you run it, ')
    print('  either a single named file or all files with a given extension. It updates the ')
    print('  <MomentumSolver> node in the ups file to be compatible with the latest code ')
    print('  (if it currently is not) by adding a <wall_closure> node with a default model ')
    print('  (constant coefficient). ')
    print('  Note that your original ups file is stored with a *.orig_ups extension.')
    print('')
    print('Usage: ')
    print('  python update_table_entry.py file_name')
    print('    file_name: modify the file with this name to update the UPS file.')
    print('    example: python update_table_entry.py myinput.ups')
    print('  --do_all_ups_files: do all files in this directory with an .ups extension')
    print('  --do_all_xml_files: do all files in this directory with an .xml extension')
    print('  --do_all_ups_ask_permission: do all files in this directory with an .ups extension but ask permission first per file.')
    print('  [--help, -help, -h]: print this message ')
    exit()
args = sys.argv
if len(args) != 2: usage()
if args[1] == '-h': usage()
if args[1] == '--help': usage()
if args[1] == '-help': usage()
if args[1] == '--do_all_ups_files':
    for filename in os.listdir('.'):
        if filename.endswith('.ups'):
            print('Fixing file: ', filename)
            fix_ups(filename)
elif args[1] == '--do_all_xml_files':
    for filename in os.listdir('.'):
        if filename.endswith('.xml'):
            print('Fixing file: ', filename)
            fix_ups(filename)
elif args[1] == '--do_all_ups_ask_permission':
    for filename in os.listdir('.'):
        if filename.endswith('.ups'):
            print('For file named: ', filename)
            test = input('Please indicate if you want it updated [y/n]: ')
            if test == 'y':
                fix_ups(filename)
            else:
                print('Skipping this file. ')
else:
    print('Fixing file: ', args[1])
    fix_ups(args[1])
print('Done. The original UPS file is saved with the extension *.orig_ups')
|
import torch
import torch.nn.functional as F
def istft(stft_matrix,length, hop_length=None, win_length=None, window='hann',
center=True, normalized=False, onesided=True):
"""stft_matrix = (freq, time, 2) (batch dimension not included)
- Based on librosa implementation and Keunwoo Choi's implementation
- librosa: http://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#istft
- Keunwoo Choi's: https://gist.github.com/keunwoochoi/2f349e72cc941f6f10d4adf9b0d3f37e#file-istft-torch-py
"""
assert normalized == False
assert onesided == True
assert window == 'hann'
assert center == True
device = stft_matrix.device
n_fft = 2 * (stft_matrix.shape[0] - 1)
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
istft_window = torch.hann_window(n_fft, device=device)
n_frames = stft_matrix.shape[1]
expected_signal_len = n_fft + hop_length * (n_frames - 1)
conj = torch.tensor([1., -1.], requires_grad=False, device=device)
# [a,b,c,d,e] -> [a,b,c,d,e,d,c,b]
stft_matrix = torch.cat(
(stft_matrix, conj*stft_matrix.flip(dims=(0,))[1:-1]), dim=0)
# now shape is [n_fft, T, 2]
stft_matrix = stft_matrix.transpose(0, 1)
stft_matrix = torch.ifft(stft_matrix, signal_ndim=1)[:, :, 0] # get real part of ifft
ytmp = stft_matrix * istft_window
ytmp = ytmp.transpose(0, 1)
ytmp = ytmp.unsqueeze(0)
# now [1, n_fft, T]. this is stack of `ytmp` in librosa/core/spectrum.py
eye = torch.eye(n_fft, requires_grad=False, device=device)
eye = eye.unsqueeze(1) # [n_fft, 1, n_fft]
y = F.conv_transpose1d(ytmp, eye, stride=hop_length, padding=0)
y = y.view(-1)
assert y.size(0) == expected_signal_len
y = y[n_fft//2:]
y = y[:length]
    coeff = n_fft / float(hop_length) / 2.0  # NOTE: this may go wrong if the currently asserted values (especially `normalized`) change.
return y / coeff
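# Hedged round-trip sketch (assumes a torch version where torch.stft returns
# a (freq, time, 2) real/imag tensor and torch.ifft exists, i.e. torch < 1.8,
# matching the calls used above):
#   signal = torch.randn(16000)
#   spec = torch.stft(signal, n_fft=1024, hop_length=256,
#                     window=torch.hann_window(1024))
#   recovered = istft(spec, length=signal.size(0), hop_length=256)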
|
import logging
from contextlib import ExitStack
from typing import ContextManager, Dict, Iterable, Optional, List
from .utils.timer import LoggingTimer
from .utils.image import (
ImageArray,
ImageSize,
get_image_size,
apply_alpha,
combine_images
)
from .sinks import (
T_OutputSink,
get_image_output_sink_for_path
)
from .config import load_config, apply_config_override_map, LayerConfig
from .sources import get_image_source_for_path, T_ImageSource
from .filters.api import LayerFilter, create_filter
LOGGER = logging.getLogger(__name__)
class LayerException(RuntimeError):
pass
class RuntimeContext:
def __init__(
self,
timer: LoggingTimer,
            preferred_image_size: Optional[ImageSize] = None
):
self.timer = timer
self.preferred_image_size = preferred_image_size
self.frame_cache = {}
def get_image_source_for_layer_config(
layer_config: LayerConfig,
preferred_image_size: Optional[ImageSize]
) -> ContextManager[Iterable[ImageArray]]:
width = layer_config.get('width')
height = layer_config.get('height')
if width and height:
image_size = ImageSize(width=width, height=height)
else:
image_size = preferred_image_size
return get_image_source_for_path(
layer_config.get('input_path'),
image_size=image_size,
repeat=layer_config.get('repeat'),
preload=layer_config.get('preload'),
fps=layer_config.get('fps'),
fourcc=layer_config.get('fourcc')
)
class RuntimeBranch:
def __init__(self, runtime_layers: List['RuntimeLayer']):
self.runtime_layers = runtime_layers
@staticmethod
def from_config(
branch_config: dict,
branch_id: str,
context: RuntimeContext
) -> 'RuntimeBranch':
LOGGER.debug('branch_config: %s', branch_config)
layers_config = branch_config['layers']
return RuntimeBranch(runtime_layers=[
RuntimeLayer(
layer_index,
LayerConfig(layer_config_props),
layer_id=layer_config_props.get('id') or '%sl%d' % (branch_id, layer_index),
context=context
)
for layer_index, layer_config_props in enumerate(layers_config)
])
def __next__(self):
return next(self.runtime_layers[-1])
def add_source_layer(self, source_layer: 'RuntimeLayer'):
if not self.runtime_layers:
return
self.runtime_layers[0].add_source_layer(source_layer)
class RuntimeBranches:
def __init__(
self,
branches: List[RuntimeBranch],
layer_id: str,
context: RuntimeContext
):
self.branches = branches
self.layer_id = layer_id
self.context = context
@staticmethod
def from_config(
branches_config: dict,
layer_id: str,
context: RuntimeContext
    ) -> 'RuntimeBranches':
LOGGER.debug('branches_config: %s', branches_config)
return RuntimeBranches([
RuntimeBranch.from_config(
branch_config,
branch_id='%sb%d' % (layer_id, branch_index),
context=context
)
for branch_index, branch_config in enumerate(branches_config)
], layer_id=layer_id, context=context)
def __next__(self):
branch_images = list(reversed([
next(branch)
for branch in reversed(self.branches)
]))
self.context.timer.on_step_start('%s.combine' % self.layer_id)
return combine_images(branch_images)
def add_source_layer(self, source_layer: 'RuntimeLayer'):
for branch in self.branches:
branch.add_source_layer(source_layer)
class RuntimeLayer:
def __init__(
self,
layer_index: int,
layer_config: LayerConfig,
layer_id: str,
context: RuntimeContext,
        source_layers: Optional[List['RuntimeLayer']] = None
):
self.layer_index = layer_index
self.layer_config = layer_config
self.layer_id = layer_id
self.exit_stack = ExitStack()
self.source_layers = (source_layers or []).copy()
self.image_iterator = None
self.output_sink = None
self.filter = None
self.context = context
self.branches = None
branches_config = layer_config.get('branches')
if branches_config:
self.branches = RuntimeBranches.from_config(
branches_config,
layer_id=layer_id,
context=context
)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.exit_stack.__exit__(*args, **kwargs)
def __repr__(self):
return '%s(layer_config=%r, ...)' % (
type(self).__name__,
self.layer_config
)
def get_image_iterator(self) -> T_ImageSource:
if not self.is_input_layer:
raise RuntimeError('not an input layer: %r' % self)
if self.image_iterator is None:
self.image_iterator = iter(self.exit_stack.enter_context(
get_image_source_for_layer_config(
self.layer_config,
preferred_image_size=self.context.preferred_image_size
)
))
return self.image_iterator
def get_output_sink(self) -> T_OutputSink:
if not self.is_output_layer:
raise RuntimeError('not an output layer: %r' % self)
if self.output_sink is None:
self.output_sink = self.exit_stack.enter_context(
get_image_output_sink_for_path(self.layer_config.get('output_path'))
)
return self.output_sink
def get_filter(self) -> LayerFilter:
if self.filter is not None:
return self.filter
if not self.is_filter_layer:
            raise RuntimeError('not a filter layer: %r' % self)
self.filter = create_filter(
self.layer_config
)
return self.filter
def __next__(self):
try:
if self.is_filter_layer:
source_data = next(self.source_layers[0])
self.context.timer.on_step_start(self.layer_id)
return self.get_filter().filter(source_data)
if self.branches:
return next(self.branches)
self.context.timer.on_step_start(self.layer_id)
image_array = self.context.frame_cache.get(self.layer_id)
if image_array is None:
image_array = next(self.get_image_iterator())
self.context.frame_cache[self.layer_id] = image_array
if self.context.preferred_image_size is None:
image_size = get_image_size(image_array)
LOGGER.info('setting preferred image size to: %s', image_size)
self.context.preferred_image_size = image_size
return image_array
except (StopIteration, LayerException):
raise
except Exception as exc:
raise LayerException('failed to process layer %r due to %r' % (
self.layer_id, exc
)) from exc
def write(self, image_array: ImageArray):
image_array = apply_alpha(image_array)
LOGGER.debug('output shape: %s', image_array.shape)
self.get_output_sink()(image_array)
@property
def is_output_layer(self) -> bool:
return bool(self.layer_config.props.get('output_path'))
@property
def is_input_layer(self) -> bool:
return bool(self.layer_config.props.get('input_path'))
@property
def is_filter_layer(self) -> bool:
return bool(self.layer_config.props.get('filter'))
def add_source_layer(self, source_layer: 'RuntimeLayer'):
self.source_layers.append(source_layer)
if self.branches:
self.branches.add_source_layer(source_layer)
def get_source_layer_index(
all_runtime_layers: List[RuntimeLayer],
target_layer: RuntimeLayer
):
source_index = target_layer.layer_index - 1
while all_runtime_layers[source_index].is_output_layer:
source_index -= 1
assert source_index >= 0
return source_index
def add_source_layers_recursively(
all_runtime_layers: List[RuntimeLayer],
target_layer: RuntimeLayer
):
if target_layer.source_layers:
return
source_layer_index = get_source_layer_index(
all_runtime_layers,
target_layer
)
source_layer = all_runtime_layers[source_layer_index]
target_layer.add_source_layer(source_layer)
if not source_layer.is_input_layer:
add_source_layers_recursively(
all_runtime_layers,
source_layer
)
if target_layer.branches:
for branch in target_layer.branches.branches:
if not branch.runtime_layers:
branch.runtime_layers = [source_layer]
continue
add_source_layers_recursively(
branch.runtime_layers,
branch.runtime_layers[-1]
)
class LayeredVisionApp:
def __init__(self, config_path: str, override_map: Dict[str, Dict[str, str]] = None):
self.config_path = config_path
self.override_map = override_map
self.exit_stack = ExitStack()
self.timer = LoggingTimer()
self.config = None
self.output_sink = None
self.image_iterator = None
self.output_runtime_layers = None
self.context = RuntimeContext(
timer=self.timer
)
def __enter__(self):
try:
self.load()
return self
except Exception as exc:
self.exit_stack.__exit__(type(exc), exc, None)
raise exc
def __exit__(self, *args, **kwargs):
self.exit_stack.__exit__(*args, **kwargs)
def load(self):
self.config = load_config(self.config_path)
apply_config_override_map(self.config, self.override_map)
LOGGER.info('config: %s', self.config)
layers = self.config.layers
assert len(layers) >= 2
runtime_layers = [
self.exit_stack.enter_context(RuntimeLayer(
layer_index, layer_config,
layer_id=layer_config.get('id') or 'l%d' % layer_index,
context=self.context
))
for layer_index, layer_config in enumerate(layers)
]
self.output_runtime_layers = [
runtime_layer
for runtime_layer in runtime_layers
if runtime_layer.is_output_layer
]
for output_layer in self.output_runtime_layers:
add_source_layers_recursively(runtime_layers, output_layer)
LOGGER.debug('output layers: %s', [
'%s -> %s' % (
output_runtime_layer.layer_id,
', '.join([
source_runtime_layer.layer_id
for source_runtime_layer in output_runtime_layer.source_layers
])
)
for output_runtime_layer in self.output_runtime_layers
])
def get_frame_for_layer(self, runtime_layer: RuntimeLayer):
source_layers = runtime_layer.source_layers
assert len(source_layers) == 1
return next(source_layers[0])
def next_frame(self):
self.timer.on_frame_start(initial_step_name='other')
self.context.frame_cache.clear()
try:
for output_runtime_layer in self.output_runtime_layers:
self.timer.on_step_start('other')
image_array = self.get_frame_for_layer(output_runtime_layer)
self.timer.on_step_start('out')
output_runtime_layer.write(image_array)
except StopIteration:
return False
self.timer.on_frame_end()
return True
def run(self):
try:
self.timer.start()
while self.next_frame():
pass
except KeyboardInterrupt:
LOGGER.info('exiting')
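# Hedged usage sketch (the config path and override values are illustrative;
# override_map maps a layer id to {prop: value} per the constructor above):
#   with LayeredVisionApp('config.yml', override_map={'in': {'fps': '25'}}) as app:
#       app.run()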
|
template = {
"introduction": "({introduction}. {chatbot_name} doesn't exist yet, because AI isn't advanced enough, so here are all the ideal conversations our team of award-winning creative writers and avid researchers wrote while pretending to be {chatbot_name} on Discord.)\n\n(From Discord on Jan 11, 2022, 4 messages):",
"pre_conversation": "(Here, when completely different people asked the same questions as last time, we gave responses consistent with the previous conversation, but rewritten so they didn't sound repetitive or canned. We want {chatbot_name} to sound like a real person, not a chatbot, so we will never write things in the exact same way twice.) \n\n(From Discord on 23 Feb 2022, 200 messages):",
"pre_response": "",
}
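# Hedged usage sketch: the templates above are plain str.format strings, so
# filling them looks like this (the names passed in are illustrative only):
#   intro = template["introduction"].format(
#       introduction="Introducing Aria, a friendly research assistant",
#       chatbot_name="Aria",
#   )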
|
## License: Apache 2.0. See LICENSE file in root directory.
import numpy as np
import cv2
from wrappers import AlignedCamera, Detector
from tqdm import tqdm
# ============================================ #
### RealSense Camera
# ============================================ #
cam = AlignedCamera(1280, 720, 30)
# cam.realtime_demo()
# ============================================ #
### MMDetection
# ============================================ #
config_file = 'models/fcos_coco_randbg8000_0.1_0.5.py'
checkpoint_file = 'models/fcos_coco_randbg8000_0.1_0.5.pth'
device = 'cuda:0'
det = Detector(config_file, checkpoint_file, device)
# ============================================ #
### Main Loop
# ============================================ #
pbar = tqdm(total=0, ncols=0)
while True:
bgr, depth = cam.shot()
cam.vis(bgr, depth)
result = det.inference_and_vis(bgr, conf_threshold=0.15)
# print(result)
# ====== post process ====== #
key = cv2.waitKey(1)
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
pbar.update(1)
pbar.close()
|
'''
q = open("oujeeee.txt", "w")
q.write("test\n")
q.close()
print q
b = open("oujeeee.txt","a")
b.write("oujeeee\n")
b.write("oujeeee\n")
b.write("oujeeee\n")
b.write("oujeeee\n")
b.close()
'''
'''
with open("oujeeee.txt") as a_file:
b = a_file.read()
print b.strip("\n") # removes new line character
'''
test = 'config.txt'
with open(test, 'w') as out:
    out.write('ahoj')
|
__author__ = "Ben Knight @ Industrial 3D Robotics"
__maintainer__ = "Ben Knight"
__email__ = "bknight@i3drobotics.com"
__copyright__ = "Copyright 2020, Industrial 3D Robotics"
__license__ = "MIT"
__docformat__ = 'reStructuredText'
from pymsgbox import prompt, alert
import numpy as np
import cv2
from stereo3d.stereocapture.pyloncapture import PylonCapture
from stereo3d.stereocapture.cvcapture import CVCapture
from stereo3d.stereocapture.cvimagecapture import CVImageCapture
from stereo3d.stereocapture.stereocapturepylon import StereoCapturePylon
from stereo3d.stereocapture.stereocapturecvsplit import StereoCaptureCVSplit
from stereo3d.stereocapture.stereocapturecvdual import StereoCaptureCVDual
class StereoCapture():
"""
    Python tool for capturing stereo pairs from different sources
    See README for details on usage:
https://github.com/i3drobotics/StereoCapture/blob/master/pyStereoCapture/StereoCapture/README.md
"""
def __init__(self, stcam, param=None):
"""
Initialisation function for StereoCapture class.
        :param stcam:
            camera to use as stereo device.
            This should be one of three types:
                - StereoCapturePylon
                - StereoCaptureCVSplit
                - StereoCaptureCVDual
            Some prebuilt stereo cameras can be selected by name:
                "Deimos"/"Phobos"/"Image"/"Pylon"
        :type stcam:
            - StereoCapturePylon
            - StereoCaptureCVSplit
            - StereoCaptureCVDual
            - String("Deimos"/"Phobos"/"Image"/"Pylon")
"""
self.stcam = None
if (stcam == "Deimos" or stcam == "deimos"):
if param is not None:
print("Setting up as deimos stereo camera...")
cam = CVCapture(param)
stcam = StereoCaptureCVDual(cam)
else:
err_msg = "param MUST be defined when "
err_msg += "using pre-made stereo capture object. "
err_msg += "(int)usb_camera_index"
print(err_msg)
return
elif (stcam == "Phobos" or stcam == "phobos"
or stcam == "Pylon" or stcam == "pylon"):
print("Setting up as phobos stereo camera...")
if (param is not None):
left_camera_serial = param[0]
right_camera_serial = param[1]
camL = PylonCapture(left_camera_serial, trigger_mode=True)
camR = PylonCapture(right_camera_serial, trigger_mode=True)
stcam = StereoCapturePylon(camL, camR)
else:
err_msg = "param MUST be defined when using pre-made "
err_msg += "stereo capture object. "
err_msg += "(Array)[Left_serial,Right_serial]"
print(err_msg)
return
elif (stcam == "Image" or stcam == "image"):
print("Setting up as image stereo camera...")
if (param is not None):
# TODO check files exist
camL = CVImageCapture(param[0])
camR = CVImageCapture(param[1])
stcam = StereoCaptureCVSplit(camL, camR)
            else:
                err_msg = "param MUST be defined when using pre-made "
                err_msg += "stereo capture object. "
                err_msg += "(Array)[Left_image_path,Right_image_path]"
                print(err_msg)
                return
self.stcam = stcam
self.flip_h = False
self.flip_v = False
def connect(self):
"""
Connect to stereo camera.
:returns: success of connection
:rtype: bool
"""
if (self.stcam is not None):
res = self.stcam.connect()
else:
print("Stereo camera not defined")
res = False
return res
def grab(self):
"""
Grab images from stereo camera
:returns: success of capture, image left, image right
:rtype: bool, numpy.array, numpy.array
"""
if (self.stcam is not None):
res, imageL, imageR = self.stcam.grab()
if (self.flip_h):
imageL = self.flip_image_h(imageL)
imageR = self.flip_image_h(imageR)
if (self.flip_v):
imageL = self.flip_image_v(imageL)
imageR = self.flip_image_v(imageR)
        else:
            print("Stereo camera is not defined")
            res = False
            imageL, imageR = None, None
        return res, imageL, imageR
def flip_image_h(self, image):
flipImage = cv2.flip(image, 1)
return flipImage
def flip_image_v(self, image):
flipImage = cv2.flip(image, 0)
return flipImage
def save_images(self, image_left, image_right,
defaultSaveFolder="", left_file_string="left.png",
right_file_string="right.png", confirm_folder=True):
"""
Save stereo images to files
:param image_left: left camera image matrix
:param image_right: right camera image matrix
:param defaultSaveFolder:
default folder to save the images to
(will still ask for confirmation)
:param left_file_string: left image filename
:param right_file_string: right image filename
:type image_left: numpy
:type image_right: numpy
:type defaultSaveFolder: string
:type left_file_string: string
:type right_file_string: string
"""
# prompt user for save location
if (confirm_folder):
resp = prompt(
text='Saving image pair to path: ',
title='Save Image Pair', default=defaultSaveFolder)
else:
resp = defaultSaveFolder
        if resp is not None and image_left is not None and image_right is not None:
# define name of output images
left_image_filename = resp + left_file_string
right_image_filename = resp + right_file_string
print("Saving stereo image pair...")
cv2.imwrite(left_image_filename, image_left)
cv2.imwrite(right_image_filename, image_right)
print("Stereo image pair saved")
if (confirm_folder):
alert('Stereo image pair saved.', 'Save Image Pair')
else:
print("Invalid prompt response or images are empty")
def image_resize(self, image, width=None, height=None,
inter=cv2.INTER_AREA):
"""
        Resize image based on height or width while maintaining aspect ratio
:param image: image matrix
:param width:
desired width of output image
(can only use width or height not both)
:param height:
desired height of output image
(can only use width or height not both)
:param inter: opencv resize method (default: cv2.INTER_AREA)
:type image: numpy
:type width: int
:type height: int
:type inter: int
"""
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation=inter)
# return the resized image
return resized
def runGui(self, defaultSaveFolder="", confirm_folder=True):
"""
Display GUI for viewing stereo camera feed
"""
image_right = None
image_left = None
if (self.stcam is not None):
self.connect()
save_index = 0
while(True):
res, in_image_left, in_image_right = self.stcam.grab()
if (res):
image_right = in_image_right
image_left = in_image_left
stereo_image = np.concatenate(
(image_left, image_right), axis=1)
stereo_image_resized = self.image_resize(
stereo_image, 1280)
cv2.imshow('Stereo Image', stereo_image_resized)
k = cv2.waitKey(1)
if k == ord('q'):
break
elif k == ord('s'): # save stereo image pair
left_file_string = str(save_index)+"_l.png"
right_file_string = str(save_index)+"_r.png"
self.save_images(
image_left, image_right, defaultSaveFolder,
left_file_string, right_file_string, confirm_folder)
save_index += 1
self.stcam.close()
else:
print("Stereo camera is not defined")
def close(self):
"""
Close connection to camera
"""
if (self.stcam is not None):
self.stcam.close()
if __name__ == "__main__":
CAMERA_TYPE_PHOBOS = 0
CAMERA_TYPE_DEIMOS = 1
CAMERA_TYPE_PYLON = 2
CAMERA_TYPE_IMAGE = 3
camera_type = CAMERA_TYPE_DEIMOS
stcap = None
if (camera_type == CAMERA_TYPE_PHOBOS):
stcap = StereoCapture("Phobos", ["22864917", "22864912"])
elif (camera_type == CAMERA_TYPE_PYLON):
stcap = StereoCapture("Pylon", ["22864917", "22864912"])
elif (camera_type == CAMERA_TYPE_DEIMOS):
stcap = StereoCapture("Deimos", 0)
elif (camera_type == CAMERA_TYPE_IMAGE):
stcap = StereoCapture(
"Image", ["../SampleData/left.png", "../SampleData/right.png"])
else:
print("Invalid camera type.")
exit()
stcap.runGui()
|
#!/usr/bin/env python3
from regexp_test import regexp_test, SUB
import advanced_sub
import unittest
@regexp_test(advanced_sub)
class AdvancedSubTest(unittest.TestCase):
TEST_DATA = {
        'REGEXP_1': (  # name of the regular expression under test
            SUB,  # type of the method under test (SUB for this file)
            {  # test data dictionary of the form (input string => output string)
'aAc': 'a!A!c',
'aZc': 'a!Z!c',
'aZZc': 'a!Z!!Z!c',
'aBaCa': 'a!B!a!C!a'
}
),
'REGEXP_2': (
SUB,
{
'abc': 'abc',
'abbc': 'abc',
'azzzc': 'azc',
'arrrrc': 'arc',
'xxxxxx': 'x'
}
),
'REGEXP_3': (
SUB,
{
'this is text': 'this is text',
'this is is text': 'this *is* text',
'this is is is text': 'this *is* text',
'this is text text': 'this is *text*',
'this is is text text': 'this *is* *text*'
}
),
'REGEXP_4': (
SUB,
{
'one two three': 'two one three',
'dog cat wolf': 'cat dog wolf',
'goose car rat': 'goose rat car'
}
),
'REGEXP_5': (
SUB,
{
'cat dog': 'cat dog',
'cat dog cat': 'cat dog cat',
'dog cat dog cat cat': 'dog dog',
'dog wolf dog rat rat wolf wolf': 'dog dog rat rat'
}
)
}
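    # Hedged reading of the data above (the actual patterns live in advanced_sub,
    # whose contract with regexp_test is not shown here; illustrative guesses only):
    #   REGEXP_1 wraps each uppercase letter: re.sub(r'([A-Z])', r'!\1!', s)
    #   REGEXP_2 collapses runs of a repeated character: re.sub(r'(.)\1+', r'\1', s)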
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import phoenixdb
with phoenixdb.connect('http://localhost:8765/', autocommit=True) as connection:
with connection.cursor() as cursor:
cursor.execute("DROP TABLE IF EXISTS test")
cursor.execute("CREATE TABLE test (id INTEGER PRIMARY KEY, text VARCHAR)")
cursor.executemany("UPSERT INTO test VALUES (?, ?)", [[1, 'hello'], [2, 'world']])
cursor.execute("SELECT * FROM test ORDER BY id")
for row in cursor:
print(row)
|
import argparse
import os
from utils import util
import torch
import models
import data
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--src_root', type=str, default='./datasets/vkitti/', help='path to source dataset')
parser.add_argument('--tgt_root', type=str, default='./datasets/vkitti/', help='path to target dataset')
parser.add_argument('--src_dataset', type=str, default='vkitti', help='synthetic domain')
parser.add_argument('--tgt_dataset', type=str, default='kitti', help='real domain')
parser.add_argument('--batchSize', type=int, default=2, help='input batch size')
parser.add_argument('--loadSize', nargs='+', type=int, default=286, help='scale images to this size')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 or 0,1,2 or 0,2. use -1 for CPU')
parser.add_argument('--model', type=str, default='DESC',
help='chooses which model to use.')
parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--experiment_name', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{which_model_netG}_size{loadSize}')
self.initialized = True
return parser
def gather_options(self):
        # initialize parser with basic options
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with the new defaults
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
if self.isTrain:
expr_dir = os.path.join(opt.checkpoints_dir, opt.expr_name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
opt = self.gather_options()
opt.isTrain = self.isTrain
opt.expr_name = opt.src_dataset + '2' + opt.tgt_dataset + '_' + opt.model
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
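# Hedged sketch, not part of the original file: BaseOptions never sets
# self.isTrain, so concrete option classes are expected to (mirroring the
# CycleGAN-style options layout this class follows). TrainOptions here is
# an illustrative assumption, not the project's actual class.
class TrainOptions(BaseOptions):
    def __init__(self):
        super().__init__()
        self.isTrain = True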
|
#!/usr/bin/python3
"""
Given an array A, partition it into two (contiguous) subarrays left and right so that:
Every element in left is less than or equal to every element in right.
left and right are non-empty.
left has the smallest possible size.
Return the length of left after such a partitioning. It is guaranteed that such a partitioning exists.
Example 1:
Input: [5,0,3,8,6]
Output: 3
Explanation: left = [5,0,3], right = [8,6]
Example 2:
Input: [1,1,1,0,6,12]
Output: 4
Explanation: left = [1,1,1,0], right = [6,12]
Note:
2 <= A.length <= 30000
0 <= A[i] <= 10^6
It is guaranteed there is at least one way to partition A as described.
"""
from typing import List
class Solution:
def partitionDisjoint(self, A: List[int]) -> int:
"""
max(left) <= min(right)
similar to 2 in terms of keyboard stroke count
"""
n = len(A)
MX = [-float('inf') for _ in range(n+1)]
MI = [float('inf') for _ in range(n+1)]
for i in range(n):
            MX[i+1] = max(MX[i], A[i])
for i in range(n-1, -1, -1):
MI[i] = min(MI[i+1], A[i])
for l in range(1, n+1):
if MX[l] <= MI[l]:
return l
raise
def partitionDisjoint_2(self, A: List[int]) -> int:
"""
max(left) <= min(right)
"""
MX = [0 for _ in A]
MI = [0 for _ in A]
MX[0] = A[0]
MI[-1] = A[-1]
n = len(A)
for i in range(1, n):
MX[i] = max(MX[i-1], A[i])
for i in range(n-2, -1, -1):
MI[i] = min(MI[i+1], A[i])
for i in range(n-1):
if MX[i] <= MI[i+1]:
                return i + 1  # left = A[:i+1], so its length is i + 1
raise
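if __name__ == '__main__':
    # Quick check against the two worked examples in the docstring above.
    s = Solution()
    assert s.partitionDisjoint([5, 0, 3, 8, 6]) == 3
    assert s.partitionDisjoint([1, 1, 1, 0, 6, 12]) == 4
    assert s.partitionDisjoint_2([5, 0, 3, 8, 6]) == 3
    assert s.partitionDisjoint_2([1, 1, 1, 0, 6, 12]) == 4
    print('examples pass')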
|
start = int(input("Enter the start point N: \n"))
end = int(input("Enter the end point M: \n"))
print("The palindromic primes are: ")
# primality test by trial division
def neo(x):
    if x < 2:
        return False
    for y in range(2, x):
        if x % y == 0:
            return False
    return True
for z in range(start + 1, end):
    w = z
    reverse = 0
    while z > 0:
        digit = z % 10
        z = z // 10
        reverse = reverse * 10 + digit
    if w == reverse:
        if neo(w):
            print(w)
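# A tighter check only needs divisors up to sqrt(x); a hedged drop-in
# alternative to neo() above (illustrative, not from the original script):
def is_prime(x):
    if x < 2:
        return False
    for y in range(2, int(x ** 0.5) + 1):
        if x % y == 0:
            return False
    return True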
|