"""Review a change"""
import argparse
import logging
import os
from libpycr.editor import raw_input_editor, strip_comments
from libpycr.exceptions import NoSuchChangeError, PyCRError
from libpycr.gerrit.client import Gerrit
from libpycr.meta import GitClBuiltin
from libpycr.utils.output import Formatter, NEW_LINE
from libpycr.utils.system import ask, fail
class Review(GitClBuiltin):
"""Implement the REVIEW command"""
log = logging.getLogger(__name__)
@property
def description(self):
return 'code-review a change'
@staticmethod
def parse_command_line(arguments):
"""Parse the SUBMIT command command-line arguments
Returns a tuple with a change_id and a revision_id.
:param arguments: a list of command-line arguments to parse
:type arguments: list[str]
:rtype: str, str
"""
parser = argparse.ArgumentParser(description='Code-review a change')
parser.add_argument(
'change_id', metavar='CL',
help='Gerrit Code Review CL / CL range / Change-Id')
parser.add_argument(
'score', help='the score of the review', default=None,
choices=Gerrit.SCORES, nargs='?')
parser.add_argument('-m', '--message', help='the review comment')
parser.add_argument(
'-l', '--label', default='Code-Review',
help='the label to score (default: Code-Review)')
cmdline = parser.parse_args(arguments)
return cmdline.change_id, cmdline.score, cmdline.message, cmdline.label
@staticmethod
def tokenize(change, review):
"""Token generator for the output
Yields a stream of tokens: tuple of (Token, string).
:param change: the change
:type change: ChangeInfo
:param review: the review
:type review: ReviewInfo
:yield: tuple[Token, str]
"""
for token in change.tokenize():
yield token
yield NEW_LINE
yield NEW_LINE
for token in review.tokenize():
yield token
def run(self, arguments, *args, **kwargs):
change_id, score, message, label = self.parse_command_line(arguments)
try:
change = Gerrit.get_change(change_id)
if message is None:
initial_content = [
'',
('# Please enter the comment message for your review. '
'Lines starting'),
"# with '#' will be ignored.",
'#'
]
initial_content.extend(
['# %s' % line for line in change.raw_str().splitlines()])
initial_content.append('#')
message = raw_input_editor(os.linesep.join(initial_content))
message = strip_comments(message)
if score is None:
score = ask('Please enter your review score', Gerrit.SCORES)
review = Gerrit.set_review(score, message, change.uuid, label)
except NoSuchChangeError as why:
self.log.debug(str(why))
fail('invalid change')
except PyCRError as why:
fail('cannot post review', why)
print(Formatter.format(self.tokenize(change, review)))
|
from discord.ext import commands
import discord, praw, datetime, json, os
version_number = "1.4.2"
version = version_number + " Created by bwac#2517"
red = 0xFF0000
with open("secrets.json") as json_file:
secrets = json.load(json_file)
trophyemojis = None
with open("trophyemoji.json") as json_file:
trophyemojis = json.load(json_file)
# set if this is production or not
production = False
if os.path.isfile("production"):
production = True
class user(commands.Cog):
def __init__(self, bot):
self.bot = bot
type(self).__name__ = ""
@commands.command(aliases=["u"])
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.guild_only()
async def user(self, ctx, username=None):
"""User command, r!user"""
if username:
loading = discord.Embed(title="", ncolor=red)
loading.add_field(
name="Loading...",
value="<a:loading:650579775433474088> Contacting reddit servers...",
)
loading.set_footer(text="if it never loads, RedditBot can't find the user")
loadingMessage = await ctx.send(embed=loading)
reddit = praw.Reddit(
client_id=secrets["reddit_id"],
client_secret=secrets["reddit_secret"],
user_agent="discord:n/a:" + version_number + " (by /u/-_-BWAC-_-)",
)
loading = discord.Embed(title="", color=red)
loading.add_field(
name="Loading...",
value="<a:loading:650579775433474088> Getting profile info...",
)
loading.set_footer(
text="if it never loads, something went wrong behind the scenes"
)
await loadingMessage.edit(embed=loading)
time_cached = None
name = None
karma = None
link_karma = None
cake_day = None
is_employee = None
user_r = reddit.redditor(username) # makes user
if os.path.isfile("cache/users/" + username + ".json"):
# If cache exists, read from it
loading = discord.Embed(title="", color=red)
loading.add_field(
name="Cache...",
value="<a:loading:650579775433474088> cache found! now loading from",
)
loading.set_footer(
text="if it never loads, something went wrong in the backround, or the username cant be found"
)
await loadingMessage.edit(embed=loading)
with open("cache/users/" + username + ".json") as json_file:
cache = json.load(json_file)
time_cached = cache["time_cached"]
name = cache["name"]
karma = cache["karma"]
link_karma = cache["link_karma"]
cake_day = cache["cake_day"]
is_employee = cache["is_employee"]
else:
name = username
karma = user_r.comment_karma
link_karma = user_r.link_karma
cake_day = datetime.datetime.fromtimestamp(
int(user_r.created)
).strftime("%m/%d/%Y")
is_employee = user_r.is_employee
cache = {
"time_cached": str(datetime.datetime.now()),
"name": name,
"karma": karma,
"link_karma": link_karma,
"cake_day": cake_day,
"is_employee": is_employee,
}
with open("cache/users/" + username + ".json", "w") as outfile:
json.dump(cache, outfile)
user = discord.Embed(title="u/" + username + " info:", color=red)
user.add_field(name="Karma:", value=karma)
user.add_field(name="Link karma:", value=link_karma)
user.add_field(name="All karma:", value=link_karma + karma)
user.add_field(name="Cake Day:", value=cake_day)
if time_cached:
user.add_field(
name="*these results are from a cache made at*:",
value=time_cached,
inline=False,
)
user.add_field(
name="*if you want the latest stats, use r!resetuser "
+ username
+ "*",
value="keep in mind that you should only reset a user cache every so often",
inline=False,
)
trophiestxt = ""
for trophy in user_r.trophies():
emoji = ""
if trophy.name in trophyemojis:
emoji = trophyemojis.get(trophy.name)
if len(trophiestxt) > 900:
trophiestxt = (
trophiestxt
+ "All the trophies are too long to send in a discord embed value so I "
"shortened them "
)
break
trophiestxt = trophiestxt + emoji + trophy.name + "\n"
user.add_field(name="Trophies:", value=trophiestxt)
if is_employee:
user.add_field(
name="This user", value="is an employee of reddit", inline=False
)
user.set_author(
name="RedditBot",
icon_url="https://images.discordapp.net/avatars/437439562386505730/2874f76dd780cb0af624e3049a6bfad0.png",
)
user.set_thumbnail(url=user_r.icon_img)
user.set_footer(text="RedditBot " + version)
await loadingMessage.edit(embed=user)
else:
error = discord.Embed(
title="You didn't give a username!\n\nYou should use this command like:\nr!user `"
"username`",
color=red,
)
error.set_footer(text=version)
await ctx.send(embed=error)
@commands.command(name="resetuser")
async def resetuser(self, ctx, user_name=None):
"""resets a user cache"""
if user_name:
loading = discord.Embed(title="", color=red)
loading.add_field(
name="Deleting cache...", value="<a:loading:650579775433474088>"
)
loading.set_footer(text="if it never loads, RedditBot can't find the user")
loadingMessage = await ctx.send(embed=loading)
if os.path.isfile("cache/users/" + user_name + ".json"):
os.remove("cache/users/" + user_name + ".json")
loading = discord.Embed(title="", color=red)
loading.add_field(
name="Deleted!...", value="now say r!user " + user_name
)
await loadingMessage.edit(embed=loading)
else:
loading = discord.Embed(title="", color=red)
loading.add_field(
name="No cache!...", value="try saying r!user " + user_name
)
await loadingMessage.edit(embed=loading)
else:
error = discord.Embed(
title="You didn't give a user name!\n\nYou should use this command like:\nr!resetuser `"
"user name`",
color=red,
)
error.set_footer(text=version)
await ctx.send(embed=error)
def setup(bot):
bot.add_cog(user(bot))
|
import setuptools
import PyVuka.pyvuka as pvk
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name=pvk.__app_name__,
version=pvk.__version__,
author=pvk.__author__,
author_email=pvk.__email__,
description=pvk.__description__,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bostonautolytics/pyvuka",
packages=setuptools.find_packages(),
install_requires=["asteval>=0.9.18", "chardet>=3.0.4", "lmfit>=1.0.0", "matplotlib>=3.1.3", "numpy>=1.18.1",
"pack64>=2.0.1", "Pillow>=7.0.0", "psutil>=5.6.7", "PyQt5>=5.14.1", "scipy>=1.3.1",
"xlrd>=1.2.0", "XlsxWriter>=1.2.7"],
classifiers=[
"Programming Language :: Python :: 3",
"License :: Free For Educational Use",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
import os, sys
import numpy as np
import tensorflow as tf
import model
from model.util import *
class PseudoLabeling:
"""Pseudo-labeling function, which is implemented in the python iterator"""
def __init__(self, model_base, model_conf, ld_src, ld_tar, ideal=False):
"""
model_base: a classifier
model_conf: a confidence predictor
ld_src: source data loader
ld_tar: target data loader
"""
self.model_base = model_base
self.model_conf = model_conf
self.ld_src = ld_src
self.ld_tar = ld_tar
self.ideal = ideal
def __iter__(self):
"""Initialize iterators."""
self.iter_end = {'src': False, 'tar': False}
self.iter = {'src': iter(self.ld_src), 'tar': iter(self.ld_tar)}
return self
def __next__(self):
"""return the next labeled examples"""
## source
def sample_from_source():
return next(self.iter['src'])
try:
x, y = sample_from_source()
except StopIteration:
self.iter_end['src'] = True
if self.iter_end['src'] and self.iter_end['tar']:
raise StopIteration
else:
self.iter['src'] = iter(self.ld_src)
x, y = sample_from_source()
x_src, y_src = x, y
## target
def sample_from_target():
while True:
x, y = next(self.iter['tar'])
if len(x.shape) == 5:
x_tc, x_st = x[:, 0, :, :, :], x[:, 1, :, :, :] # two different augmentation
else:
x_tc, x_st = x, x
y_pred = self.model_base(x_tc)['y_pred']
conf = self.model_conf(x_tc)
if self.ideal:
x_conf_i, y_conf_i, y_true_i = x_st[conf==1], y[conf==1], y[conf==1]
else:
x_conf_i, y_conf_i, y_true_i = x_st[conf==1], y_pred[conf==1], y[conf==1]
if any(conf==1):
break
## upsample
n_repeat = x_st.shape[0] // x_conf_i.shape[0]
x_conf = tf.repeat(x_conf_i, n_repeat, 0)
y_conf = tf.repeat(y_conf_i, n_repeat, 0)
y_true = tf.repeat(y_true_i, n_repeat, 0)
n_remain = x_st.shape[0] - x_conf.shape[0]
if n_remain > 0:
x_conf = tf.concat((x_conf, x_conf_i[:n_remain]), 0)
y_conf = tf.concat((y_conf, y_conf_i[:n_remain]), 0)
y_true = tf.concat((y_true, y_true_i[:n_remain]), 0)
return x_conf, y_conf
try:
x, y = sample_from_target()
except StopIteration:
self.iter_end['tar'] = True
if self.iter_end['src'] and self.iter_end['tar']:
raise StopIteration
else:
self.iter['tar'] = iter(self.ld_tar)
try:
x, y = sample_from_target()
except StopIteration:
# it is possible that there are no confident examples
x, y = None, None
x_tar, y_tar = x, y
## merge
if x_tar is not None:
x = tf.concat((x_src, x_tar), 0)
y = tf.concat((tf.cast(y_src, tf.int64), tf.cast(y_tar, tf.int64)), 0)
else:
x, y = x_src, y_src
return x, y
class TargetPseudoLabeling:
"""Pseudo-labeling function, which is used only for target."""
def __init__(self, model_base, model_conf, ld_tar, ideal=False):
"""
model_base: a classifier
model_conf: a confidence predictor
ld_tar: target data loader
"""
self.model_base = model_base
self.model_conf = model_conf
self.ld_tar = ld_tar
self.ideal = ideal
def __iter__(self):
"""Initialize an iterator."""
self.iter = iter(self.ld_tar)
return self
def __next__(self):
"""return the next labeled examples"""
def sample_from_target():
while True:
x, y = next(self.iter)
if len(x.shape) == 5:
x_tc, x_st = x[:, 0, :, :, :], x[:, 1, :, :, :]
else:
x_tc, x_st = x, x
y_pred = self.model_base(x_tc)['y_pred']
conf = self.model_conf(x_tc) ##TODO: return dict?
if self.ideal:
x_conf_i, y_conf_i, y_true_i = x_st[conf==1], y[conf==1], y[conf==1]
else:
x_conf_i, y_conf_i, y_true_i = x_st[conf==1], y_pred[conf==1], y[conf==1]
if any(conf==1):
break
return x_conf_i, y_conf_i
return sample_from_target()
class Teacher(tf.keras.Model):
"""Teacher model, which includes a pseudo-labeling function. It acts like data loaders."""
def __init__(self, params, model_base, ds_src, ds_tar, ideal=False):
"""
Initialize a teacher model
params: required model parameters
model_base: a classifier in the keras model
ds_src: source dataset loader, which includes train/val/test loaders
ds_tar: target dataset loader, which includes train/val/test loaders
ideal: If True, the pseudo-labeling function returns the true labels.
"""
super().__init__()
self.model_base = model_base
self.ideal = ideal
self.model_conf = getattr(model, params.conf)(self.model_base)
self.train = PseudoLabeling(self.model_base, self.model_conf, ds_src.train, ds_tar.train, ideal)
self.val = TargetPseudoLabeling(self.model_base, self.model_conf, ds_tar.val, ideal) ## target only
self.test = TargetPseudoLabeling(self.model_base, self.model_conf, ds_tar.test, ideal) ## target only
class Student(Teacher):
"""Stduent model"""
def __init__(self, params, model_base, ds_src, ds_tar, ideal=False):
super().__init__(params, model_base, ds_src, ds_tar, ideal)
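# A minimal usage sketch (illustrative only; `params`, `clf`, `ds_src`, `ds_tar`,
# `train_step` and `evaluate` are assumed names, not part of this module):
#
#   teacher = Teacher(params, clf, ds_src, ds_tar)
#   for x, y in teacher.train:   # source batches merged with confidently pseudo-labeled target batches
#       loss = train_step(x, y)  # hypothetical training step
#   for x, y in teacher.val:     # target-only confidently pseudo-labeled batches
#       evaluate(x, y)           # hypothetical evaluation step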
|
"""The Strava integration."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from . import config_flow
from .const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN, SCAN_INTERVAL
from .strava_api import StravaAPI
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
_LOGGER.setLevel(logging.DEBUG)
PLATFORMS: list[Platform] = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Strava from a config entry."""
hass.data.setdefault(DOMAIN, {})
# OAuth Stuff
try:
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
except ValueError:
implementation = config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
entry.data[CONF_CLIENT_ID],
entry.data[CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
config_flow.OAuth2FlowHandler.async_register_implementation(
hass, implementation
)
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
await session.async_ensure_token_valid()
async def async_update_data():
strava = StravaAPI(session)
data = await strava.fetch_strava_data()
return data
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Strava Ride",
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
)
await coordinator.async_config_entry_first_refresh()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import binascii
import os
import struct
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/nfcpy')
import nfc
num_blocks = 20
service_code = 0x090f
class StationRecord(object):
db = None
def __init__(self, row):
self.area_key = int(row[0], 10)
self.line_key = int(row[1], 10)
self.station_key = int(row[2], 10)
self.company_value = row[3]
self.line_value = row[4]
self.station_value = row[5]
@classmethod
def get_none(cls):
# Used when no matching station record is found
return cls(["0", "0", "0", "None", "None", "None"])
@classmethod
def get_db(cls, filename):
# Load the station CSV once and cache it
if cls.db is None:
cls.db = []
for row in csv.reader(open(filename, 'rU'), delimiter=',', dialect=csv.excel_tab):
cls.db.append(cls(row))
return cls.db
@classmethod
def get_station(cls, line_key, station_key):
# Look up the StationRecord matching the given line and station codes
for station in cls.get_db("StationCode.csv"):
if station.line_key == line_key and station.station_key == station_key:
return station
return cls.get_none()
class HistoryRecord(object):
def __init__(self, data):
# Interpret the byte string as big-endian
row_be = struct.unpack('>2B2H4BH4B', data)
# Interpret the byte string as little-endian
row_le = struct.unpack('<2B2H4BH4B', data)
self.db = None
self.console = self.get_console(row_be[0])
self.process = self.get_process(row_be[1])
self.year = self.get_year(row_be[3])
self.month = self.get_month(row_be[3])
self.day = self.get_day(row_be[3])
self.balance = row_le[8]
self.in_station = StationRecord.get_station(row_be[4], row_be[5])
self.out_station = StationRecord.get_station(row_be[6], row_be[7])
@classmethod
def get_console(cls, key):
# Only the most common console types are mapped
return {
0x03: "精算機",
0x04: "携帯型端末",
0x05: "車載端末",
0x12: "券売機",
0x16: "改札機",
0x1c: "乗継精算機",
0xc8: "自販機",
}.get(key)
@classmethod
def get_process(cls, key):
# Only the most common process types are mapped
return {
0x01: "運賃支払",
0x02: "チャージ",
0x0f: "バス",
0x46: "物販",
}.get(key)
@classmethod
def get_year(cls, date):
return (date >> 9) & 0x7f
@classmethod
def get_month(cls, date):
return (date >> 5) & 0x0f
@classmethod
def get_day(cls, date):
return (date >> 0) & 0x1f
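# Worked example of the date bit layout (illustrative value, not from a real card):
# a raw 16-bit date field of 0x18AF decodes to
#   year  = (0x18AF >> 9) & 0x7f -> 12
#   month = (0x18AF >> 5) & 0x0f -> 5
#   day   = (0x18AF >> 0) & 0x1f -> 15
# i.e. a 7-bit year, 4-bit month and 5-bit day packed into one halfword.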
def connected(tag):
print(tag)
if isinstance(tag, nfc.tag.tt3.Type3Tag):
try:
sc = nfc.tag.tt3.ServiceCode(service_code >> 6 ,service_code & 0x3f)
for i in range(num_blocks):
bc = nfc.tag.tt3.BlockCode(i,service=0)
data = tag.read_without_encryption([sc],[bc])
history = HistoryRecord(bytes(data))
print "=== %02d ===" % i
print "端末種: %s" % history.console
print "処理: %s" % history.process
print "日付: %02d-%02d-%02d" % (history.year, history.month, history.day)
print "入線区: %s-%s" % (history.in_station.company_value, history.in_station.line_value)
print "入駅順: %s" % history.in_station.station_value
print "出線区: %s-%s" % (history.out_station.company_value, history.out_station.line_value)
print "出駅順: %s" % history.out_station.station_value
print "残高: %d" % history.balance
print "BIN: "
print "" . join(['%02x ' % s for s in data])
except Exception as e:
print "error: %s" % e
else:
print "error: tag isn't Type3Tag"
if __name__ == "__main__":
clf = nfc.ContactlessFrontend('usb')
clf.connect(rdwr={'on-connect': connected})
|
# -*- coding: utf-8 -*-
"""Adult dataset example.
Neural network model definition
Example:
>>> ./dq0 project create --name demo # doctest: +SKIP
>>> cd demo # doctest: +SKIP
>>> copy user_model.py to demo/model/ # doctest: +SKIP
>>> ../dq0 data list # doctest: +SKIP
>>> ../dq0 model attach --id <dataset id> # doctest: +SKIP
>>> ../dq0 project deploy # doctest: +SKIP
>>> ../dq0 model train # doctest: +SKIP
>>> ../dq0 model state # doctest: +SKIP
>>> ../dq0 model predict --input-path </path/to/numpy.npy> # doctest: +SKIP
>>> ../dq0 model state # doctest: +SKIP
Copyright 2020, Gradient Zero
All rights reserved
"""
import logging
from dq0.sdk.errors.errors import fatal_error
from dq0.sdk.models.tf import NeuralNetworkClassification
logger = logging.getLogger()
class UserModel(NeuralNetworkClassification):
"""Derived from dq0.sdk.models.tf.NeuralNetwork class
Model classes provide a setup method for data and model
definitions.
"""
def __init__(self):
super().__init__()
def setup_data(self, **kwargs):
"""Setup data function
This function can be used to prepare data or perform
other tasks for the training run.
At runtime the selected dataset is attached to this model. It
is available as the `data_source` attribute.
For local testing call `model.attach_data_source(some_data_source)`
manually before calling `setup_data()`.
Use `self.data_source.read()` to read the attached data.
"""
from sklearn.model_selection import train_test_split
# get the input dataset
if self.data_source is None:
fatal_error('No data source found', logger=logger)
# read the dataset from the attached input source
data = self.data_source.read()
# do the train test split
X_train_df, X_test_df, y_train_ts, y_test_ts =\
train_test_split(data.iloc[:, :-1],
data.iloc[:, -1],
test_size=0.33
)
self.input_dim = X_train_df.shape[1]
# set data attributes
self.X_train = X_train_df
self.X_test = X_test_df
self.y_train = y_train_ts
self.y_test = y_test_ts
def setup_model(self, **kwargs):
"""Setup model function
Define the model here.
"""
import tensorflow.compat.v1 as tf
self.model = tf.keras.Sequential([
tf.keras.layers.Input(self.input_dim),
tf.keras.layers.Dense(10, activation='tanh'),
tf.keras.layers.Dense(10, activation='tanh'),
tf.keras.layers.Dense(2, activation='softmax')])
self.optimizer = 'Adam'
# As an alternative:
# self.optimizer = tensorflow.keras.optimizers.Adam(
# learning_rate=0.001)
self.epochs = 10
self.batch_size = 250
self.metrics = ['accuracy']
self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
# As an alternative, define the loss function with a string
|
#
# Copyright (C) 2009-2021 Alex Smith
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from SCons.Script import *
import builders, image
class BuildManager:
def __init__(self, host_template, target_template):
self.envs = []
self.host_template = host_template
self.target_template = target_template
self.libraries = {}
# Add a reference to ourself to all environments.
self.AddVariable('_MANAGER', self)
# Create compile strings that will be added to all environments.
verbose = ARGUMENTS.get('V') == '1'
def compile_str(msg):
return None if verbose else '\033[0;32m%8s\033[0m $TARGET' % (msg)
def compile_str_func(msg, target, source, env):
return '\033[0;32m%8s\033[0m %s' % (msg, str(target[0]))
self.AddVariable('ARCOMSTR', compile_str('AR'))
self.AddVariable('ASCOMSTR', compile_str('ASM'))
self.AddVariable('ASPPCOMSTR', compile_str('ASM'))
self.AddVariable('CCCOMSTR', compile_str('CC'))
self.AddVariable('SHCCCOMSTR', compile_str('CC'))
self.AddVariable('CXXCOMSTR', compile_str('CXX'))
self.AddVariable('SHCXXCOMSTR', compile_str('CXX'))
self.AddVariable('YACCCOMSTR', compile_str('YACC'))
self.AddVariable('LEXCOMSTR', compile_str('LEX'))
self.AddVariable('LINKCOMSTR', compile_str('LINK'))
self.AddVariable('SHLINKCOMSTR', compile_str('SHLINK'))
self.AddVariable('RANLIBCOMSTR', compile_str('RANLIB'))
self.AddVariable('GENCOMSTR', compile_str('GEN'))
self.AddVariable('STRIPCOMSTR', compile_str('STRIP'))
self.AddVariable('COMPILATIONDB_COMSTR', compile_str('DB'))
if not verbose:
# Substfile doesn't provide a method to override the output. Hack around.
func = lambda t, s, e: compile_str_func('GEN', t, s, e)
self.host_template['BUILDERS']['Substfile'].action.strfunction = func
self.target_template['BUILDERS']['Substfile'].action.strfunction = func
# Add builders from builders.py
self.AddBuilder('LDScript', builders.ld_script_builder)
# Create the distribution environment and various methods to add data
# to an image.
dist = self.CreateBare(name = 'dist', flags = {
'FILES': [],
'LINKS': [],
})
def add_file_method(env, target, path):
env['FILES'].append((path, target))
def add_link_method(env, target, path):
env['LINKS'].append((path, target))
dist.AddMethod(add_file_method, 'AddFile')
dist.AddMethod(add_link_method, 'AddLink')
# Add image builders.
dist['BUILDERS']['FSImage'] = image.fs_image_builder
dist['BUILDERS']['BootImage'] = image.boot_image_builder
dist['BUILDERS']['ISOImage'] = image.iso_image_builder
def __getitem__(self, key):
"""Get an environment by name."""
for (k, v) in self.envs:
if k and k == key:
return v
return None
def AddVariable(self, name, value):
"""Add a variable to all environments and all future environments."""
self.host_template[name] = value
self.target_template[name] = value
for (k, v) in self.envs:
v[name] = value
def AddBuilder(self, name, builder):
"""Add a builder to all environments and all future environments."""
self.host_template['BUILDERS'][name] = builder
self.target_template['BUILDERS'][name] = builder
for (k, v) in self.envs:
v['BUILDERS'][name] = builder
def AddTool(self, name, depends, act):
"""Add a build tool to all environments and all future environments."""
if type(depends) != list:
depends = [depends]
def dep_emitter(target, source, env):
for dep in depends:
Depends(target, dep)
return (target, source)
self.AddBuilder(name, Builder(action = act, emitter = dep_emitter))
def AddLibrary(self, name, build_libraries, include_paths):
self.libraries[name] = {
'build_libraries': build_libraries,
'include_paths': include_paths,
}
def CreateHost(self, **kwargs):
"""Create an environment for building for the host system."""
name = kwargs['name'] if 'name' in kwargs else None
flags = kwargs['flags'] if 'flags' in kwargs else {}
env = self.host_template.Clone()
self.merge_flags(env, flags)
self.envs.append((name, env))
return env
def CreateBare(self, **kwargs):
"""Create an environment for building for the target system."""
name = kwargs['name'] if 'name' in kwargs else None
flags = kwargs['flags'] if 'flags' in kwargs else {}
env = self.target_template.Clone()
self.merge_flags(env, flags)
self.envs.append((name, env))
return env
def Create(self, **kwargs):
"""Create an environment for building for the target system."""
name = kwargs['name'] if 'name' in kwargs else None
flags = kwargs['flags'] if 'flags' in kwargs else {}
libraries = kwargs['libraries'] if 'libraries' in kwargs else []
env = self.target_template.Clone()
config = env['_CONFIG']
# Get the compiler include directory which contains some standard
# headers.
from subprocess import Popen, PIPE
incdir = Popen([env['CC'], '-print-file-name=include'], stdout = PIPE).communicate()[0].strip().decode('utf-8')
# Specify -nostdinc to prevent the compiler from using the automatically
# generated sysroot. That only needs to be used when compiling outside
# the build system, we manage all the header paths internally. We do
# need to add the compiler's own include directory to the path, though.
self.merge_flags(env, {
'ASFLAGS': ['-nostdinc', '-isystem', incdir, '-include',
'build/%s-%s/config.h' % (config['ARCH'], config['BUILD'])],
'CCFLAGS': ['-nostdinc', '-isystem', incdir, '-include',
'build/%s-%s/config.h' % (config['ARCH'], config['BUILD'])],
'LIBPATH': [env['_LIBOUTDIR']],
'LIBS': libraries,
})
# Add in specified flags.
self.merge_flags(env, flags)
# Add paths for dependencies.
def add_library(lib):
if lib in self.libraries:
paths = [d[0] if type(d) == tuple else d for d in self.libraries[lib]['include_paths']]
self.merge_flags(env, {'CPPPATH': paths})
for dep in self.libraries[lib]['build_libraries']:
add_library(dep)
for lib in libraries:
add_library(lib)
# Add paths for default libraries. Technically we shouldn't add libc++
# here if what we're building isn't C++, but we don't know that here,
# so just add it - it's not a big deal.
if 'CCFLAGS' not in flags or '-nostdinc' not in flags['CCFLAGS']:
add_library('c++')
add_library('m')
add_library('system')
# Set up emitters to set dependencies on default libraries.
def add_library_deps(target, source, env):
if not '-nostdlib' in env['LINKFLAGS']:
Depends(target[0], env['_LIBOUTDIR'].File('libclang_rt.builtins-%s.a' % (env['_CONFIG']['TOOLCHAIN_ARCH'])))
if not ('-nostdlib' in env['LINKFLAGS'] or '-nostartfiles' in env['LINKFLAGS']):
Depends(target[0], env['_LIBOUTDIR'].glob('*crt*.o'))
if not ('-nostdlib' in env['LINKFLAGS'] or '-nodefaultlibs' in env['LINKFLAGS']):
Depends(target[0], env['_LIBOUTDIR'].File('libsystem.so'))
if env['SMARTLINK'](source, target, env, None) == '$CXX':
Depends(target[0], env['_LIBOUTDIR'].File('libc++.so'))
return target, source
env.Append(SHLIBEMITTER = [add_library_deps])
env.Append(PROGEMITTER = [add_library_deps])
# Add the userspace builders.
env.AddMethod(builders.kiwi_application_method, 'KiwiApplication')
env.AddMethod(builders.kiwi_library_method, 'KiwiLibrary')
env.AddMethod(builders.kiwi_service_method, 'KiwiService')
self.envs.append((name, env))
return env
def Clone(self, base, **kwargs):
"""Create a new environment based on an existing named environment."""
name = kwargs['name'] if 'name' in kwargs else None
flags = kwargs['flags'] if 'flags' in kwargs else {}
env = base.Clone()
self.merge_flags(env, flags)
self.envs.append((name, env))
return env
def merge_flags(self, env, flags):
# The MergeFlags function in Environment only handles lists. Add
# anything else manually.
merge = {}
for (k, v) in flags.items():
if type(v) == list:
if k in env:
merge[k] = v
else:
env[k] = v
elif type(v) == dict and k in env and type(env[k]) == dict:
env[k].update(v)
else:
env[k] = v
env.MergeFlags(merge)
|
#Test Online Decoder Set up
from definitions import *
### TODO
class PSTH: ###Initiate PSTH with desired parameters, creates unit_dict which has wanted units and will catch timestamps from plexon.
def __init__(self, channel_dict, pre_time_start, pre_time_end, post_time_start, post_time_end, pre_total_bins, post_total_bins):
self.pre_time_start = pre_time_start
self.pre_time_end = pre_time_end
self.post_time_start = post_time_start
self.post_time_end = post_time_end
self.pre_total_bins = pre_total_bins
self.post_total_bins = post_total_bins
self.channel_dict = channel_dict
self.unit_dict = {}
for chan, unit_list in self.channel_dict.items():
print('chan {}'.format(chan))
if chan not in self.unit_dict.keys():
self.unit_dict[chan] = {}
for unit in unit_list:
self.unit_dict[chan][unit] = []
###### build_unit will be used to gather timestamps from plexon and add them to the unit_dict which will be used to compare psth formats, etc.
def build_unit(self, tmp_channel, tmp_unit, tmp_timestamp):
self.unit_dict[tmp_channel][tmp_unit].append(tmp_timestamp)
##### Creates PSTH for the event
def psth(self, data, total_units, total_bins, pre_time, post_time):
unit_event_response = dict()
pop_event_response = dict()
total_unit_length = total_units * total_bins
start_indices = [start for start in range(0, total_unit_length, total_bins)]
end_indices = [end + total_bins for end in start_indices]
unit_ranges = list(zip(start_indices, end_indices))
for event in data['events']:
event_trials = len(data['events'][event])
population_response = numpy.empty(shape=(event_trials, (total_units * total_bins)))
unit_event_response[event] = dict()
for unit_index, unit in enumerate(data['neurons']):
unit_start = unit_ranges[unit_index][0]
unit_end = unit_ranges[unit_index][1]
unit_ts = numpy.array(data['neurons'][unit])
unit_response = numpy.empty(shape=(event_trials, total_bins))
# Create relative response from population on given trial
# Relative response dimensions:
# unit: Trials x total bins population: Trials x (units * total bins)
for trial_index, trial_ts in enumerate(data['events'][event]):
trial_ts = float(trial_ts)
offset_ts = unit_ts - trial_ts
binned_response = numpy.histogram(offset_ts, total_bins,
range = (-abs(pre_time), post_time))[0]
unit_response[trial_index] = binned_response
population_response[trial_index, unit_start:unit_end] = binned_response
unit_event_response[event][unit] = unit_response
pop_event_response[event] = population_response
return (pop_event_response, unit_event_response)
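# Shape note (illustrative numbers): with e.g. 10 units and 400 bins per unit,
# unit_event_response[event][unit] is (trials, 400) and
# pop_event_response[event] is (trials, 10 * 400), i.e. each trial row
# concatenates the binned responses of all units.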
def KBDTest(self):
print(self.unit_dict)
def run():
# Initialize the API class
client = PyOPXClientAPI()
running = True
# Connect to OmniPlex Server, check for success
client.connect()
if not client.connected:
print("Client isn't connected, exiting.\n")
print("Error code: {}\n".format(client.last_result))
exit()
print("Connected to OmniPlex Server\n")
# Get global parameters
global_parameters = client.get_global_parameters()
#
for source_id in global_parameters.source_ids:
source_name, _, _, _ = client.get_source_info(source_id)
if source_name == 'KBD': #Do something KBD
keyboard_event_source = source_id
# Print information on each source
for index in range(global_parameters.num_sources):
# Get general information on the source
source_name, source_type, num_chans, linear_start_chan = client.get_source_info(global_parameters.source_ids[index])
# Store information about the source types and names for later use.
source_numbers_types[global_parameters.source_ids[index]] = source_type
source_numbers_names[global_parameters.source_ids[index]] = source_name
print("----- Source {} -----".format(global_parameters.source_ids[index]))
print("Name: {}, Type: {}, Channels: {}, Linear Start Channel: {}".format(source_name,
source_types[source_type],
num_chans,
linear_start_chan))
if source_type == SPIKE_TYPE:
# Get information specific to a spike source
_, rate, voltage_scaler, trodality, pts_per_waveform, pre_thresh_pts = client.get_spike_source_info(source_name)
# Store information about the source rate and voltage scaler for later use.
source_numbers_rates[global_parameters.source_ids[index]] = rate
source_numbers_voltage_scalers[global_parameters.source_ids[index]] = voltage_scaler
print("Digitization Rate: {}, Voltage Scaler: {}, Trodality: {}, Points Per Waveform: {}, Pre-Threshold Points: {}".format(rate,
voltage_scaler,
trodality,
pts_per_waveform,
pre_thresh_pts))
if source_type == CONTINUOUS_TYPE:
# Get information specific to a continuous source
_, rate, voltage_scaler = client.get_cont_source_info(source_name)
# Store information about the source rate and voltage scaler for later use.
source_numbers_rates[global_parameters.source_ids[index]] = rate
source_numbers_voltage_scalers[global_parameters.source_ids[index]] = voltage_scaler
print("Digitization Rate: {}, Voltage Scaler: {}".format(rate, voltage_scaler))
print("\n")
print("After starting, use CTRL-C or any OmniPlex keyboard event to quit...")
input("\nPress Enter to start reading data...\n")
running = True
#################################################################
try:
while running == True:
# Wait up to 1 second for new data to come in //// Will Be Changed to deal with
client.opx_wait(1000)
# Get a new batch of client data, timestamps only (no waveform or A/D data)
new_data = client.get_new_data()
# Handle the unlikely case that there are fewer blocks returned than we want to output
if new_data.num_data_blocks < max_block_output:
num_blocks_to_output = new_data.num_data_blocks
else:
num_blocks_to_output = max_block_output
# If a keyboard event is in the returned data
for i in range(new_data.num_data_blocks):
if new_data.source_num_or_type[i] == keyboard_event_source:
psthtest.KBDTest()
running = False
####################################################################
#print("{} blocks read. Displaying info on first {} blocks; first {} samples of continuous/spike data.".format(new_data.num_data_blocks, num_blocks_to_output, max_samples_output))
for i in range(num_blocks_to_output):
# Output info
tmp_source_number = new_data.source_num_or_type[i] #Ignore Just need to check that they are all Spike Data / Timestamps with an IF
tmp_channel = new_data.channel[i] #IMPORTANT- Will Need this Channel
tmp_source_name = source_numbers_names[tmp_source_number] #Ignore
tmp_voltage_scaler = source_numbers_voltage_scalers[tmp_source_number] #Ignore
tmp_timestamp = new_data.timestamp[i] #IMPORTANT- This is the Timestamp Data that will be saved to the struct / class.
tmp_unit = new_data.unit[i] #IMPORTANT- Unit of the Channel
tmp_rate = source_numbers_rates[tmp_source_number] #Ignore
# Convert the samples from AD units to voltage using the voltage scaler
tmp_samples = new_data.waveform[i][:max_samples_output]
tmp_samples = [s * tmp_voltage_scaler for s in tmp_samples]
# Construct a string with the samples for convenience
tmp_samples_str = '{} ' * len(tmp_samples)
tmp_samples_str = tmp_samples_str.format(*tmp_samples)
if tmp_channel in channel_dict and tmp_unit in channel_dict[tmp_channel] and source_numbers_types[new_data.source_num_or_type[i]] == SPIKE_TYPE:
psthtest.build_unit(tmp_channel, tmp_unit, tmp_timestamp)
print("SRC:{} {} RATE:{} TS:{} CH:{} Unit:{} TS:{}".format(tmp_source_number, tmp_source_name, tmp_rate, tmp_timestamp, tmp_channel, tmp_unit, tmp_timestamp))
#
#if source_numbers_types[new_data.source_num_or_type[i]] == CONTINUOUS_TYPE:
# print("SRC:{} {} RATE:{} TS:{} CH:{} WF:{}".format(tmp_source_number, tmp_source_name, tmp_rate, tmp_timestamp, tmp_channel, tmp_samples_str))
if source_numbers_types[new_data.source_num_or_type[i]] == EVENT_TYPE and tmp_channel ==1: #Restrict this to Events that we want timestamps for in the code (EX:Event 1 good press)
print("SRC:{} {} TS:{} CH:{}".format(tmp_source_number, tmp_source_name, tmp_timestamp, tmp_channel))
###CLASS HERE ###
# Pause execution, allowing time for more data to accumulate in OmniPlex Server
time.sleep(poll_time_s)
except KeyboardInterrupt:
print("\nCTRL-C detected; stopping acquisition.")
# Disconnect from OmniPlex Server
client.disconnect()
################################################################
if __name__ == '__main__':
##Setup Plexon Server
# Initialize the API class
client = PyOPXClientAPI()
# Connect to OmniPlex Server, check for success
client.connect()
if not client.connected:
print("Client isn't connected, exiting.\n")
print("Error code: {}\n".format(client.last_result))
exit()
print("Connected to OmniPlex Server\n")
# Get global parameters
global_parameters = client.get_global_parameters()
# For this example, we'll treat DO channel 8 as if it's connected
# to the OmniPlex strobe input
strobe_channel = 9
for source_id in global_parameters.source_ids:
source_name, _, _, _ = client.get_source_info(source_id)
if source_name == 'KBD':
keyboard_event_source = source_id
if source_name == 'AI':
ai_source = source_id
# Print information on each source
for index in range(global_parameters.num_sources):
# Get general information on the source
source_name, source_type, num_chans, linear_start_chan = client.get_source_info(global_parameters.source_ids[index])
# Store information about the source types and names for later use.
source_numbers_types[global_parameters.source_ids[index]] = source_type
source_numbers_names[global_parameters.source_ids[index]] = source_name
if source_name == 'AI':
print("----- Source {} -----".format(global_parameters.source_ids[index]))
print("Name: {}, Type: {}, Channels: {}, Linear Start Channel: {}".format(source_name,
source_types[source_type],
num_chans,
linear_start_chan))
if source_type == CONTINUOUS_TYPE and source_name == 'AI':
# Get information specific to a continuous source
_, rate, voltage_scaler = client.get_cont_source_info(source_name)
# Store information about the source rate and voltage scaler for later use.
source_numbers_rates[global_parameters.source_ids[index]] = rate
source_numbers_voltage_scalers[global_parameters.source_ids[index]] = voltage_scaler
print("Digitization Rate: {}, Voltage Scaler: {}".format(rate, voltage_scaler))
##Setup for Plexon DO
compatible_devices = ['PXI-6224', 'PXI-6259']
plexdo = PyPlexDO()
doinfo = plexdo.get_digital_output_info()
device_number = None
for i in range(doinfo.num_devices):
if plexdo.get_device_string(doinfo.device_numbers[i]) in compatible_devices:
device_number = doinfo.device_numbers[i]
if device_number == None:
print("No compatible devices found. Exiting.")
sys.exit(1)
else:
print("{} found as device {}".format(plexdo.get_device_string(device_number), device_number))
res = plexdo.init_device(device_number)
if res != 0:
print("Couldn't initialize device. Exiting.")
sys.exit(1)
plexdo.clear_all_bits(device_number)
##End Setup for Plexon DO
##Setup for Plexon Server
# Handy strings to have associated to their respective types
system_types = { OPXSYSTEM_INVALID: "Invalid System", OPXSYSTEM_TESTADC: "Test ADC", OPXSYSTEM_AD64: "OPX-A", OPXSYSTEM_DIGIAMP: "OPX-D", OPXSYSTEM_DHSDIGIAMP: "OPX-DHP" }
source_types = { SPIKE_TYPE: "Spike", EVENT_TYPE: "Event", CONTINUOUS_TYPE: "Continuous", OTHER_TYPE: "Other" }
# This will be filled in later. Better to store these once rather than have to call the functions
# to get this information on every returned data block
source_numbers_types = {}
source_numbers_names = {}
source_numbers_rates = {}
source_numbers_voltage_scalers = {}
# To avoid overwhelming the console output, set the maximum number of data
# blocks to print information about
max_block_output = 100
# To avoid overwhelming the console output, set the maximum number of continuous
# samples or waveform samples to output
max_samples_output = 50
channel_dict = {1: [0,1,2,3], 2: [0,1,2], 4:[0,1,2,3,4], 5: [0,1]} #New Format to compare Channel and Unit. 0 is unsorted
pre_time_start = .200 #seconds (interpreted as time before the event, e.g. 0.200 means -200 ms)
pre_time_end = 0 #seconds
post_time_start = 0 #seconds
post_time_end = .200 #seconds
bin_size = 0.001 #seconds
pre_total_bins = 200 #bins
post_total_bins = 200 #bins
# Poll time in seconds
poll_time_s = .250
psthtest = PSTH(channel_dict, pre_time_start, pre_time_end, post_time_start, post_time_end, pre_total_bins, post_total_bins)
print('run')
### Run Function
run()
|
from django.db import models
from core.models import FactionType, User
class Faction(models.Model):
name = models.CharField(max_length=250)
description = models.TextField(blank=True, null=True)
image = models.ImageField(upload_to='factions', blank=True, null=True)
type = models.ForeignKey(FactionType, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.name
|
import random
finish=[]
start=[0,0]
fires=[]
walls=[]
env=[]
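# Grid cell encoding used below: 0 = empty, 1 = finish, -1 = fire, -2 = wall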
def init_env():
for i in range(102):
env.append([])
for j in range(102):
env[i].append(0)
def init_finish():
x=random.randint(1,101)
y=random.randint(1,101)
finish.append(x)
finish.append(y)
env[x][y]=1
def init_fires():
num=random.randint(1,20)
for i in range(num):
x=random.randint(1,101)
y=random.randint(1,101)
while x==finish[0] and y==finish[1]:
x=random.randint(1,101)
y=random.randint(1,101)
fires.append([x,y])
env[x][y]=-1
def init_walls():
num=random.randint(1,20)
for i in range(num):
x=random.randint(1,101)
y=random.randint(1,101)
while env[x][y]!=0:
x=random.randint(1,101)
y=random.randint(1,101)
walls.append([x,y])
env[x][y]=-2
init_env()
init_finish()
init_fires()
init_walls()
for i in range(len(env)):
print(env[i])
|
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from xinfo.getXInfo import get_xSettings
# Hospital Information Model.
class HospitalInformation(models.Model):
type = models.CharField(
max_length=4,
help_text="Type of hospital facility i.e. special /general")
name = models.CharField(
max_length=50,
help_text="Name of Institution")
address = models.CharField(
max_length=50,
help_text="Location")
city = models.CharField(
max_length=20,
help_text="City")
region = models.CharField(
max_length=15,
help_text="Region i.e. Province or State")
country = models.CharField(
max_length=25,
help_text="Country")
emergency_available = models.BooleanField(
default=True,
help_text="Emergency Service")
has_lab = models.BooleanField(
default=True,
help_text="Lab/ Investigation")
provides_ambulance = models.BooleanField(
default=True,
help_text="Ambulance service")
is_active = models.BooleanField(
default=True,
help_text="Closed/ Operating")
phone = models.CharField(
max_length=15,
help_text="Contact Number")
tollfree_no = models.CharField(
max_length=30,
help_text="Toll Free Number")
website = models.CharField(
max_length=100,
help_text="Website Link",
null=True)
@property
def hospital_type(self):
return get_xSettings(tablespace="hospital",
identity="type",
code=self.type)
class Meta:
indexes = [
models.Index(fields=['name'], name='hospital_idx'),
models.Index(fields=['city', 'region', 'country'],
name='location_idx')
]
# Department Information Model.
class DepartmentInformation(models.Model):
hospital = models.ForeignKey(
'HospitalInformation',
on_delete=models.CASCADE,
help_text="Related Hospital")
is_active = models.BooleanField(
default=True,
help_text="Closed/Serving")
name = models.CharField(
max_length=4,
help_text="Department Name")
depart_head = models.ForeignKey(
'user.User',
on_delete=models.RESTRICT,
help_text="Department Head Name")
updated_date = models.DateTimeField(auto_now_add=True)
@property
def department_name(self):
return get_xSettings(tablespace="department",
identity="name",
code=self.name)
# Room Information Model.
class RoomInformation(models.Model):
room_type = models.CharField(
max_length=4,
help_text="Type i.e. ICU / CCU / General")
block = models.CharField(
max_length=20,
help_text="Building Block Code")
sub_block = models.CharField(
max_length=20,
null=True,
help_text="Sub Block Code")
is_active = models.BooleanField(
default=True,
help_text="Active/Inactive")
department = models.ForeignKey(
'DepartmentInformation',
on_delete=models.CASCADE,
help_text="Department Related to")
@property
def type(self):
return get_xSettings(tablespace="room",
identity="type",
code=self.room_type)
class Meta:
indexes = [
models.Index(fields=['is_active', 'room_type'], name='roomty_idx')
]
# Bed Information Model
class BedInformation(models.Model):
bed_type = models.CharField(
max_length=4,
help_text="Bed Type")
room = models.ForeignKey(
'RoomInformation',
on_delete=models.CASCADE,
help_text="Room Related To")
is_active = models.BooleanField(
default=True,
help_text="Active/Inactive")
is_occupied = models.BooleanField(
default=False,
help_text="Occupied/Vacant")
visitor_allowed = models.BooleanField(
default=False,
help_text="Visitor Allowed")
planned_service_date = models.DateTimeField(
null=True,
help_text="Regular Planned Service Date")
last_service_date = models.DateTimeField(
auto_now_add=True,
help_text="Last service Date")
@property
def type(self):
return get_xSettings(tablespace="bed",
identity="type",
code=self.bed_type)
class Meta:
indexes = [
models.Index(fields=['is_active', 'is_occupied'],
name='available_idx'),
models.Index(fields=['planned_service_date'], name='service_idx')
]
# Vacancy Related Information
class VacancyInfo(models.Model):
class VacancyType(models.TextChoices):
CONTRACT = 'CONT', _('Contract Full Time')
PERMANENT = 'PERM', _('Permanent Full Time')
PARTTIME = 'PART', _('Part Time')
type = models.CharField(
max_length=4,
help_text="Type of Vacancy")
position = models.CharField(
max_length=15,
help_text="Position of vacancy")
required_experience = models.IntegerField(
default=0,
help_text="Experience Required Years")
department = models.ForeignKey(
'DepartmentInformation',
on_delete=models.CASCADE,
help_text="Department Related To")
hospital = models.ForeignKey(
'HospitalInformation',
on_delete=models.CASCADE,
help_text="Hospital Related To")
description = models.CharField(
max_length=1000,
help_text="Vacancy Description")
closure_date = models.DateTimeField(
default=timezone.now,
help_text="Apply up till")
total_open = models.IntegerField(
default=1,
help_text="Total Vacant Position")
link = models.CharField(
max_length=100,
help_text="Application Link",
null=True)
@property
def vacancy_type(self):
return get_xSettings(tablespace="room",
identity="type",
code=self.type)
class Meta:
indexes = [
models.Index(fields=['type', 'position', 'department'],
name='vacantpost_idx'),
models.Index(fields=['closure_date'], name='closedt_idx')
]
|
import re
import numpy as np
import lib.libhelperfunctions as hf
import lib.errors as err
import matplotlib.pyplot as plt
def import_cnc(fname):
with open(fname) as f:
text = f.read()
return text
def clear_code(s):
# replace all linebreaks
lines = s.splitlines()
# replace all indentations
# divide lines by ';' and just take left side (ignores comments)
lines = [li.strip().split(';')[0] for li in lines]
return lines
def find_desired_section(textlist,
start_string='START_EBH:'.lower(),
end_string=None):
textlist = hf.list_to_lower(textlist)
if not end_string:
end_string = ['END_EBH:'.lower(), 'RET'.lower()]
try:
start_idx = textlist.index(start_string)
except ValueError:
start_idx = None
if start_idx is not None:
for i, es in enumerate(end_string):
try:
end_idx = textlist.index(es, start_idx)
break
except ValueError:
if i < len(end_string) - 1:
continue
else:
end_idx = None
else:
end_idx = None
return start_idx, end_idx
def get_parameter_mode(cnc_list, crit='if _paket'):
converted_list = hf.list_to_lower(cnc_list)
if any(crit in s for s in converted_list):
mode = 'dual'
else:
mode = 'single'
return mode
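# Example (illustrative input lines): a program containing an 'IF _PAKET ...'
# branch is treated as a dual-part program:
#   get_parameter_mode(['IF _PAKET == "NP1"', 'G91'])  -> 'dual'
#   get_parameter_mode(['G91', 'A=10'])                -> 'single'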
def get_correct_part(n_p):
if n_p == 'NP-1':
n_search = 'if _paket == "NP2"'.lower()
elif n_p == 'NP-2':
n_search = 'if _paket == "NP1"'.lower()
else:
raise AttributeError(f"Part {n_p} is unknown.")
return n_search
def get_parameters(cnc, start_idx=None, end_idx=None, mode='single', n_p='NP-1', ax_finder=None, comment=';'):
if not ax_finder:
ax_dict = {'a': {
'r': r'\ba{1}=\S+',
'val': []
},
'sq': {
'r': r'\bsq\s+\S*',
'val': []
},
'sl': {
'r': r'\bsl\s+\S*',
'val': []
},
'fms': {
'r': r'\bfms\s+\S*',
'val': []
},
'soy': {
'r': r'\bsoy\S+',
'val': []
},
'g': {
'r': r'g90|g91',
'val': []
}
}
else:
ax_dict = ax_finder
if start_idx is not None and end_idx is not None:
section = cnc[start_idx:end_idx]
else:
section = cnc
conv_data = hf.list_to_lower(section)
exp = r'|'.join([v['r'] for k, v in ax_dict.items()])
if mode == 'dual':
# determine which section should be scanned
n_search = get_correct_part(n_p)
scan_start, scan_end = find_desired_section(conv_data, start_string=n_search, end_string=['ENDIF'.lower()])
erg = [e for e in (re.findall(exp, c) for i, c in enumerate(conv_data) if not scan_start <= i <= scan_end) if e]
else:
erg = [e for e in (re.findall(exp, c) for c in conv_data) if e]
for k in ax_dict.keys():
ax_dict[k]['val'] = [''] * len(erg)
for i, stringlist in enumerate(erg):
for s in stringlist:
if re.match(ax_dict[k]['r'], s):
ax_dict[k]['val'][i] = re.sub(f'^{k}[= ]', '', s)
return {k: v['val'] for k, v in ax_dict.items()}
def get_unknown_vars(c_list):
flattend = hf.flatten(c_list)
reg_vars = r'\b_.*?\b'
erg = [e for e in (re.findall(reg_vars, s) for s in flattend) if e]
return hf.remove_duplicates(hf.flatten(erg))
def evaluate_pb_expression(s):
pat = r'([IiAa][Cc]\(|\))'
return eval(re.sub(pat, '', str(s)))
def gcode_to_values(code_dict, lead_axis='a', g_start='g91', inc=5.0, offset=True, force_fill=False):
def get_distance(start, gs, val):
if not val:
return 0
calc_val = evaluate_pb_expression(val)
# If absolute values are to be assumed
if (')' in val) and ('(' not in val) or re.search(r'[AaCc]\(', val) or (gs == 'g90'):
dist = calc_val - start
elif gs == 'g91' or re.search(r'[IiCc]\(', val):
dist = calc_val
else:
raise TypeError(f'Expression {val} cannot be evaluated.')
return dist
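# Worked example for get_distance (illustrative values):
#   start=10, gs='g90' (absolute),    val='25'     -> dist = 25 - 10 = 15
#   start=10, gs='g91' (incremental), val='25'     -> dist = 25
#   start=10, gs='g91',               val='AC(25)' -> treated as absolute -> dist = 15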
# if the dictionary contains no G commands
cd = code_dict
if 'g' not in cd.keys() or not cd['g']:
raise AttributeError('No G-sets defined!')
# If the leading axis is not included
if lead_axis not in cd.keys():
raise AttributeError('Leading Axis not properly defined!')
# If no beam current was defined
if 'sq' not in cd.keys():
raise AttributeError('No Power Column defined!')
starts = {k: 0 for k in cd.keys()}
data = {k: [] for k in cd.keys() if k != 'g'}
lead_start = 0
g_act = g_start
steps = []
for i in range(len(cd['g'])):
# set the current G command
if cd['g'][i]:
g_act = cd['g'][i]
# otherwise carry the previous G command forward
else:
cd['g'][i] = g_act
# skip lines in which the leading axis does not move
if not cd[lead_axis][i]:
steps.append(None)
continue
# If the axis has not moved yet
elif lead_start == 0:
if offset:
# use the offset value as the start value
lead_start = hf.maybeMakeNumber(cd[lead_axis][i])
if type(lead_start) not in [int, float]:
raise AttributeError(f'Misconfigured Axis {lead_axis}')
lead_dist = get_distance(lead_start, g_act, cd[lead_axis][i])
steps.append(abs(int(lead_dist/inc)))
lead_start += lead_dist
sorted_steps = [s for s in steps if s]
for s in sorted_steps:
if 0 <= s < 1:
raise err.ValueTooLargeError(f'Increment {inc} is too coarse.')
elif 1 <= s < 3 and not force_fill:
raise err.ValueNearlyTooLargeError(f'Increment {inc} could be too coarse.')
elif s > 3000 and not force_fill:
raise err.ValueTooSmallError(f'Increment {inc} could be too small.')
# -1 because the G column is left out
offset_reached = [not offset] * (len(cd.keys()) - 1)
j = 0
for i in range(len(cd[lead_axis])):
s = steps[j]
if not s:
j += 1
continue
for it, k in enumerate(k for k in cd.keys() if k != 'g'):
v = cd[k][i]
if i == len(cd[lead_axis]) - 1:
add = 1
else:
add = 0
start_val = evaluate_pb_expression(starts[k])
if not all(offset_reached):
if not v:
vals = [start_val]
else:
vals = [evaluate_pb_expression(v)]
starts[k] = vals[0]
offset_reached[it] = True
else:
if not v:
vals = np.repeat(start_val, s)
else:
if k == 'fms' or k == 'f':
if not v:
stop_val = start_val
else:
stop_val = eval(v)
vals = np.repeat(stop_val, s)
else:
g = cd['g'][i]
stop_val = start_val + get_distance(start_val, g, v)
vals = np.linspace(start_val, stop_val, num=s+1, endpoint=True)[1:]
starts[k] = stop_val
data[k].append(vals)
j += 1
for k in data.keys():
data[k] = hf.flatten(data[k])
return data
def replace_missing_values(input_dict, replace_dict):
def replace_with_dict(li, r_dict):
list_to_string = ''.join(li)
newlist = li.copy()
for i, el in enumerate(li):
for key, value in r_dict.items():
if key in list_to_string and key in el:
newlist[i] = el.replace(key, str(value))
return newlist
outdict = {key: replace_with_dict(input_dict[key], replace_dict) for key in input_dict.keys()}
return outdict
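# Example (illustrative): unresolved variable names are substituted textually, e.g.
#   replace_missing_values({'a': ['_a_off', 'ic(5)']}, {'_a_off': 12.5})
#   -> {'a': ['12.5', 'ic(5)']}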
def get_available_axis(axlist):
all_ax = ['g', 'a', 'b', 'x', 'y', 'z', 'sox', 'soy', 'su', 'sv', 'sq', 'sl', 'fms']
return [a for a in all_ax if a not in axlist]
def parse_settings(c_list):
reg_calcs = r'\(.*?\)'
reg_vars = r'\b_.*?\b'
return
def get_value(t, search):
pattern = fr'{search}\s.*?='
prog = re.compile(pattern)
result = []
for s in t:
if prog.search(s) and '=' in s:
result = s.split(';')[0].split('=')[1].strip()
break
return result
def get_values_from_parameters(code, pars, mode='single', p_start=None, p_end=None, n_p='NP-1'):
v_dict = {i: None for i in pars}
code = hf.list_to_lower(code)
if mode == 'single':
for k in v_dict.keys():
v_dict[k] = get_value(code, k)
par_pat = r'_\w*'
erg = [re.match(par_pat, v_dict[k]) for k in v_dict.keys()]
while any(erg):
for k in v_dict.keys():
v_dict[k] = get_value(code, k)
erg = [re.match(par_pat, v_dict[k]) for k in v_dict.keys()]
elif mode == 'dual':
para_data = code[p_start:p_end]
# determine which section should be scanned
n_search = get_correct_part(n_p)
scan_start, scan_end = find_desired_section(para_data, start_string=n_search, end_string=['ENDIF'.lower()])
par_definition = [el for i, el in enumerate(para_data) if not scan_start <= i <= scan_end]
for k in v_dict.keys():
v_dict[k] = get_value(par_definition, k)
if not v_dict[k]:
v_dict[k] = get_value(code, k)
par_pat = r'_\w*'
erg = [re.search(par_pat, v_dict[k]) for k in v_dict.keys()]
while any(erg):
for k in v_dict.keys():
match = re.findall(par_pat, v_dict[k])
if not match:
continue
old_val = match
find_vals = [get_value(code, ov) for ov in old_val]
if not all(find_vals):
print(f'Cannot find >>{old_val}<<')
erg = False
break
for old, new in zip(old_val, find_vals):
v_dict[k] = v_dict[k].replace(old, new)
if not erg:
break
else:
erg = [re.search(par_pat, v_dict[k]) for k in v_dict.keys()]
for k in v_dict.keys():
try:
v_dict[k] = eval(v_dict[k])
except ValueError:
continue
return v_dict
def print_data(d: dict):
fig, ax = plt.subplots()
for k in d.keys():
ax.plot(d[k], label=k)
plt.legend()
plt.show()
def detect_offset(pars: dict, ax: str):
for i, el in enumerate(pars.get(ax)):
if el:
row = i
break
offset = any([k for k in pars.keys() if k not in ['fms', 'g']])
return offset
if __name__ == "__main__":
filename = "../data/EBH_123.MPF"
nst = 'NP-2'
leading_axis = 'a'
increment = 0.2
force_calc = False
# import the file
raw_cnc = import_cnc(filename)
# clean up the CNC code
cnc = clear_code(raw_cnc)
# find the parameters
strt, end = find_desired_section(cnc)
ebh_cnc = cnc[strt:end]
c_strt, c_end = find_desired_section(ebh_cnc, start_string='PYR_STRT'.lower(), end_string=['PYR_STOP'.lower()])
contour_cnc = cnc[strt + c_strt:strt + c_end]
contour_mode = get_parameter_mode(contour_cnc)
contour_parameters = get_parameters(contour_cnc, mode=contour_mode, n_p=nst)
# check the parameters for unknown variables
par_cnc = cnc[strt:strt + c_strt]
par_mode = get_parameter_mode(par_cnc)
unknown_vars = get_unknown_vars(contour_parameters.values())
if '_' + leading_axis + '_off' in unknown_vars:
lead_offset = True
else:
lead_offset = False
values = get_values_from_parameters(cnc, unknown_vars, p_start=strt, p_end=strt + c_strt, mode=par_mode, n_p=nst)
corrected_values = replace_missing_values(contour_parameters, values)
    # generate measurement values from the table
while True:
try:
gcode = gcode_to_values(corrected_values, inc=increment, offset=lead_offset, force_fill=force_calc)
break
except err.ValueTooLargeError as E:
force_calc = False
print(E)
new_inc = float(input("Provide a new increment!\n"))
increment = new_inc
except (err.ValueNearlyTooLargeError, err.ValueTooSmallError) as E:
force_calc = False
print(E)
while True:
response = str(input("Continue anyway? (Y/N)\n"))
if response in 'YyNnJj':
if response in 'YyJj':
force_calc = True
break
else:
new_inc = float(input("Provide a new increment!\n"))
increment = new_inc
break
else:
print('Unsupported Answer.')
print('finished')
# print_data(gcode)
|
import numpy as np
import os
import re
from collections import defaultdict
from numpy import random
validationDir = 'validation'
if not os.path.exists(validationDir):
os.makedirs(validationDir)
output = np.genfromtxt('whales.csv', skip_header=1, dtype=[('image', 'S10'), ('label', 'S11')], delimiter=',')
whales = [int(re.search(r"whale_(\d+)", x[1].decode()).group(1)) for x in output]
images = [int(re.search(r"w_(\d+)\.jpg", x[0].decode()).group(1)) for x in output]
labelsDict = defaultdict(lambda:-1, {})
filenames = []
testset = []
indir = 'train'
for file in os.listdir(indir):
if(file.endswith('.jpg')):
filenames.append(file)
testdir = 'test'
for file in os.listdir(testdir):
if(file.endswith('.jpg')):
testset.append(file)
for i in range(len(whales)):
if(labelsDict[whales[i]] == -1):
labelsDict[whales[i]] = []
labelsDict[whales[i]].append(images[i])
testset = [int(re.search(r"w_(\d+)\.jpg", x).group(1)) for x in testset]
for w in set(whales):
allExamplesForW = labelsDict[w]
allExamplesForW = [x for x in allExamplesForW if x not in testset]
allExamplesForW = random.permutation(allExamplesForW)
    for i in allExamplesForW[0:(len(allExamplesForW)//2)+(random.randint(0,(len(allExamplesForW))%2+1))]:
print("copying %d\n"%i)
os.rename(("%s/w_%d.jpg") % (indir, i), ("%s/w_%d.jpg") %(validationDir, i))
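# Added note: for every whale id, roughly half of its training images (skipping
# any that also appear in the test listing) are moved from train/ into
# validation/, with the leftover odd image assigned to either side at random.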
|
import torch
import cv2
import numpy as np
def size(net):
pp = 0
for p in list(net.parameters()):
nn = 1
for s in list(p.size()):
nn = nn * s
pp += nn
return pp
def gen_trimap(segmentation_mask, k_size = 7, iterations = 6):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k_size, k_size))
dilated = cv2.dilate(segmentation_mask, kernel, iterations=iterations)
eroded = cv2.erode(segmentation_mask, kernel, iterations=iterations)
trimap = np.zeros(segmentation_mask.shape, dtype=np.uint8)
trimap.fill(128)
trimap[eroded >= 255] = 255
trimap[dilated <= 0] = 0
return trimap
def inspect_model(model):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
images = torch.randn(4, 3, 64, 64).to(device)
net = model(num_classes=2).to(device)
logits = net(images)
if images.shape[-2:] != logits.shape[-2:]:
raise ValueError('Output sized {} while {} expected'.format(logits.shape[-2:], images.shape[-2:]))
print(size(net), model.__name__, sep='\t')
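# Hedged usage sketch (added; the synthetic mask below is illustrative only):
# gen_trimap expects a uint8 mask with foreground at 255 and marks the band
# between the eroded and dilated masks as the unknown value 128.
if __name__ == '__main__':
    demo_mask = np.zeros((64, 64), dtype=np.uint8)
    cv2.circle(demo_mask, (32, 32), 16, 255, -1)
    demo_trimap = gen_trimap(demo_mask, k_size=7, iterations=3)
    print('trimap values:', np.unique(demo_trimap))  # expected: [0, 128, 255]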
|
# This __init__.py file for the vscmgStateEffector package is automatically generated by the build system
from vscmgStateEffector import *
|
#!/usr/bin/env python3
s, _, *i = open(0).read().replace(" ", "").split()
for j in sorted(i, key = lambda a: int(a.translate(str.maketrans(s, "0123456789")))):
print(j)
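# Rough reading (added comment): the first whitespace-separated token defines a
# custom digit alphabet, the second token (presumably a count) is discarded,
# and the remaining tokens are printed in ascending order of their value under
# that alphabet, e.g. with alphabet "abcdefghij" the token "ba" sorts as 10.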
|
#
# Copyright 2021 W. Beck Andrews
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
import matplotlib.pyplot as plt
import utils
import netCDF4 as nc
def get_phi(fname):
dat = nc.Dataset(fname)
n = dat['phi'].shape[0] - 1
return dat['phi'][n,...]
def plot_G_x(base):
Lx = 100
timedep = get_phi(base+'timedep/test.nc')
nocontrol = get_phi(base+'nocontrol/test.nc')
altmin = get_phi(base+'altmin/test.nc')
x = np.linspace(-Lx/2,Lx/2,timedep.shape[0])
# this function returns the fracture energy G as a function of
# the x coordinate (i.e., integrated in y/z)
energy_line_td = utils.fracture_energy_line(timedep,[255,255],Lx)
energy_line_am = utils.fracture_energy_line(altmin,[255,255],Lx)
energy_line_nc = utils.fracture_energy_line(nocontrol,[255,255],Lx)
fig, ax = plt.subplots(1,1, figsize=(3,2.25),dpi=200)
ax.plot(x, energy_line_am,'-',label='Alternating Min.', color='tab:orange')
ax.plot(x, energy_line_nc,'-',label='Time-Dependent', dashes=(2, 1), color='tab:green')
ax.plot(x, energy_line_td,'-',label='Near Equilibrium', dashes=(1, 1), color='tab:blue')
ax.set_xlim([0, Lx/2])
plt.xlabel("$x$-coordinate")
plt.ylabel("$G/G_c$")
plt.legend(loc = 'upper left')
plt.savefig('G_x.svg',dpi=600)
def plot_phi_y(base,xind, ycenter):
Lx = 100
timedep = get_phi(base+'timedep/test.nc')
nocontrol = get_phi(base+'nocontrol/test.nc')
altmin = get_phi(base+'altmin/test.nc')
nx = timedep.shape[0]
halfnx = int((nx+1)/2)
phi_td = np.roll(timedep[xind,:], halfnx-ycenter)
phi_nc = np.roll(nocontrol[xind,:], halfnx-ycenter)
phi_am = np.roll(altmin[xind,:], halfnx-ycenter)
y = np.linspace(-Lx/2,Lx/2,timedep.shape[0])
y_analytic = np.linspace(-Lx/2,Lx/2,timedep.shape[0]*2+1)
phi_analytic = utils.analytical_1D(y_analytic)
fig, ax = plt.subplots(1,1, figsize=(3,2.25),dpi=200)
ax.plot(y, phi_am,'o',label='Alternating Min.',color='None')
ax.plot(y, phi_nc,'s',label='Time-Dependent',color='None')
ax.plot(y, phi_td,'^',label='Near Equilibrium',color='None')
ax.plot(y_analytic, phi_analytic,'-k',label='$(1-x/2)^2$',linewidth=0.5)
ax.set_xlim([-4, 4])
ax.set_ylim([0, 1.0])
plt.xlabel("$y$-coordinate")
plt.ylabel("$\phi$")
plt.legend(bbox_to_anchor=(1.05, 0.8), loc='upper left')
plt.savefig('phi_y.svg',dpi=600)
if(__name__ == '__main__'):
utils.set_mpl_params()
plot_G_x('/work/ws/nemo/fr_wa1005-mu_test-0/quasistatic/combined/tension/point/')
plot_phi_y('/work/ws/nemo/fr_wa1005-mu_test-0/quasistatic/combined/tension/point/',510,257)
|
import requests
import json
from DNA_Token import get_auth_token
def get_device():
"""Building out function to retrieve the uuid of a device. Using requests.get to make a call to the network device"""
token = get_auth_token() # Get Token
url = "https://10.1.100.9/dna/intent/api/v1/network-device/ip-address/10.1.255.10"
hdr = {'x-auth-token': token, 'content-type' : 'application/json'}
resp = requests.get(url, verify=False, headers=hdr) # Make the Get Request
device_det = resp.json()
post_commandr_list(device_det, token)
def post_commandr_list(device_inf_json, post_token):
"""Building out function to retrieve list of command runner commands. Using requests.get to make a call to the DNAC"""
device_uuid = device_inf_json['response']['id']
print ("The device id is:",device_uuid)
post_token = get_auth_token() # Get Token
url = "https://10.1.100.9/dna/intent/api/v1/network-device-poller/cli/read-request"
hdr = {'x-auth-token': post_token, 'content-type' : 'application/json'}
param = {
"name" : "show ver",
"commands" : ["show clock"],
"deviceUuids" : [device_uuid]
}
resp = requests.post(url, verify=False, headers=hdr, data=json.dumps(param)) # Make the Post
command_show = resp.json()
print (command_show)
if __name__ == "__main__":
get_device()
|
# coding=utf-8
from singer import Singer
import requests
import json
import xlwt
# pip3 install singer xlwt
class GitlabRest(object):
def __init__(self):
config = Singer.config(mode='gitlab')
self.__gateway = config.get_config('url').strip('/')
self.__api_url = "%s/api/v4" % self.__gateway
self.__token = config.get_config('token')
self.__session = requests.session()
self.__session.keep_alive = False
def __request(self, api, method='GET', payload={}):
url = "%s/%s" % (self.__api_url, api)
payload["private_token"] = self.__token
response = self.__session.request(method, url, data=payload)
code = response.status_code
if code != 200:
return None
return json.loads(response.text)
def __get_all(self, api, payload={}):
data_list = []
payload['per_page'] = 100
page = 1
while True:
payload['page'] = page
items = self.__request(api, payload=payload)
if items == None or len(items) == 0:
break
else:
page += 1
data_list.extend(items)
return data_list
def get_users(self, page=1, size=20, all=False):
api = 'users'
if all:
return self.__get_all(api)
else:
return self.__request(api, payload={
'page': page,
'per_page': size
})
def get_users_simple(self, page=1, size=20, all=False):
users = self.get_users(page, size, all)
data = {}
for user in users:
data[user['email']] = user['name']
return data
def get_projects(self, page=1, size=20, all=False):
api = 'projects'
if all:
return self.__get_all(api)
else:
return self.__request(api, payload={
'page': page,
'per_page': size
})
def get_branches(self, project_id):
url = "projects/%d/repository/branches" % project_id
branches = self.__request(url)
return branches
def get_commits(self, project_id, branch, start_time=None, end_time=None, page=1, size=20, all=False):
api = "projects/%d/repository/commits" % (project_id)
payload = {
'ref_name': branch,
'order_by': 'created_at',
'sort': 'asc'
}
if start_time:
payload['since'] = start_time
if end_time:
payload['until'] = end_time
if all:
return self.__get_all(api, payload)
else:
payload['page'] = page
payload['per_page'] = size
return self.__request(api, payload=payload)
def get_commit(self, project_id, commit_id):
api = "projects/%d/repository/commits/%s" % (project_id, commit_id)
return self.__request(api)
def get_codes(self, start_time=None, end_time=None):
projects = self.get_projects(all=True)
data = []
commit_ids = []
users = self.get_users_simple(all=True)
def get_project(project):
id = project['id']
branches = [i['name'] for i in self.get_branches(id)]
print(id, project['name'], ','.join(branches))
for branch in branches:
commits = self.get_commits(
id, branch, start_time, end_time, all=True)
print("%d:%s,commits:%d" % (id, branch, len(commits)))
for commit in commits:
commit_id = commit['id']
if commit_id in commit_ids:
continue
commit_data = self.get_commit(id, commit_id)
# print(commit_data)
commit_stats = commit_data['stats']
committer_email = commit_data['committer_email']
committer_name = commit_data['committer_name']
if committer_email in users:
committer_name = users[committer_email]
data.append({
'project_id': id,
'group': project['namespace']['full_path'],
'project': project['name'],
'desc': project['description'],
'branch': branch,
'commit_id': commit_id,
'created_at': commit_data['created_at'],
'message': commit_data['message'],
'author': committer_name,
'author_name': committer_name,
'author_email': committer_email,
'additions': commit_stats['additions'],
'deletions': commit_stats['deletions'],
'total': commit_stats['total']
})
commit_ids.append(commit_id)
# print(commit_id, commit_data)
with Singer.executor(max_workers=6) as executor:
for project in projects:
executor.start(get_project, project)
executor.wait_complete()
return data
def write_excel(data, excel_path):
workbook = xlwt.Workbook()
    # create the first sheet
sheet = workbook.add_sheet('git')
    row0 = ['Project ID', 'Group', 'Project Name', 'Project Description', 'Branch Name', 'Commit ID',
            'Added Lines', 'Deleted Lines', 'Total Lines', 'Commit Time', 'Committer', 'Committer Email', 'Message']
for i in range(0, len(row0)):
sheet.write(0, i, row0[i])
for i in range(0, len(data)):
row = i+1
recode = data[i]
sheet.write(row, 0, recode['project_id'])
sheet.write(row, 1, recode['group'])
sheet.write(row, 2, recode['project'])
sheet.write(row, 3, recode['desc'])
sheet.write(row, 4, recode['branch'])
sheet.write(row, 5, recode['commit_id'])
sheet.write(row, 6, recode['additions'])
sheet.write(row, 7, recode['deletions'])
sheet.write(row, 8, recode['total'])
sheet.write(row, 9, recode['created_at'])
sheet.write(row, 10, recode['author_name'])
sheet.write(row, 11, recode['author_email'])
sheet.write(row, 12, recode['message'])
workbook.save(excel_path)
if __name__ == "__main__":
gl = GitlabRest()
# users = gl.get_users_simple(all=True)
# print(users)
data = gl.get_codes('2020-01-01', '2021-01-01')
write_excel(data, 'd://gitlab_02.xlsx')
|
import re
import yaml
class Character:
def __init__(self, name):
self.name = name
class Event:
def __init__(self, template_text):
self.template_text = template_text
def format(self, context):
tag_pattern = re.compile('<[a-z]+>')
formatted_text = self.template_text
for tag in re.finditer(tag_pattern, formatted_text):
print(tag)
if __name__ == "__main__":
    result = yaml.safe_load(open('events.yml', 'r'))
event = Event(result['events'][0]['template_text'])
event.format(None)
|
#!/usr/bin/env python3
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import softmax
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
from sklearn.metrics import log_loss
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=10, type=int, help="Batch size")
parser.add_argument(
"--classes", default=10, type=int, help="Number of classes to use"
)
parser.add_argument(
"--hidden_layer", default=20, type=int, help="Hidden layer size"
)
parser.add_argument(
"--iterations", default=50, type=int, help="Number of iterations over the data"
)
parser.add_argument(
"--learning_rate", default=0.01, type=float, help="Learning rate"
)
parser.add_argument("--seed", default=42, type=int, help="Random seed")
parser.add_argument("--test_size", default=797, type=int, help="Test set size")
args = parser.parse_args()
# Set random seed
np.random.seed(args.seed)
# Use the digits dataset
data, target = sklearn.datasets.load_digits(n_class=args.classes, return_X_y=True)
# Append a constant feature with value 1 to the end of every input data
data = np.pad(data, ((0, 0), (0, 1)), constant_values=1)
# Split the data randomly to train and test using `sklearn.model_selection.train_test_split`,
# with `test_size=args.test_size` and `random_state=args.seed`.
train_data, test_data, train_target, test_target = sklearn.model_selection.train_test_split(
data, target, stratify=target, test_size=args.test_size, random_state=args.seed
)
# Generate initial model weights
weights = [
np.random.uniform(
size=[train_data.shape[1], args.hidden_layer], low=-0.1, high=0.1
),
np.random.uniform(size=[args.hidden_layer, args.classes], low=-0.1, high=0.1),
]
relu = lambda x: np.maximum(x, 0)
def forward(inputs):
# TODO: Implement forward propagation, returning *both* the value of the hidden
# layer and the value of the output layer.
#
# We assume a neural network with a single hidden layer of size `args.hidden_layer`
# and ReLU activation, where ReLU(x) = max(x, 0), and an output layer with softmax
# activation.
#
# The value of the hidden layer is computed as ReLU(inputs times weights[0]).
# The value of the output layer is computed as softmax(hidden_layer times weights[1]).
#
# Note that you need to be careful when computing softmax, because the exponentiation
# in softmax can easily overflow. To avoid it, you can use the fact that
# softmax(z) = softmax(z + any_constant) and compute softmax(z) = softmax(z - maximum_of_z).
# That way we only exponentiate values which are non-positive, and overflow does not occur.
Z = inputs @ weights[0]
A = relu(Z)
output = A @ weights[1]
return softmax(output, axis=1), A, Z
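    # Added note: the minibatch loop below implements manual backpropagation for
    # this two-layer network. dZ2 = probs - one_hot(targets) is the gradient of
    # the cross-entropy loss with respect to the output logits, dW2 = A^T dZ2 / B,
    # dZ1 propagates dZ2 through weights[1] and the ReLU derivative (Z >= 0),
    # and dW1 = X^T dZ1 / B, with B the batch size.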
for iteration in range(args.iterations):
permutation = np.random.permutation(train_data.shape[0])
permuted_x_train, permuted_y_train = (
train_data[permutation],
train_target[permutation],
)
batch_count = int(train_data.shape[0] / args.batch_size)
for batch_x, batch_y in zip(
np.split(permuted_x_train, batch_count),
np.split(permuted_y_train, batch_count),
):
probs, A, Z = forward(batch_x)
batch_y = np.eye(args.classes)[batch_y]
dZ2 = probs - batch_y
dW2 = 1 / args.batch_size * A.T @ dZ2
dZ1 = dZ2 @ weights[1].T * (Z >= 0).astype(np.int8)
dW1 = 1 / args.batch_size * batch_x.T @ dZ1
weights[1] -= args.learning_rate * dW2
weights[0] -= args.learning_rate * dW1
train_probs, _, _ = forward(train_data)
test_probs, _, _ = forward(test_data)
predictions_train = np.argmax(train_probs, axis=1)
predictions_test = np.argmax(test_probs, axis=1)
print(
"After iteration {}: train acc {:.1f}%, test acc {:.1f}%".format(
iteration + 1,
100
* sklearn.metrics.accuracy_score(
train_target, predictions_train
), # Training accuracy,
100
* sklearn.metrics.accuracy_score(
test_target, predictions_test
), # Test accuracy,
)
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from itertools import product, chain
from pathlib import Path
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.direction import Direction, get_diagonal_directions
from annotation.piece import Piece
from ..gi import BlackGiMoveLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/12'
class TestBlackGiMove(tf.test.TestCase):
@classmethod
def setUpClass(cls):
dotenv_path = Path(__file__).parents[3] / '.env'
load_dotenv(str(dotenv_path))
cls.data_format = os.environ.get('DATA_FORMAT')
cls.use_cudnn = bool(os.environ.get('USE_CUDNN'))
def test_gi_move(self):
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
effect = {
direction: np.empty(shape, dtype=np.bool)
for direction in chain(get_diagonal_directions(), [Direction.UP])
}
board = np.empty(shape, dtype=np.int32)
ph_board = tf.placeholder(tf.int32, shape=shape)
gi_effect = {direction: tf.placeholder(tf.bool, shape=shape)
for direction in effect.keys()}
non_promoting, promoting = BlackGiMoveLayer(
data_format=self.data_format
)(ph_board, gi_effect)
        # squeeze the extra dimensions so the results are easier to index
non_promoting = {key: tf.squeeze(value)
for key, value in non_promoting.items()}
promoting = {key: tf.squeeze(value)
for key, value in promoting.items()}
feed_dict = {placeholder: effect[direction]
for direction, placeholder in gi_effect.items()}
feed_dict[ph_board] = board
with self.test_session() as sess:
for i, j, piece in product(range(9), range(9), range(Piece.SIZE)):
for e in effect.values():
e[:] = False
if self.data_format == 'NCHW':
e[0, 0, i, j] = True
else:
e[0, i, j, 0] = True
piece = Piece(piece)
board[:] = piece
n, p = sess.run([non_promoting, promoting],
feed_dict=feed_dict)
b = np.squeeze(board)
for direction in effect.keys():
n_move = n[direction]
p_move = p[direction]
if j == 0 and direction in (Direction.RIGHT_DOWN,
Direction.LEFT_DOWN):
continue
with self.subTest(i=i, j=j, direction=direction):
self.assertTupleEqual((9, 9), n_move.shape)
self.assertTupleEqual((9, 9), p_move.shape)
if b[i, j] < Piece.WHITE_FU:
                            # one of our own pieces occupies the square, so no move is possible
self.assertFalse(np.all(n_move))
self.assertFalse(np.all(p_move))
else:
self.assertTrue(n_move[i, j])
n_move[i, j] = False
self.assertFalse(np.all(n_move))
if (j < 3 or j == 3 and
direction in (Direction.RIGHT_DOWN,
Direction.LEFT_DOWN)):
self.assertTrue(p_move[i, j])
p_move[i, j] = False
self.assertFalse(np.all(p_move))
|
import math
import torch
import numpy as np
import matplotlib.pyplot as plt
from tme6 import CirclesData
def init_params(nx, nh, ny):
params = {}
    # TODO fill in with the parameters Wh, Wy, bh, by
# params["Wh"] = ...
return params
def forward(params, X):
outputs = {}
    # TODO fill in with the values X, htilde, h, ytilde, yhat
# outputs["X"] = ...
return outputs['yhat'], outputs
def loss_accuracy(Yhat, Y):
L = 0
acc = 0
# TODO
return L, acc
def backward(params, outputs, Y):
grads = {}
    # TODO fill in with the gradients for Wy, Wh, by, bh
# grads["Wy"] = ...
return grads
def sgd(params, grads, eta):
    # TODO update the contents of params
return params
if __name__ == '__main__':
# init
data = CirclesData()
data.plot_data()
N = data.Xtrain.shape[0]
Nbatch = 10
nx = data.Xtrain.shape[1]
nh = 10
ny = data.Ytrain.shape[1]
eta = 0.03
    # First tests, code to be modified
    params = init_params(nx, nh, ny)
    Yhat, outs = forward(params, data.Xtrain)
    L, _ = loss_accuracy(Yhat, data.Ytrain)
    grads = backward(params, outs, data.Ytrain)
    params = sgd(params, grads, eta)
    # TODO training loop
    # wait for a key press to keep the figures open
input("done")
|
import click
import redis
from prometheus_client import start_http_server
from huey_exporter.EventQueue import EventQueue
@click.command()
@click.option('--connection-string', '-c',
envvar='REDIS_CONNECTION_STRING',
default='redis://localhost:6379',
help='Connection string to redis including database. for example redis://localhost:6379/0'
)
@click.option('--queue-name',
'-q', envvar='QUEUE_NAME',
required=True,
help='Name of the queue to monitor'
)
@click.option('--port', '-p',
envvar='EXPORTER_PORT',
default=9100,
type=click.IntRange(0, 65535),
help='Port to expose the metrics on'
)
def run_exporter(connection_string, queue_name, port):
# Start up the server to expose the metrics.
start_http_server(port)
connection_pool = redis.BlockingConnectionPool.from_url(
connection_string,
max_connections=5,
timeout=10
)
queue = EventQueue(queue_name, connection_pool)
queue.listen()
def main():
run_exporter()
if __name__ == '__main__':
main()
|
x = int(input())
n = 1
while 2 ** n <= x:
n += 1
print(n - 1, 2 ** (n - 1))
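# Added note: the loop finds the largest n with 2**n <= x and prints both the
# exponent and the power itself, e.g. x = 100 prints "6 64".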
|
import discord
import random
import urllib
from discord.ext import commands
class MarioParty(commands.Cog):
"""Cog for Mario Party commands"""
def __init__(self, bot):
self.bot = bot
#Board Command
@commands.group(pass_context=True)
async def board(self, ctx): pass
#1 Subcommand
@board.command(pass_context=True, aliases=['1'])
async def one(self, ctx):
boardList=["DK's Jungle Adventure", "Peach's Birthday Cake", "Yoshi's Tropical Island", "Mario's Rainbow Castle", "Wario's Battle Canyon", "Luigi's Engine Room", "Eternal Star", "Bowser's Magma Mountain"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/1/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#2 Subcommand
@board.command(pass_context=True, aliases=['2'])
async def two(self, ctx):
boardList=["Western Land", "Space Land", "Mystery Land", "Pirate Land", "Horror Land", "Bowser Land"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/2/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#3 Subcommand
@board.command(pass_context=True, aliases=['3'])
async def three(self, ctx):
boardList=["Chilly Waters", "Deep Bloober Sea", "Woody Woods", "Creepy Cavern", "Spiny Desert", "Waluigi's Island"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/3/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#4 Subcommand
@board.command(pass_context=True, aliases=['4'])
async def four(self, ctx):
boardList=["Toad's Midway Madness", "Boo's Haunted Bash", "Koopa's Seaside Soiree", "Goomba's Greedy Gala", "Shy Guy's Jungle Jam", "Bowser's Gnarly Party"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/4/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#5 Subcommand
@board.command(pass_context=True, aliases=['5'])
async def five(self, ctx):
boardList=["Toy Dream", "Rainbow Dream", "Pirate Dream", "Future Dream", "Undersea Dream", "Sweet Dream", "Bowser's Nightmare"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/5/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#6 Subcommand
@board.command(pass_context=True, aliases=['6'])
async def six(self, ctx):
boardList=["Towering Treetop", "E Gadd's Garage", "Faire Square", "Snowflake Lake", "Castaway Bay", "Clockwork Castle"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/6/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#7 Subcommand
@board.command(pass_context=True, aliases=['7'])
async def seven(self, ctx):
boardList=["Grand Canal", "Pagoda Peak", "Pyramid Park", "Neon Heights", "Windmillville", "Bowser's Enchanted Inferno"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/7/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#8 Subcommand
@board.command(pass_context=True, aliases=['8'])
async def eight(self, ctx):
boardList=["DK's Treetop Temple", "Goomba's Booty Boardwalk", "King Boo's Haunted Hideaway", "Shy Guy's Perplex Express", "Koopa's Tycoon Town", "Bowser's Warped Orbit"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/8/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#9 Subcommand
@board.command(pass_context=True, aliases=['9'])
async def nine(self, ctx):
boardList=["Toad Road", "Blooper Beach", "Boo's Horror Castle", "DK's Jungle Ruins", "Bowser's Station", "Magma Mine", "Bob-omb Factory"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/9/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#10 Subcommand
@board.command(pass_context=True, aliases=['10'])
async def ten(self, ctx):
boardList=["Mushroom Park", "Whimsical Waters", "Chaos Castle", "Airship Central", "Haunted Trail"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/10/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#DS Subcommand
@board.command()
async def ds(self, ctx):
boardList=["Wiggler's Garden", "Kamek's Library", "Bowser's Pinball Machine", "Toadette's Music Room", "DK's Stone Statue"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/DS/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#Super Subcommand
@board.command(pass_context=True, aliases=['s'])
async def super(self, ctx):
boardList=["Whomp's Domino Ruins", "King Bob-omb's Powderkeg Mine", "Megafruit Paradise", "Kamek's Tantalizing Tower"]
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/Super/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
#Superstars Subcommand
@board.command(pass_context=True, aliases=['ss'])
async def superstars(self, ctx):
boardList=["Yoshi's Tropical Island", "Peach's Birthday Cake", 'Space Land', 'Horror Land', 'Woody Woods']
board=random.choice(boardList)
boardParsed = urllib.parse.quote(board)
embed = discord.Embed(title=board,
colour=0x98FB98,
timestamp=ctx.message.created_at)
embed.set_image(url="https://raw.githubusercontent.com/UnicorNora/Doopliss/master/boards/Superstars/" + boardParsed + ".png")
embed.set_footer(text=f"Ran by: {ctx.message.author} • Yours truly, {self.bot.user.name}")
embed.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar.url)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(MarioParty(bot))
|
from anthill.framework.utils.asynchronous import as_future, thread_pool_exec as future_exec
from anthill.framework.core.exceptions import ImproperlyConfigured
from anthill.framework.core.paginator import Paginator, InvalidPage
from anthill.framework.handlers import RequestHandler
from anthill.framework.http.errors import Http404
from anthill.framework.utils.translation import translate as _
from anthill.platform.api.rest.handlers.base import SerializableMixin
from anthill.platform.handlers import UserHandlerMixin
from sqlalchemy_utils import sort_query
from sqlalchemy.orm import Query
from .base import RestAPIMixin
class MultipleObjectMixin:
"""A mixin for handlers manipulating multiple objects."""
allow_empty = True
queryset = None
model = None
paginate_by = None
paginate_orphans = 0
context_object_name = None
paginator_class = Paginator
page_kwarg = 'page'
ordering = None
def get_queryset(self):
"""
Return the list of items for this handler.
The return value must be an instance of `sqlalchemy.orm.Query`.
"""
if isinstance(self.queryset, Query):
queryset = self.queryset
elif self.model is not None:
queryset = self.model.query
else:
raise ImproperlyConfigured(
"%(cls)s is missing a queryset. Define "
"%(cls)s.model, %(cls)s.queryset, or override "
"%(cls)s.get_queryset()." % {
'cls': self.__class__.__name__
}
)
ordering = self.get_ordering()
if ordering:
if isinstance(ordering, str):
ordering = (ordering,)
queryset = sort_query(queryset, *ordering)
return queryset
def get_ordering(self):
"""Return the field or fields to use for ordering the queryset."""
return self.ordering
def paginate_queryset(self, queryset, page_size):
"""Paginate the queryset, if needed."""
paginator = self.get_paginator(
queryset, page_size, orphans=self.get_paginate_orphans(),
allow_empty_first_page=self.get_allow_empty())
page_kwarg = self.page_kwarg
page = self.path_kwargs.get(page_kwarg) or self.get_argument(page_kwarg, 1)
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_("Page is not 'last', nor can it be converted to an int."))
try:
page = paginator.page(page_number)
return paginator, page, page.object_list, page.has_other_pages()
except InvalidPage as e:
raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
'page_number': page_number,
'message': str(e)
})
def get_paginate_by(self, queryset):
"""
Get the number of items to paginate by, or ``None`` for no pagination.
"""
return self.paginate_by
def get_paginator(self, queryset, per_page, orphans=0,
allow_empty_first_page=True):
"""Return an instance of the paginator for this handler."""
return self.paginator_class(
queryset, per_page, orphans=orphans,
allow_empty_first_page=allow_empty_first_page)
def get_paginate_orphans(self):
"""
Return the maximum number of orphans extend the last page by when
paginating.
"""
return self.paginate_orphans
def get_allow_empty(self):
"""
Return ``True`` if the handler should display empty lists and ``False``
if a 404 should be raised instead.
"""
return self.allow_empty
def get_context_object_name(self, object_list):
"""Get the name of the item to be used in the context."""
if self.context_object_name:
return self.context_object_name
else:
return 'object_list'
async def get_context_data(self, *, object_list=None, **kwargs):
queryset = object_list if object_list is not None else self.object_list
if isinstance(queryset, Query):
queryset = await future_exec(queryset.all)
page_size = self.get_paginate_by(queryset)
context_object_name = self.get_context_object_name(queryset)
context = {
'is_paginated': False,
}
if page_size:
paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
context['is_paginated'] = is_paginated
context[context_object_name] = self.serialize(queryset, many=True)
context.update(kwargs)
return await super().get_context_data(**context)
class SerializableMultipleObjectsMixin(SerializableMixin):
pass
class ListMixin(MultipleObjectMixin, SerializableMultipleObjectsMixin, RestAPIMixin):
async def get(self, *args, **kwargs):
# noinspection PyAttributeOutsideInit
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty:
# When pagination is enabled and object_list is a queryset,
# it's better to do a cheap query than to load the unpaginated
# queryset in memory.
if self.get_paginate_by(self.object_list) is not None:
is_empty = not (await future_exec(self.object_list.exists))
else:
is_empty = not self.object_list
if is_empty:
raise Http404(_("Empty list and '%(class_name)s.allow_empty' is False.") % {
'class_name': self.__class__.__name__,
})
data = await self.get_context_data()
self.write_json(data=data)
class ListHandler(ListMixin, RequestHandler, UserHandlerMixin):
"""A handler for displaying a list of objects."""
|
#!/usr/bin/python
#
# Copyright (C) 2018 stephen.farrell@cs.tcd.ie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Read a CSV, one (default last) column of which is a domain name, then append
# a bunch of DNS derived values to the row and write out to the output file
import csv
import dns.resolver # install via: sudo apt install python-dnspython; sudo -H pip install dnspython
import pyping # install via: sudo -H pip install pyping
import os, sys, argparse, tempfile, gc
import json
import jsonpickle # install via "$ sudo pip install -U jsonpickle"
import time, datetime
from dateutil import parser as dparser # for parsing time from comand line and certs
# command line arg handling
argparser=argparse.ArgumentParser(description='Read a CSV, one (default last) column of which is a domain name, then append a bunch of DNS derived values to the row and write out to the output file')
argparser.add_argument('-i','--infile',
dest='infile',
help='CSV file containing list of domains')
argparser.add_argument('-o','--output_file',
dest='outfile',
help='CSV file in which to put records (one per line)')
argparser.add_argument('-c','--col',
dest='col',
help='column from input file that has DNS names')
args=argparser.parse_args()
def usage():
print >>sys.stderr, "usage: " + sys.argv[0] + " -i <in.csv> -o <out.csv> [-c <col1>]"
print >>sys.stderr, " read domain names from colum (default: last) of in.csv, do DNS queries and write out to out.csv"
sys.exit(1)
if args.infile is None:
usage()
if args.outfile is None:
usage()
col=-1
if args.col:
col=int(args.col)
myResolver = dns.resolver.Resolver() #create a new instance named 'myResolver'
# We need to be root to run the pyping thing (ick)
amRoot=False
if os.geteuid() == 0:
amRoot=True
queries = [ "A", "AAAA", "MX", "DS", "CAA", "SPF", "TXT" ]
of=open(args.outfile,'w')
wr=csv.writer(of)
count=0
with open(args.infile, 'r') as f:
r = csv.reader(f)
for row in r:
dom=row[col]
print "Doing " + dom
domarr = []
domarr.append(dom)
for query in queries:
pas = []
pas.append("ping"+query)
pasok=False
domarr.append(query)
try:
answer = myResolver.query(dom, query)
for rdata in answer:
domarr.append(str(rdata))
row.append(str(rdata))
# see if a ping works
if amRoot and (query == "A" or query == "AAAA"):
try:
pa = pyping.ping(str(rdata))
if pa.ret_code == 0:
pas.append(str(pa.avg_rtt))
pasok=True
else:
pas.append("icmp-bummer-non-exception")
except:
pas.append("icmp-bummer")
except:
domarr.append("bummer")
row.append('no '+query)
if pasok:
domarr.append(pas)
#print row
wr.writerow(row)
count += 1
if count % 10 == 0:
print >>sys.stderr, "Did " + str(count) + " last: " + dom
of.close()
|
#!/usr/bin/env python
import gevent.monkey; gevent.monkey.patch_all()
import sys
from argparse import ArgumentParser
import yaml
from rtmbot import RtmBot
import logging
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='Full path to config file.',
metavar='path'
)
return parser.parse_args()
# load args with config path
args = parse_args()
config = yaml.safe_load(open(args.config or 'rtmbot.conf', 'r'))
bot = RtmBot(config)
while True:
try:
logging.info('starting bot')
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except Exception as e:
logging.exception("Something wrong happened, restarting...")
|
"""
Tools for visualization, including metrics plots, and generating the html
graph of the nodes in a pipeline
"""
from .plot_metrics import plot_metrics
|
### Base class for Collage plugins ###
import math
import logging
import pygame
from plugin import Plugin
class Collage(Plugin):
"""
Base class for collage plugins
See simple_resize.py or recursive_split.py for example implementation of a plugin
"""
def __init__(self, config):
super(Collage, self).__init__()
self.config = config
self.wallpaper_source = None
@staticmethod
def get_instances(plugins, config):
collages = config['collage-plugins']
if not collages == 'all':
collages = collages.split(',')
collages = [c.strip() for c in collages]
instances = []
for plugin in plugins:
if plugin.name in collages or collages == 'all':
instances.append(plugin(config))
if type(collages) is list:
collages.remove(plugin.name)
for collage_exception in collages:
logging.warning('Collage %s not found' % collage_exception)
return instances
def set_source(self, source):
self.wallpaper_source = source
def generate(self, size, wallpaper_queue):
"""
Generates the wallpaper collage
"""
raise NotImplementedError()
def _resize_wallpaper(self, wallpaper, size):
"""
Resizes wallpaper to set size, conserves aspect ratio
Returns crop co-ordinates and scaled image
"""
# find ratios
width_ratio = 1.0*size[0]/wallpaper.get_width()
height_ratio = 1.0*size[1]/wallpaper.get_height()
# resize to fit width
if width_ratio > height_ratio:
new_size = (size[0], int(math.ceil(wallpaper.get_height()*width_ratio)))
# resize to fit height
else:
new_size = (int(math.ceil(wallpaper.get_width()*height_ratio)), size[1])
# scale wallpaper according to new_size
try:
wallpaper = pygame.transform.smoothscale(wallpaper, new_size)
except ValueError:
logging.debug('bit-depth error, using crappy scaling')
wallpaper = pygame.transform.scale(wallpaper, new_size)
# Height or width might be too large
crop = (0, 0)
if wallpaper.get_width() > size[0]+1:
overflow = wallpaper.get_width() - size[0]
margin = int(overflow / 2)
crop = (margin, 0)
elif wallpaper.get_height() > size[1]+1:
overflow = wallpaper.get_height() - size[1]
margin = int(overflow / 2)
crop = (0, margin)
return crop, wallpaper
|
# The code is based on code publicly available at
# https://github.com/rosinality/stylegan2-pytorch
# written by Seonghyeon Kim.
import math
import random
import torch
from torch import nn
from torch.nn import functional as F
from models.gan.stylegan2.op import FusedLeakyReLU
from models.gan.stylegan2.layers import PixelNorm, Upsample, Blur
from models.gan.stylegan2.layers import EqualLinear
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, blur_kernel=[1, 3, 3, 1]):
super().__init__()
self.eps = 1e-8
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
if upsample:
factor = 2
p = (len(blur_kernel) - factor) - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(
torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
)
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
f'upsample={self.upsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(
batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
)
input = input.view(1, batch * in_channel, height, width)
if self.upsample:
weight = weight.view(
batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
)
weight = weight.transpose(1, 2).reshape(
batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
)
out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
else:
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
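    # Added note: forward() realises StyleGAN2-style weight (de)modulation. The
    # per-sample style vector scales the input channels of the shared kernel,
    # demodulation renormalises each output filter to roughly unit norm, and the
    # batch dimension is folded into the group dimension so one grouped
    # convolution applies a different kernel set to every sample.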
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class ConstantInput(nn.Module):
def __init__(self, channel, size=4):
super().__init__()
self.const = nn.Parameter(torch.randn(1, channel, size, size))
def forward(self, input):
batch = input.shape[0]
out = self.const.repeat(batch, 1, 1, 1)
return out
class StyleLayer(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size, style_dim,
upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input, style, noise=None):
out = self.conv(input, style)
out = self.noise(out, noise=noise)
out = self.activate(out)
return out
class ToRGB(nn.Module):
def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
def forward(self, input, style, skip=None):
out = self.conv(input, style)
out = out + self.bias
if skip is not None:
skip = self.upsample(skip)
out = out + skip
return out
class Generator(nn.Module):
def __init__(self, size,
style_dim=512, n_mlp=8, channel_multiplier=2,
blur_kernel=[1, 3, 3, 1], lr_mlp=0.01, small32=False):
super().__init__()
self.size = size
self.style_dim = style_dim
layers = [PixelNorm()]
for i in range(n_mlp):
layers.append(EqualLinear(style_dim, style_dim,
lr_mul=lr_mlp,
activation='fused_lrelu'))
self.style = nn.Sequential(*layers)
if small32:
self.channels = {
4: 512,
8: 512,
16: 256,
32: 128,
}
else:
self.channels = {
4: 512,
8: 512,
16: 512,
32: 512,
64: int(256 * channel_multiplier),
128: int(128 * channel_multiplier),
256: int(64 * channel_multiplier),
512: int(32 * channel_multiplier),
1024: int(16 * channel_multiplier),
}
self.input = ConstantInput(self.channels[4])
self.conv1 = StyleLayer(
self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
)
self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
self.log_size = int(math.log(size, 2))
self.num_layers = (self.log_size - 2) * 2 + 1
self.layers = nn.ModuleList()
self.to_rgbs = nn.ModuleList()
self.noises = nn.Module()
in_channel = self.channels[4]
for i in range(3, self.log_size + 1):
out_channel = self.channels[2 ** i]
self.layers.append(
StyleLayer(in_channel, out_channel, 3, style_dim,
upsample=True, blur_kernel=blur_kernel)
)
self.layers.append(
StyleLayer(out_channel, out_channel, 3, style_dim,
blur_kernel=blur_kernel)
)
self.to_rgbs.append(ToRGB(out_channel, style_dim))
in_channel = out_channel
self.n_latent = self.log_size * 2 - 2
@property
def device(self):
return self.input.const.device
def make_noise(self):
noises = []
for layer_idx in range(self.num_layers):
res = (layer_idx + 5) // 2
shape = [1, 1, 2 ** res, 2 ** res]
noises.append(torch.randn(*shape, device=self.device))
return noises
def mean_latent(self, n_latent):
latent_in = torch.randn(
n_latent, self.style_dim, device=self.device
)
latent = self.style(latent_in).mean(0, keepdim=True)
return latent
def get_latent(self, input):
return self.style(input)
def sample_latent(self, num_samples):
return torch.randn(num_samples, self.style_dim, device=self.device)
def forward(self, input,
return_latents=False,
style_mix=0.9,
input_is_latent=False,
noise=None):
latent = self.style(input) if not input_is_latent else input
if noise is None:
noise = [None] * self.num_layers
if latent.ndim < 3:
latents = latent.unsqueeze(1).repeat(1, self.n_latent, 1)
else:
latents = latent
if self.training and (style_mix > 0):
batch_size = input.size(0)
latent_mix = self.style(self.sample_latent(batch_size))
latent_mix = latent_mix.unsqueeze(1)
nomix_mask = torch.rand(batch_size) >= style_mix
mix_layer = torch.randint(self.n_latent, (batch_size,))
mix_layer = mix_layer.masked_fill(nomix_mask, self.n_latent)
mix_layer = mix_layer.unsqueeze(1)
layer_idx = torch.arange(self.n_latent)[None]
mask = (layer_idx < mix_layer).float().unsqueeze(-1)
mask = mask.to(latents.device)
latents = latents * mask + latent_mix * (1 - mask)
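        # Added note: the block above performs style mixing. With probability
        # `style_mix` a second latent is sampled and, per sample, all layers at
        # or beyond a random crossover index receive the mixed latent;
        # `nomix_mask` pushes that index past the last layer so the remaining
        # samples keep a single style.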
out = self.input(latents)
out = self.conv1(out, latents[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latents[:, 1])
idx = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
self.layers[::2], self.layers[1::2],
noise[1::2], noise[2::2], self.to_rgbs
):
out = conv1(out, latents[:, idx], noise=noise1)
out = conv2(out, latents[:, idx+1], noise=noise2)
skip = to_rgb(out, latents[:, idx+2], skip)
idx += 2
image = skip
image = 0.5 * image + 0.5
if not self.training:
image = image.clamp(0, 1)
if return_latents:
return image, latents
else:
return image
|
import numpy as np
from scipy.special import erfc, erfcinv
def phi(input):
"""Phi function.
:param input:
Input value.
:returns:
Phi(input).
"""
return 0.5 * erfc(-input/np.sqrt(2))
def invphi(input):
"""Inverse of Phi function.
:param input:
Input value.
:returns:
Inverse of Phi(input).
"""
return -1 * np.sqrt(2) * erfcinv(input/0.5)
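# Quick self-check sketch (added): phi and invphi should be inverses of each
# other up to floating point error, and phi(0) should equal 0.5.
if __name__ == '__main__':
    assert np.isclose(phi(0.0), 0.5)
    assert np.isclose(invphi(phi(1.3)), 1.3)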
|
from .base_unit import BaseUnit
from ..utils import ListUtils
from .transport_event import TransportEvent
class LinkElement(BaseUnit):
def __init__(self):
BaseUnit.__init__(self)
self.sources = None
self.destinations = None
self.guards = None
self.accepts = None
def add_source(self, source):
self.sources = ListUtils.add_or_create(self.sources, source)
def add_destination(self, destination):
self.destinations = ListUtils.add_or_create(self.destinations, destination)
def add_guard(self, guard):
self.guards = ListUtils.add_or_create(self.guards, guard)
def add_accept(self, accept):
self.accepts = ListUtils.add_or_create(self.accepts, accept)
def accept(self, event):
self_event = TransportEvent.of(event, self)
if ListUtils.is_empty_or_any(self.accepts, self_event.id) and\
ListUtils.is_empty_or_any_expr(self.guards, lambda guard: guard(self_event)):
for destination in self.destinations:
destination.accept(self_event)
|
# Copyright (c) 2020 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import numpy as np
from nnabla_nas.module import static as smo
from nnabla_nas.module.parameter import Parameter
def test_join_module():
shape = (10, 3, 32, 32)
input_1 = smo.Input(nn.Variable(shape))
input_2 = smo.Input(nn.Variable(shape))
alpha = Parameter(shape=(2,))
alpha.d = np.array([1, 2])
join_linear = smo.Join([input_1, input_2],
join_parameters=alpha,
mode='linear')
join_sample = smo.Join([input_1, input_2],
join_parameters=alpha,
mode='sample')
join_max = smo.Join([input_1, input_2],
join_parameters=alpha,
mode='max')
assert join_linear().shape == (10, 3, 32, 32)
assert join_sample().shape == (10, 3, 32, 32)
assert join_max().shape == (10, 3, 32, 32)
assert join_max._idx == 1
assert join_max() == input_2._value
if __name__ == '__main__':
test_join_module()
|
from typing import List


class Solution:
def letterCombinations(self, digits: str) -> List[str]:
map = dict()
map["2"] = ["a","b","c"]
map["3"] = ["d","e","f"]
map["4"] = ["g","h","i"]
map["5"] = ["j","k","l"]
map["6"] = ["m","n","o"]
map["7"] = ["p","q","r","s"]
map["8"] = ["t","u","v"]
map["9"] = ["w","x","y","z"]
ans = [""]
for d in digits:
temp = list()
cur = map[d]
for x in ans:
for c in cur:
temp.append(x+c)
ans = temp
if(ans[0]==""):
return []
return ans
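# Usage sketch (added): Solution().letterCombinations("23") returns
# ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"], and "" returns [].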
|
from .base import Base
from collections import Counter
class SizePerType(Base):
def fill(self):
request_size = self.__sum_request_size_per_type()
metrics = []
for key, value in request_size.items():
if key is None or key == "":
continue
metric = {
"measurement": self.measurement(),
"tags": {**self.default_tags(), **self.media_type_tags(key)},
"time": self.time(),
"fields": {"value": value},
}
metrics.append(metric)
return metrics
def is_valid(self):
return True
def measurement(self):
return "size_per_type"
    def __sum_request_size_per_type(self):
        sum_size = {}
        for req in self.requests:
            content_type = req.get("contentType")
            if content_type not in sum_size:
                sum_size[content_type] = 0
            object_size = req.get("objectSize")
            if object_size != "" and object_size is not None:
                sum_size[content_type] += int(object_size)
        return sum_size
|
# -*- coding: utf-8 -*-
import IPython
import os
def get_ipython_dir():
return IPython.paths.get_ipython_dir()
def list_profiles(project_path, project=None, show_project=True):
if os.path.isdir(project_path):
return [
('{}:{}'.format(x, project) if project and show_project else x)
for x in os.listdir(project_path)
if os.path.isdir(os.path.join(project_path, x)) and
'ipython_config.py' in os.listdir(os.path.join(project_path, x))
]
return []
|
#Author: Maximilian Beckers, EMBL Heidelberg, Sachse Group (2018)
import numpy as np
import time
import argparse, os, sys
import mrcfile
import math
from FDRutil import *
#*************************************************************
#****************** Commandline input ************************
#*************************************************************
cmdl_parser = argparse.ArgumentParser(
prog=sys.argv[0], description='*** Analyse density ***',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30), add_help=True);
cmdl_parser.add_argument('-em', '--em_map', default=[], nargs='*', required=True, help='Input filename of EM-frame maps');
cmdl_parser.add_argument('-p', '--apix', metavar="apix", type=float, required=True, help='pixel Size of input map');
cmdl_parser.add_argument('-lowPassFilter', '--lowPassFilter', type=float, required=False, help='Resolution to lowPass filter');
cmdl_parser.add_argument('-addFrames', '--addFrames', type=int, required=False, help='add Frames');
cmdl_parser.add_argument('-firstFrame', '--firstFrame', type=int, required=False, help='first frame to be used, counting starts with 0');
cmdl_parser.add_argument('-lastFrame', '--lastFrame', type=int, required=False, help='last frame to be used, counting ends with numFrames-1');
#--------------------------------------------------------------------------
def kernelRegression(frameData, providedResolution):
#*****************************************
#*********** kernel regression ***********
#*****************************************
bandwidth = 3;
maps = np.copy(frameData);
sizeMap = maps.shape;
numFrames = sizeMap[3];
#if specified, filter all the frames to make fallof estimation more accurate
if providedResolution != 0:
frequencyMap = calculate_frequency_map(maps[ :, :, :, 0]);
for frameInd in range(sizeMap[3]):
maps[:, :, :, frameInd] = lowPassFilter(np.fft.rfftn(maps[:, :, :, frameInd]), frequencyMap, providedResolution, maps[ :, :, :, frameInd].shape);
#set all negative values to a very small positive value
maps[maps <= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
#maps = np.log(maps);
indexMap = np.zeros(sizeMap[3]);
for i in range(sizeMap[3]):
indexMap[i] = i+1.0;
x_mean = np.mean(indexMap);
y_mean = np.mean(maps, 3);
regrMap = np.zeros(sizeMap);
#do the actual kernel regression
for frameInd in range(numFrames):
regrMapDenom = 0;
for tmpFrameInd in range(numFrames):
dist = np.abs(tmpFrameInd - frameInd);
if dist > 4:
continue;
sampleWeight = (1.0/(np.sqrt(2*np.pi)*bandwidth)) * np.exp(-0.5*dist/(bandwidth**2));
regrMap[ :, :, :, frameInd] = regrMap[ :, :, :, frameInd] + sampleWeight*maps[ :, :, :, tmpFrameInd] ;
regrMapDenom = regrMapDenom + sampleWeight;
regrMap[ :, :, :, frameInd] = regrMap[ :, :, :, frameInd]/regrMapDenom;
#************************************
#*********** do plotting ************
#************************************
import matplotlib.pyplot as plt
fig, ax = plt.subplots(5, 5);
for row in ax:
for col in row:
xInd = np.random.randint(0, sizeMap[0]);
yInd = np.random.randint(0, sizeMap[1]);
zInd = np.random.randint(0, sizeMap[2]);
indices = np.arange(sizeMap[3]);
y1 = regrMap[ xInd, yInd, zInd, :];
y2 = maps[ xInd, yInd, zInd, :];
col.plot(indices, y1);
col.plot(indices, y2);
col.set_xticklabels([]);
col.set_yticklabels([]);
plt.savefig("Regression_falloff.pdf");
print("PDF saved ...");
plt.close();
#calculate weights
weightMap = np.copy(regrMap);
sumMap = np.sum(regrMap, 3);
sumMap = sumMap.astype(float);
sumMap[sumMap==0.0] = np.nan;
for frameInd in range(sizeMap[3]):
weightMap[:, :, :, frameInd] = weightMap[:, :, :, frameInd]/sumMap;
weightMap[np.isnan(weightMap)] = 1.0/numFrames;
return regrMap, weightMap;
#--------------------------------------------------------------------------
def linearizedModel(frameData, providedResolution):
#****************************************
#*********** fit linear model ***********
#****************************************
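    #Per voxel, an ordinary least-squares line is fitted to the log-intensities
    #y_f = log(I_f) against the frame index x_f = f + 1:
    #    b1 = S_xy / S_xx,    b0 = y_mean - b1 * x_mean
    #so the modelled signal decays as I_f ~ exp(b0) * exp(b1 * x_f).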
maps = np.copy(frameData);
sizeMap = maps.shape;
#print(sizeMap);
#if specified, filter all the frames to make fallof estimation more accurate
if providedResolution != 0:
frequencyMap = calculate_frequency_map(maps[ :, :, :, 0]);
for frameInd in range(sizeMap[3]):
maps[:, :, :, frameInd] = lowPassFilter(np.fft.rfftn(maps[:, :, :, frameInd]), frequencyMap, providedResolution, maps[ :, :, :, frameInd].shape);
#set all negative values to a very small positive value
maps[maps<= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
maps = np.log(maps);
indexMap = np.zeros(sizeMap[3]);
for i in range(sizeMap[3]):
indexMap[i] = i+1.0;
x_mean = np.mean(indexMap);
y_mean = np.mean(maps, 3);
#calc b1
S_xy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_xx = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
#S_yy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for i in range(sizeMap[3]):
S_xy = S_xy + (indexMap[i] - x_mean)*(maps[ :, :, :, i ] - y_mean);
S_xx = S_xx + (indexMap[i] - x_mean)**2;
#S_yy = S_yy + (maps[ :, :, :, i ] - y_mean)*(maps[ :, :, :, i ] - y_mean);
#calculate regression coefficients
b1 = np.divide(S_xy, S_xx);
b0 = y_mean - b1 * x_mean;
#calculate some residual statistics
#S_residuals = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
#for frameInd in range(sizeMap[3]):
# prediction = b0 + b1*(frameInd + 1.0);
# S_residuals = S_residuals + (maps[ :, :, :, i ] - prediction)**2;
#S_residuals[S_residuals == 0] = np.nan;
#calculate t-test upon b1, H_0: b1=0, H1: b1<0
#tTestMap = b1/(np.sqrt(S_residuals/(float(sizeMap[3]-2.0))*S_xx));
#np.random.shuffle(b1);
return b0, b1;
#--------------------------------------------------------------------------
def relativeSNR(weightMap, apix):
sizeMap = weightMap.shape;
equalWeightFactor = 1.0/float(sizeMap[3]);
S_xq = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_xx = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
S_yy = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for frameInd in range(sizeMap[3]):
S_xq = S_xq + weightMap[:,:,:, frameInd]*equalWeightFactor;
S_xx = S_xx + equalWeightFactor**2;
S_yy = S_yy + weightMap[:,:,:, frameInd]**2;
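    #SNRmap = (||w|| * ||u||) / <w, u>, i.e. the reciprocal cosine similarity
    #between the per-voxel weight vector w and uniform weights u = 1/numFrames;
    #it equals 1.0 only where the estimated weights are perfectly uniform.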
SNRmap = np.divide((np.sqrt(S_xx)*np.sqrt(S_yy)), S_xq);
#write the SNR map
SNRMapMRC = mrcfile.new('SNR.mrc', overwrite=True);
SNRmap = np.float32(SNRmap);
SNRMapMRC.set_data(SNRmap);
SNRMapMRC.voxel_size = apix;
SNRMapMRC.close();
return None;
#--------------------------------------------------------------------------
def weightedAverage(maps, weightMap):
sizeMap = maps.shape;
#do the weighted averaging
weightedAverage = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for frame in range(sizeMap[3]):
weightedAverage = weightedAverage + weightMap[ :, :, :, frame] * maps[ :, :, :, frame];
#variance map
#varMap = np.sum(weightMap**2 , 3);
#weightedAverage = np.divide(weightedAverage, np.sqrt(varMap)); #normalize the background variance
return weightedAverage;
#--------------------------------------------------------------------------
def optimizeWeights(fallOff):
numFrames = fallOff.size;
#get starting point
alphaStart = fallOff/np.sum(fallOff);
#alphaStart = np.random.rand(numFrames)/np.sum(fallOff);
x = alphaStart[1:];
x = gradientDescent(x, fallOff);
#transform back to alpha
alpha = np.append(np.ones(1), x);
basisTransform = np.identity(alpha.size);
basisTransform[0,:] = -1.0 * np.ones((alpha.size));
basisTransform[0,0] = 1.0;
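    #Row 0 of the transform maps the reduced vector back to 1 - sum(alpha[1:]),
    #so the resulting weight vector alphaFinal sums to one by construction.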
#transform into the n-dimensional space
alphaFinal = np.matmul(basisTransform, alpha);
return alphaFinal;
#-------------------------------------------------------------------------
def calculateMask(maps):
meanMap = np.mean(maps,3);
mask = np.copy(meanMap);
mask[mask>0.0002] = 1;
mask[mask<1] = 0;
return mask;
#--------------------------------------------------------------------------
def calcR2(frameData, b0, b1, sizeMovie):
maps = np.copy(frameData);
#set all negative values to a very small positive value
maps[maps<= 0.0] = 1.0*10**(-6);
#do log-transform of maps to linearize data
maps = np.log(maps);
yMean = np.mean(maps, 3);
weightMap = np.zeros(sizeMovie);
#set all increasing fits to zero
b1[b1>0.0] = 0.0;
b0[b1==0.0] = yMean[b1==0];
    #get falloff factor for all frames
    N0 = np.exp(b0);  #amplitude of the exponential decay, recovered from the intercept
    for frameInd in range(sizeMovie[3]):
        #weightMap[ :, :, :, frameInd] = b0 + (frameInd+1)*b1;
        weightMap[ :, :, :, frameInd] = N0*np.exp((frameInd+1)*b1);
#get R2
weightMean = np.mean(weightMap, 3);
SQE = np.zeros((sizeMovie[0], sizeMovie[1], sizeMovie[2]));
SQT = np.zeros((sizeMovie[0], sizeMovie[1], sizeMovie[2]));
for frameInd in range(sizeMovie[3]):
SQE = SQE + (weightMap[:,:,:,frameInd] - weightMean)**2;
SQT = SQT + (maps[:,:,:,frameInd] - yMean)**2;
SQT[SQT==0.0] = np.nan;
R2 = np.divide(SQE,SQT);
    R2[np.isnan(R2)] = 0;
return R2;
#--------------------------------------------------------------------------
def calculateWeights(b0, b1, sizeMovie, frameData, firstPatch):
maps = np.copy(frameData);
#maps[maps<= 0.0] = 1.0*10**(-6);
#maps = np.log(maps);
yMean = np.mean(maps, 3);
weightMap = np.zeros(sizeMovie);
#set all increasing fits to zero
b1[b1>0.0] = 0.0;
#b0[b1==0.0] = yMean[b1==0]
b0[b1==0.0] = np.nan;
N0 = np.exp(b0);
N0[np.isnan(N0)] = 0.0;
#get falloff factor for all frames
for frameInd in range(sizeMovie[3]):
#weightMap[ :, :, :, frameInd] = b0 + (frameInd+1)*b1;
weightMap[ :, :, :, frameInd] = N0*np.exp((frameInd+1)*b1);
#************************************
#*********** do plotting ************
#************************************
import matplotlib.pyplot as plt
fig, ax = plt.subplots(5, 5);
#index = np.array(np.nonzero(mask));
#sizeIndex= index.shape;
for row in ax:
for col in row:
#voxel = np.random.randint(0, sizeIndex[1]);
xInd = np.random.randint(0, sizeMovie[0]);
yInd = np.random.randint(0, sizeMovie[1]);
zInd = np.random.randint(0, sizeMovie[2]);
indices = np.arange(0,sizeMovie[3]);
y1 = weightMap[ xInd, yInd, zInd, :];
y2 = maps[ xInd, yInd, zInd, :];
col.plot(indices, y1);
col.plot(indices, y2);
col.set_xticklabels([]);
col.set_yticklabels([]);
plt.savefig("Regression_falloff.pdf");
plt.close();
#***********************************
#(xArr, yArr, zArr) = np.nonzero(testMap); #get indices of nonzero elements
#for frameInd in range(sizeMovie[3]):
# weightMap = weightMap[:, :, :, frameInd] + np.amin(weightMap, 3);
sumMap = np.sum(weightMap, 3);
sumMap = sumMap.astype(float);
sumMap[sumMap==0.0] = np.nan;
for frameInd in range(sizeMovie[3]):
weightMap[:, :, :, frameInd] = weightMap[:, :, :, frameInd]/sumMap;
weightMap[np.isnan(weightMap)] = 1.0/float(sizeMovie[3]);
return weightMap;
#--------------------------------------------------------------------------
def leastSquaresLoss(maps, lambdas, N0):
#*******************************************
#**** least squares with exp. decay *******
#*******************************************
sizeMap = maps.shape;
indMaps = np.zeros(sizeMap);
numFrames = sizeMap[3];
#get response map
unscaledResponseMap = np.zeros(sizeMap);
sumSqDiffs = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
d_lambda = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
d_N0 = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
for frameInd in range(numFrames):
unscaledResponseMap[ :, :, :, frameInd] = np.exp(-lambdas*(frameInd+1));
responseMap = N0 * unscaledResponseMap[ :, :, :, frameInd];
diff = responseMap[:,:,:] - maps[:,:,:,frameInd];
#sumSquaredDifferences
sumSqDiffs = sumSqDiffs + diff*diff;
        #gradients: d(response)/d(lambda) = -(frameInd+1)*response and
        #d(response)/d(N0) = unscaled response, hence the chain-rule terms below
        d_lambda = d_lambda + 2.0 * diff * responseMap[ :, :, :] * (-(frameInd+1));
        d_N0 = d_N0 + 2.0 * diff * unscaledResponseMap[ :, :, :, frameInd];
return sumSqDiffs, d_lambda, d_N0;
#-------------------------------------------------------------------------
def getInitialMapStats(fileNameList):
tmpMap = mrcfile.open(fileNameList[0], mode='r+');
tmpMapData = np.copy(tmpMap.data);
#get the map stats
sizeMap = tmpMapData.shape;
patchSize = 200;
numXPatches = int(math.ceil((sizeMap[0])/float(patchSize)));
numYPatches = int(math.ceil((sizeMap[1])/float(patchSize)));
numZPatches = int(math.ceil((sizeMap[2])/float(patchSize)));
return numXPatches, numYPatches, numZPatches, patchSize, sizeMap;
#-------------------------------------------------------------------------
def main():
startTime = time.time();
#**********************************
#**** catch command line input ****
#**********************************
args = cmdl_parser.parse_args();
numFrames = len(args.em_map);
apix = args.apix;
if args.lowPassFilter is not None:
lowPassFilter = apix/args.lowPassFilter;
else:
lowPassFilter = 0;
if args.addFrames is None:
addFrames = 1;
else:
addFrames = args.addFrames;
if args.firstFrame is None:
firstFrame = 0;
else:
firstFrame = args.firstFrame;
if args.lastFrame is None:
lastFrame = numFrames-1;
else:
lastFrame = args.lastFrame;
#update number of frames
numFrames = lastFrame-firstFrame+1;
#some initialization
numXPatches, numYPatches, numZPatches, patchSize, sizeMap = getInitialMapStats(args.em_map);
splitFilename = os.path.splitext(os.path.basename(args.em_map[0]));
weightedMap = np.zeros(sizeMap);
b0Map = np.zeros((sizeMap[0], sizeMap[1], sizeMap[2]));
b1Map = np.zeros(sizeMap);
#*********************************
#*** print the filenames *********
#*********************************
print("Printing file names. Make sure they are in the correct order ...");
for filename in args.em_map:
print(filename);
#*********************************
#******* do the weighting ********
#*********************************
numPatches = 1;
for xPatchInd in range(numXPatches):
for yPatchInd in range(numYPatches):
for zPatchInd in range(numZPatches):
output = "Analyzing patch " + repr(numPatches) + " ...";
print(output);
if numPatches == 1:
firstPatch = True;
else:
firstPatch = False;
#**********************************
#********* read the maps **********
#**********************************
#define start and end indices for subsetting
xStart = patchSize*xPatchInd;
xEnd = np.minimum(patchSize*(xPatchInd+1), sizeMap[0]);
yStart = patchSize*yPatchInd;
yEnd = np.minimum(patchSize*(yPatchInd+1), sizeMap[1]);
zStart = patchSize*zPatchInd;
zEnd = np.minimum(patchSize*(zPatchInd+1), sizeMap[2]);
#read the individual frames
frameInd = 0;
addFrameInd = 0; #for adding subsequent frames
allFrameInd = 0;
frameCounter = 0;
for filename in args.em_map:
if frameCounter < firstFrame:
frameCounter = frameCounter + 1;
continue;
elif frameCounter > lastFrame:
break;
else:
frameCounter = frameCounter + 1;
tmpMap = mrcfile.open(filename, mode='r+');
tmpMapData = np.copy(tmpMap.data);
mapPatch = tmpMapData[xStart:xEnd, yStart:yEnd, zStart:zEnd];
if frameInd == 0:
tmpSizeMap = mapPatch.shape;
newNumFrames = int(math.ceil(numFrames/float(addFrames)));
maps = np.zeros((tmpSizeMap[0], tmpSizeMap[1], tmpSizeMap[2], newNumFrames));
tmpMapPatch = np.zeros(tmpSizeMap);
#print(maps.shape);
if addFrames == 1: #if no frame name reduction takes place
maps[ :, :, :, frameInd] = mapPatch;
mapPatch = 0;
frameInd = frameInd + 1;
else: #if subsequent frames shall be added
addFrameInd = addFrameInd + 1;
allFrameInd = allFrameInd + 1;
tmpMapPatch = tmpMapPatch + mapPatch;
if addFrameInd == addFrames:
tmpMapPatch = (1.0/float(addFrames))*tmpMapPatch;
maps[ :, :, :, frameInd] = np.copy(tmpMapPatch);
tmpMapPatch = np.zeros(tmpSizeMap);
mapPatch = 0;
frameInd = frameInd + 1;
addFrameInd = 0;
continue;
if allFrameInd == numFrames: #if some frames remain after frame reduction add them as well
remainingNumberOfFrames = numFrames%addFrames;
tmpMapPatch = (1.0/float(remainingNumberOfFrames))*tmpMapPatch;
maps[ :, :, :, frameInd] = np.copy(tmpMapPatch);
#print("assigning last part");
#**********************************
#******** calc averages ***********
#**********************************
b0, b1 = linearizedModel(maps, lowPassFilter);
#R2 = calcR2(maps, b0, b1, maps.shape);
#mask = calculateMask(maps);
#mask = np.ones(mask.shape);
weightMap = calculateWeights(b0, b1, maps.shape, maps, firstPatch);
#weightMap = np.ones(weightMap.shape)*1.0/float(numFrames);
#regrMap, weightMap = kernelRegression(maps, lowPassFilter);
weightedSum = weightedAverage(maps, weightMap);
b0Map[xStart:xEnd, yStart:yEnd, zStart:zEnd] = b0;
b1Map[xStart:xEnd, yStart:yEnd, zStart:zEnd] = b1;
weightedMap[xStart:xEnd, yStart:yEnd, zStart:zEnd] = weightedSum;
if numPatches == 1:
relativeSNR(weightMap, apix); #write the SNR map
numPatches = numPatches + 1;
#end of weighting
#write output
weightedMapMRC = mrcfile.new(splitFilename[0] + '_DW.mrc', overwrite=True);
weightedMap = np.float32(weightedMap);
weightedMapMRC.set_data(weightedMap);
weightedMapMRC.voxel_size = apix;
weightedMapMRC.close();
#write b0 and b1
b0MRC = mrcfile.new(splitFilename[0] + '_b0.mrc', overwrite=True);
b0Map = np.float32(b0Map);
b0MRC.set_data(b0Map);
b0MRC.voxel_size = apix;
b0MRC.close();
#write b0 and b1
b1MRC = mrcfile.new(splitFilename[0] + '_b1.mrc', overwrite=True);
b1Map = np.float32(b1Map);
b1MRC.set_data(b1Map);
b1MRC.voxel_size = apix;
b1MRC.close();
endTime = time.time();
runTime = endTime - startTime;
output = "Elapsed runtime was " + repr(runTime) + " seconds";
print(output);
if (__name__ == "__main__"):
main();
|
import pandas as pd
import numpy as np
from pandas_datareader import data as pdr
import yfinance
import datetime as dt
from sklearn import covariance
import seaborn as sns
import matplotlib.pyplot as plt
import networkx as nx
from pylab import rcParams
yfinance.pdr_override()
num_of_years = 10
start = dt.datetime.now() - dt.timedelta(int(365.25 * num_of_years))
end = dt.datetime.now()
#Setting up the mapping from ticker to country
etfs = {"EWJ":"Japan",
"EWZ":"Brazil",
"FXI":"China",
"EWY":"South Korea",
"EWT":"Taiwan",
"EWH":"Hong Kong",
"EWC":"Canada",
"EWG":"Germany",
"EWU":"United Kingdom",
"EWA":"Australia",
"EWW":"Mexico",
"EWL":"Switzerland",
"EWP":"Spain",
"EWQ":"France",
"EIDO":"Indonesia",
"ERUS":"Russia",
"EWS":"Singapore",
"EWM":"Malaysia",
"EZA":"South Africa",
"THD":"Thailand",
"ECH":"Chile",
"EWI":"Italy",
"TUR":"Turkey",
"EPOL":"Poland",
"EPHE":"Philippines",
"EWD":"Sweden",
"EWN":"Netherlands",
"EPU":"Peru",
"ENZL":"New Zealand",
"EIS":"Israel",
"EWO":"Austria",
"EIRL":"Ireland",
"EWK":"Belgium"}
symbols, names = np.array(sorted(etfs.items())).T
#Read in series of daily closing prices
df = pd.read_csv("input.csv", index_col=0)
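#The pandas_datareader / yfinance setup above could be used instead of a local
#CSV; a hedged alternative (requires network access, tickers as defined above):
#df = pdr.get_data_yahoo(list(symbols), start, end)["Close"]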
#Convert price series to log return series
df = np.log1p(df.pct_change()).iloc[1:]
#Calling Glasso algorithm
edge_model = covariance.GraphicalLassoCV(cv=10)
df /= df.std(axis=0)
df = df.dropna()
edge_model.fit(df)
#the precision(inverse covariance) matrix that we want
p = edge_model.precision_
rcParams['figure.figsize'] = 15,10
sns.heatmap(p)
plt.show()
#prepare the matrix for network illustration
p = pd.DataFrame(p)
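#Optional: label the precision matrix with the tickers so the network nodes are
#readable ETF symbols rather than integer indices (assumes the columns of
#input.csv are the tickers, in the same order):
#p.index, p.columns = df.columns, df.columns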
links = p.stack().reset_index()
links.columns = ['var1', 'var2','value']
links=links.loc[ (abs(links['value']) > 0.17) & (links['var1'] != links['var2']) ]
#build the graph using networkx lib
G=nx.from_pandas_edgelist(links,'var1','var2', create_using=nx.Graph())
pos = nx.spring_layout(G, k=0.2*1/np.sqrt(len(G.nodes())), iterations=20)
plt.figure(3, figsize=(15, 15))
nx.draw(G, pos=pos)
nx.draw_networkx_labels(G, pos=pos)
plt.show()
nx.write_gexf(G, 'graph.gexf')
|
"""
©Pulzar 2018-20
#Author : Brian Turza
version: 0.4
#Created : 14/9/2019 (this version)
"""
from Lib.math.main import *
import numpy as np
import re
class Parser:
def __init__(self, token_stream, include):
self.tokens = token_stream
self.include = include
self.ast = {'main_scope': []}
self.symbol_table = []
self.isConsole = True
self.lines = 1
self.token_index = 0
self.nesting_count = 0
self.error = False
def parse(self, token_stream):
"""
        This function takes tokens from the lexer and processes them into the AST.
"""
count = 0
while self.token_index < len(token_stream):
token_type = self.tokens[self.token_index][0]
token_value = self.tokens[self.token_index][1]
            # If the token is 'include', hand the remaining tokens to parse_include()
if self.error:
return [self.ast, self.isConsole, self.error]
if token_type == "KEYWORD" and token_value == "include":
self.parse_include(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "KEYWORD" and token_value == "Program":
self.parse_program(token_stream[self.token_index:len(token_stream)], False)
count += 1
elif token_type == "DATATYPE":
self.parse_decl_variable(token_stream[self.token_index:len(token_stream)], False)
            # Check if it was already declared
elif token_type == "BUILT_IN_FUNCTION":
self.parse_builtin(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "MATH_FUNCTION":
self.parse_math(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "KEYWORD" and token_value == "if" or token_value == "else" or token_value == "elseif":
self.parse_conditional_statements(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "KEYWORD" and token_value == "for":
self.parse_loop(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "KEYWORD" and token_value == "while":
self.parse_loop(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "KEYWORD" and token_value == "func":
self.parse_func(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "KEYWORD" and token_value == "class":
self.parse_class(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "COMMENT" and token_value == r"\\":
self.parse_single_line_comment(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "COMMENT" and token_value == "|**":
self.parse_multi_line_comment(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "KEYWORD" and token_value == "macros":
self.parse_macros(token_stream[self.token_index:len(token_stream)])
elif token_type == "KEYWORD" and token_value == "match":
self.parse_match(token_stream[self.token_index:len(token_stream)], False)
elif token_type == "NEWLINE": self.lines += 1
            try:  # Guard: peeking one token ahead would raise IndexError on the last token
if token_type == "IDENTIFIER" and token_stream[self.token_index + 1][0] == "COLON":
self.call_func(token_stream[self.token_index:len(token_stream)], False)
except:
pass
try:
if token_type == "IDENTIFIER" and self.tokens[self.token_index + 1][1] == "=" or token_type == "IDENTIFIER" and self.tokens[self.token_index + 1][0] == "INCREMENT_OPERATOR":
self.parse_variable(token_stream[self.token_index:len(token_stream)], False)
except IndexError: pass
if token_type == "UNDEFINIED":
# TODO Identify better errors
self.error_message("SyntaxError: \n Undefinied")
self.token_index += 1
# If no Program declaration is found in code, calls a error message
if count == 0:
msg = "SyntaxError at line {}:\nProgram must be definied".format(self.lines)
self.error_message(msg, token_stream, self.token_index)
return [self.ast, self.isConsole, self.error]
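    # Usage sketch (hypothetical token stream produced by the Pulzar lexer):
    #   parser = Parser(tokens, include=[])
    #   ast, is_console, had_error = parser.parse(tokens)
    # 'ast' is the nested dictionary rooted at 'main_scope' that parse() builds.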
def parse_include(self, token_stream, inScope):
tokens_checked = 0
lib = ""
ast = {'Include': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type in ["SEMIC"]: break
if token == 1 and token_type != "STRING":
lib = "Lib.{}.main".format(token_value)
try:
import importlib
importlib.import_module(lib)
except ImportError:
msg = "IncludeError at line {}:\n'{}' isnt recognized as libary or pulzar file".format(self.lines, token_value)
self.error_message(msg, token_stream, token)
elif token == 1 and token_type == "STRING":
lib = token_value
tokens_checked += 1
ast['Include'].append({'libary': lib})
if inScope == False:
self.ast['main_scope'].append(ast)
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_math(self, token_stream, inScope):
value = ""
tokens_checked = 0
ast = {'math': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SEMIC": break
if token == 0: ast['math'].append({'function': token_value})
if token == 1 and token_type in ["INT", "ID"]:
value = token_value
elif token == 1 and token_type not in ["INTEGER", "IDENTIFIER"]:
msg = "Error: '" + token_value + "' must be int"
self.error_message(msg, token_stream, token)
elif token > 1 and token % 2 == 0:
value += token_value
tokens_checked += 1
ast['math'].append({'arguments': value})
if inScope == False:
self.ast['main_scope'].append(ast)
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_program(self, token_stream, inScope):
tokens_checked = 0
ast = {'program': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SEMIC": break
elif token == 1 and token_value in ["Program", "Console", "Browser"]:
ast['program'].append({'type': token_value})
if token_value == "Browser":
self.isConsole = False
elif token == 1 and token_value not in ["Program", "Console", "Browser"]:
self.error_message("Program error: undefinied program '{}'".format(token_value))
tokens_checked += 1
if inScope == False:
self.ast['main_scope'].append(ast)
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_decl_variable(self, token_stream, inScope):
tokens_checked = 0
ast = {'variable_declaration': []}
value = ""
typ8 = ""
c = False
var_decl = False
square_root = False
dots = False
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
# If semic is found loop breaks
if token_type in ["SEMIC", "NEWLINE"]:
break
elif token == 0 and token_stream[2][0] == "SEMIC":
ast['variable_declaration'].append({'type': token_value})
typ8 = token_value
ast['variable_declaration'].append({'name': token_stream[1][1]})
if token == 0 and token_value in ["var", "int", "float"]:
ast['variable_declaration'].append({'value': '0'})
elif token == 0 and token_value == "complex":
ast['variable_declaration'].append({'value': 'Complex()'})
elif token == 0 and token_value == "bool":
ast['variable_declaration'].append({'value': 'None'})
elif token == 0 and token_value == "str":
ast['variable_declaration'].append({'value': '""'})
elif token == 0 and token_value == "char":
ast['variable_declaration'].append({'value': "''"})
var_decl = True
break
elif token == 0 and token_stream[2][0] != "SEMIC":
ast['variable_declaration'].append({'type': token_value})
typ8 = token_value
elif token == 1 and token_type == "IDENTIFIER":
ast['variable_declaration'].append({'name': token_value})
elif token == 1 and token_type != "IDENTIFIER":
msg = "SyntaxError at line"+ str(self.lines) +":\nInvalid variable name '" + token_value + "'"
self.error_message(msg, token_stream, token)
elif token == 2 and token_type not in ["OPERATOR", "INCREMENT_OPERATOR"]:
msg = "SyntaxError at line {}\n:Invalid operator '{}'".format(self.lines, token_value)
self.error_message(msg, token_stream, token)
elif token == 3 and token_type == "IDENTIFIER" and token_value not in constants and token_stream[tokens_checked + 1][1] != ":":
value = str(token_value)
elif token == 3 and token_type == "IDENTIFIER" and token_value in constants:
value = "constants['{}']".format(token_value)
elif token == 3 and token_type == "STRING":
value = token_value.replace('\s', ' ')
elif token == 3 and token_type == "COMPLEX_NUMBER":
value = str(token_value) + "j"
c = True
elif token == 3 and token_type == "SQUARE_ROOT":
if re.match("[a-z]", token_value) or re.match("[A-Z]", token_value):
token_value = self.get_token_value(token_value)
if token_value[len(token_value) - 1] in ["i", "j"]:
value = str(np.sqrt(complex(token_value)))
else:
value = str(np.sqrt(float(token_value)))
elif token == 3 and token_type not in ["COMPLEX_NUMBER", "STRING", "FACTORIAL"]:
value = str(token_value)
elif token > 3 and token_type not in ["COMPLEX_NUMBER", "FACTORIAL", "OPERATOR", "SQUARE_ROOT", "IDENTIFIER", "ELLIPSIS_OPERATOR"]:
value += str(token_value)
elif token > 3 and token_type == "OPERATOR":
value += str(token_value.replace('^', '**'))
elif token > 3 and token_type == "ELLIPSIS_OPERATOR":
value += str(token_value)
dots = True
elif token == 3 and token_type == "FACTORIAL":
math = MathModule()
value = str(math.factorial(int(token_value)))
elif token > 3 and token_type == "COMPLEX_NUMBER":
value += str(token_value) + "j"
c = True
elif token > 3 and token_type == "FACTORIAL":
math = MathModule()
value += str(math.factorial(int(token_value)))
elif token > 3 and token_type == "IDENTIFIER" and token_value in constants:
value += "constants['{}']".format(token_value)
elif token > 3 and token_type == "IDENTIFIER":
value += str(token_value)
elif token > 3 and token_type == "SQUARE_ROOT":
if re.match("[a-z]", token_value) or re.match("[A-Z]", token_value):
token_value = self.get_token_value(token_value)
if token_value[len(token_value) - 1] in ["i", "j"]:
value += str(np.sqrt(complex(token_value)))
else:
value += str(np.sqrt(float(token_value)))
elif token >= 3 and token_type in ["DATATYPE", "KEYWORD"]:
msg = "SyntaxError at line "+ str(self.lines) +":\nInvalid variable value '" + token_value + "'"
self.error_message(msg, token_stream, token)
tokens_checked += 1
if dots:
value = str(self.get_tokens_range(value))
#----------------------------------------------------------
#TYPE CHECKING & EVALUATION:
def type_check(value):
string = True
if "[" in value and "]" in value:
return
if re.match("[0-9]", value) or value in ["True", "False", "None"] or "constants" in value:
string = False
if typ8 == "str" and string:
value = str(value)
elif typ8 == "str" and string == False:
msg = "TypeError at line %s:\nDeclared wrong data type, %s is not string" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "char" and string and len(value) == 1:
value = str(value)
elif typ8 == "char" and string == False or typ8 == "char" and len(value) > 3:
msg = "TypeError at line %s:\nDeclared wrong data type, %s is not char" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "int" and string == False and value not in ["True", "False", "None"]:
try:
value = eval(value)
value = int(value)
except NameError:
pass
elif typ8 == "int" and string == True or typ8 == "int" and value in ["True", "False", "None"]:
msg = "TypeError at line %s:\nDeclared wrong data type, '%s' is not integer" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "float" and string == False and value not in ["True", "False", "None"]:
try:
value = eval(value)
value = float(value)
except NameError:
pass
elif typ8 == "float" and string == True or typ8 == "float" and value in ["True", "False", "None"]:
msg = "TypeError at line %s:\nDeclared wrong data type, '%s' is not float" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "complex" and string == False and value not in ["True", "False", "None"]:
try:
value = eval(value)
value = 'Complex({}, {})'.format(value.real, value.imag)
except NameError:
pass
elif typ8 == "complex" and string == True or typ8 == "complex" and value in ["True", "False", "None"]:
msg = "TypeError at line %s:\nDeclared wrong data type, '%s' is not complex number" % (
self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "bool" and value in ["True", "False", "None"]:
try:
value = bool(value)
except NameError:
pass
elif typ8 == "bool" and value not in ["True", "False", "None"]:
msg = "TypeError at line %s:\nDeclared wrong data type, '%s' is not boolean" % (self.lines, value)
self.error_message(msg, token_stream, token)
if var_decl == False:
string = True
type_check(value)
#---------------------------------------------------------
if var_decl == False:
ast['variable_declaration'].append({'value': value})
if inScope == False:
self.ast['main_scope'].append(ast)
self.symbol_table.append([ast['variable_declaration'][0]['type'], ast['variable_declaration'][1]['name'], ast['variable_declaration'][2]['value']]) # type, name, value
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_variable(self, token_stream, inScope):
tokens_checked = 0
ast = {'variable_declaration': []}
value = ""
typ8 = ""
c = False
var_decl = False
square_root = False
dots = False
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
# If semic is found loop breaks
if token_type in ["SEMIC", "NEWLINE"]:
break
elif token == 0 and token_type == "IDENTIFIER":
typ8 = self.get_token_type(token_value)
ast['variable_declaration'].append({'type': typ8})
ast['variable_declaration'].append({'name': token_value})
elif token == 0 and token_type != "IDENTIFIER":
msg = ("SyntaxError at line "+ str(self.lines) +"\nInvalid variable name '" + token_value + "'")
self.error_message(msg, token_stream, token)
elif token == 1 and token_type not in ["OPERATOR", "INCREMENT_OPERATOR"]:
msg = "SyntaxError at line {}:\nInvalid operator '{}'".format(self.lines, token_value)
self.error_message(msg, token_stream, token)
elif token == 2 and token_type == "IDENTIFIER" and token_value not in constants and token_stream[tokens_checked + 1][1] != ":":
value = str(token_value)
elif token == 2 and token_type == "IDENTIFIER" and token_value in constants:
value = "constants['{}']".format(token_value)
elif token == 2 and token_type == "STRING":
value = token_value.replace('\s', ' ')
elif token == 2 and token_type == "COMPLEX_NUMBER":
value = str(token_value) + "j"
c = True
elif token == 2 and token_type == "SQUARE_ROOT":
if re.match("[a-z]", token_value) or re.match("[A-Z]", token_value):
token_value = self.get_token_value(token_value)
if token_value[len(token_value) - 1] in ["i", "j"]:
value = str(np.sqrt(complex(token_value)))
else:
value = str(np.sqrt(float(token_value)))
elif token == 2 and token_type not in ["COMPLEX_NUMBER", "STRING", "FACTORIAL"]:
value = str(token_value)
elif token > 2 and token_type not in ["COMPLEX_NUMBER", "FACTORIAL", "OPERATOR", "SQUARE_ROOT", "ELLIPSIS_OPERATOR"]:
value += str(token_value)
elif token > 2 and token_type == "OPERATOR":
value += str(token_value.replace('^', '**'))
elif token > 2 and token_type == "ELLIPSIS_OPERATOR":
value += str(token_value)
dots = True
elif token == 2 and token_type == "FACTORIAL":
math = MathModule()
value = str(math.factorial(int(token_value)))
elif token > 2 and token_type == "COMPLEX_NUMBER":
value += str(token_value) + "j"
c = True
elif token > 2 and token_type == "FACTORIAL":
math = MathModule()
value += str(math.factorial(int(token_value)))
elif token > 2 and token_type == "IDENTIFIER" and token_value in constants:
value += "constants['{}']".format(token_value)
elif token > 2 and token_type == "SQUARE_ROOT":
if re.match("[a-z]", token_value) or re.match("[A-Z]", token_value):
token_value = self.get_token_value(token_value)
if token_value[len(token_value) - 1] in ["i", "j"]:
value += str(np.sqrt(complex(token_value)))
else:
value += str(np.sqrt(float(token_value)))
tokens_checked += 1
if dots:
value = str(self.get_tokens_range(value))
#TYPE CHECKING & EVALUATION:
#----------------------------------------------------------
        def type_check(value):
            # Initialise the flag inside the helper: assigning to it below makes
            # it a local name, so it must be defined here to avoid UnboundLocalError.
            string = True
            if re.match("[0-9]", value) or value in ["True", "False", "None"]:
                string = False
if typ8 == "str" and string:
value = str(value)
elif typ8 == "str" and string == False:
msg = "TypeError at line %s:\nDeclared wrong data type, %s is not string" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "char" and string and len(value) == 1:
value = str(value)
elif typ8 == "char" and string == False or typ8 == "char" and len(value) > 3:
msg = "TypeError at line %s:\nDeclared wrong data type, %s is not char" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "int" and string == False and value not in ["True", "False", "None"]:
try:
value = eval(value)
value = int(value)
except NameError:
pass
elif typ8 == "int" and string == True or typ8 == "int" and value in ["True", "False", "None"]:
msg = "TypeError at line %s:\nDeclared wrong data type, '%s' is not integer" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "float" and string == False and value not in ["True", "False", "None"]:
try:
value = eval(value)
value = float(value)
except NameError:
pass
elif typ8 == "float" and string == True or typ8 == "float" and value in ["True", "False", "None"]:
msg = "TypeError at line %s:\nDeclared wrong data type, '%s' is not float" % (self.lines, value)
self.error_message(msg, token_stream, token)
if typ8 == "bool" and value in ["True", "False", "None"]:
try:
value = bool(value)
except NameError:
pass
elif typ8 == "bool" and value not in ["True", "False", "None"]:
msg = "TypeError at line %s:\nDeclared wrong data type, '%s' is not boolean" % (self.lines, value)
self.error_message(msg, token_stream, token)
#---------------------------------------------------------
if var_decl == False:
ast['variable_declaration'].append({'value': value})
if inScope == False:
self.ast['main_scope'].append(ast)
for i in self.symbol_table:
if i[1] == ast['variable_declaration'][1]['name']:
                # Change the declared variable's value to this one
i[2] = ast['variable_declaration'][2]['value']
self.token_index += tokens_checked
return [ast, tokens_checked]
def get_scope(self, token_stream):
nesting_count = 1
tokens_checked = 0
scope_tokens = []
for token in token_stream:
tokens_checked += 1
token_value = token[1]
token_type = token[0]
if token_type == "SCOPE_DEFINIER" and token_value == "{":
nesting_count += 1
elif token_type == "SCOPE_DEFINIER" and token_value == "}":
nesting_count -= 1
if nesting_count == 0:
scope_tokens.append(token)
break
else:
scope_tokens.append(token)
return [scope_tokens, tokens_checked]
def parse_scope(self, token_stream, statement_ast, astName, isNested, macros, match_case=False):
ast = {'scope': []}
tokens_checked = 0
lines = 1
while tokens_checked < len(token_stream):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if match_case:
case = self.parse_case(token_stream[tokens_checked + 1:len(token_stream)])
ast['scope'].append(case[0])
tokens_checked += case[1]
            # If the token is 'include', hand the remaining tokens to parse_include()
if token_type == "KEYWORD" and token_value == "include":
                include = self.parse_include(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(include[0])
tokens_checked += include[1]
elif token_type == "DATATYPE":
var = self.parse_decl_variable(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(var[0])
tokens_checked += var[1]
elif token_type == "IDENTIFIER" and token_stream[tokens_checked + 1][1] == "=" or token_type == "IDENTIFIER" and token_stream[tokens_checked + 1][0] == "INCREMENT_OPERATOR":
varx = self.parse_variable(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(varx[0])
tokens_checked += varx[1]
elif token_type == "BUILT_IN_FUNCTION":
builtin = self.parse_builtin(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(builtin[0])
tokens_checked += builtin[1]
elif token_type == "MATH_FUNCTION":
math = self.parse_math(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(math[0])
tokens_checked += math[1]
elif token_type == "KEYWORD" and token_value == "if" or token_value == "else" or token_value == "elseif":
condtitional = self.parse_conditional_statements(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(condtitional[0])
tokens_checked += condtitional[1] - 1
elif token_type == "KEYWORD" and token_value == "for":
loop = self.parse_loop(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(loop[0])
tokens_checked += loop[1]
elif token_type == "KEYWORD" and token_value == "while":
loop = self.parse_loop(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(loop[0])
tokens_checked += loop[1]
elif token_type == "KEYWORD" and token_value == "func":
function = self.parse_func(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(function[0])
tokens_checked += function[1]
elif token_type == "KEYWORD" and token_value == "return":
return_statement = self.parse_return(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(return_statement[0])
tokens_checked += return_statement[1]
elif token_type == "COMMENT" and token_value == r"\\":
comment = self.parse_single_line_comment(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(comment[0])
tokens_checked += comment[1]
elif token_type == "COMMENT" and token_value == "|**":
comment = self.parse_multi_line_comment(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(comment[0])
tokens_checked += comment[1]
elif macros == True and token_value == "define":
define = self.parse_macros_define(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(define[0])
tokens_checked += define[1]
            try:  # Guard: peeking one token ahead would raise IndexError on the last token
if token_type == "IDENTIFIER" and token_stream[tokens_checked + 1][0] == "COLON":
run = self.call_func(token_stream[tokens_checked:len(token_stream)], True)
ast['scope'].append(run[0])
tokens_checked += run[1]
except:
pass
if token_type == "NEWLINE":
self.lines += 1
if token_value == "}":
self.nesting_count += 1
tokens_checked += 1
self.token_index += self.nesting_count + 1
self.lines -= 1
statement_ast[astName].append(ast)
if isNested == False:
self.ast['main_scope'].append(statement_ast)
def parse_builtin(self, token_stream, inScope):
tokens_checked = 0
value = ""
ast = {'builtin_function': []}
execute = False
dots = False
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SEMIC": break
if token == 0 and token_type == "BUILT_IN_FUNCTION":
ast['builtin_function'].append({'function': token_value})
elif token == 1 and token_type == "IDENTIFIER" and token_value not in constants:
if token_stream[0][1] == "execute":
value = self.get_token_value(token_value)
elif token_stream[0][1] == "input":
ast['builtin_function'].append({'type' : self.get_token_type(token_value)})
value = str(token_value)
else:
value = str(token_value)
elif token == 1 and token_type == "IDENTIFIER" and token_value in constants:
value = "constants['{}']".format(token_value)
elif token == 1 and token_type not in ["IDENTIFIER", "FACTORIAL", "SQUARE_ROOT"]:
value = token_value
elif token == 1 and token_type == "FACTORIAL":
math = MathModule()
value = str(math.factorial(int(token_value)))
elif token == 1 and token_type == "SQUARE_ROOT":
if re.match("[a-z]", token_value) or re.match("[A-Z]", token_value):
token_value = str(self.get_token_value(token_value))
if "Complex(" in token_value and ")" in token_value:
value = str(np.sqrt(token_value))
else:
value = str(np.sqrt(float(token_value)))
elif token > 1 and token_type == "ELLIPSIS_OPERATOR":
value += str(token_value)
dots = True
elif token > 1 and token_type == "FACTORIAL":
math = MathModule()
value += str(math.factorial(int(token_value)))
elif token > 1 and token_type not in ["FACTORIAL", "OPERATOR", "IDENTIFIER"]:
value += str(token_value)
elif token > 1 and token_type == "OPERATOR":
value += str(token_value.replace('^', '**'))
elif token > 1 and token_type == "IDENTIFIER" and token_value not in constants:
if token_stream[0][1] == "execute":
value += self.get_token_value(token_value)
else:
value += str(token_value)
elif token > 1 and token_type == "IDENTIFIER" and token_value in constants:
value += "constants['{}']".format(token_value)
tokens_checked += 1
if dots:
value = str(self.get_tokens_range(value))
if type(value) == int:
value = int(value)
elif type(value) == float:
value = float(value)
elif type(value) == complex:
fmath = MathModule()
value = fmath.complex(value)
ast['builtin_function'].append({'argument': value})
if inScope == False:
self.ast['main_scope'].append(ast)
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_return(self, token_stream, inScope):
tokens_checked = 0
value = ""
ast = {'return': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SEMIC": break
if token == 1 and token_type == "IDENTIFIER":
value = token_value
elif token == 1 and token_type == "IDENTIFIER" and token_stream[tokens_checked + 1][0] == "COLON":
value = token_value
elif token == 1 and token_type != "IDENTIFIER":
value = token_value
elif token == 1 and token_type == "FACTORIAL":
math = MathModule()
value = str(math.factorial(int(token_value)))
elif token > 1 and token_type == "FACTORIAL":
math = MathModule()
value += str(math.factorial(int(token_value)))
elif token > 1 and token_type != "FACTORIAL":
value += token_value
tokens_checked += 1
if type(value) in [int, float]:
try:
value = eval(value)
except:
pass
elif type(value) == float:
value = float(value)
elif type(value) == complex:
try:
value = complex(value)
except:
pass
ast['return'].append({'argument': value})
if inScope == False:
self.ast['main_scope'].append(ast)
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_conditional_statements(self, token_stream, isNested):
tokens_checked = 0
condition = ""
els = False
tokens = []
ast = {'conditional_statement': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SCOPE_DEFINIER" and token_value == "{":
break
elif token == 0 and token_value == "if":
ast['conditional_statement'].append({'keyword': token_value})
elif token == 0 and token_value == "else":
ast['conditional_statement'].append({'keyword': token_value})
els = True
elif token == 1 and token_type != "FACTORIAL":
condition = token_value
elif token == 1 and token_type == "FACTORIAL":
math = MathModule()
condition = str(math.factorial(int(token_value)))
elif token > 1 and token_type == "FACTORIAL":
math = MathModule()
condition += str(math.factorial(int(token_value)))
elif token > 1 and token_type != "FACTORIAL":
condition += token_value.replace("mod", "%")
tokens_checked += 1
if els == False:
ast['conditional_statement'].append({'condition': condition})
self.token_index += tokens_checked
scope_tokens = self.get_scope(token_stream[tokens_checked + 1:len(token_stream)])
if isNested == False:
self.parse_scope(scope_tokens[0], ast, 'conditional_statement', False, False)
else:
self.parse_scope(scope_tokens[0], ast, 'conditional_statement', True, False)
tokens_checked += scope_tokens[1]
return [ast, tokens_checked]
def get_token_value(self, token):
for variable in self.symbol_table:
if variable[1] == token: return variable[2]
def get_token_type(self, token):
for variable in self.symbol_table:
if variable[1] == token: return variable[0]
    def find_token_type(self, token):
        # Infer a datatype from a literal token value; falls back to 'str'.
        try:
            int(token)
            return 'int'
        except (TypeError, ValueError):
            pass
        try:
            float(token)
            return 'float'
        except (TypeError, ValueError):
            pass
        return 'str'
def get_tokens_range(self, value):
amount = 0
if "..." in value:
value = value.split('...')
amount = 1
elif ".." in value:
value = value.split('..')
amount = 0
arr = []
try:
value[0], value[1] = int(value[0]), int(value[1])
for i in range(value[0], value[1] + amount): # startValue to endValue
arr.append(i)
except:
startValue, endValue = value[0].replace("'", "").replace('"', ''), value[1].replace("'", "").replace('"', '')
for i in range(ord(startValue), ord(endValue) + amount):
arr.append(chr(i))
return arr
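    # Examples of the range expansion above:
    #   "1..5"     -> [1, 2, 3, 4]       (".." excludes the end value)
    #   "1...5"    -> [1, 2, 3, 4, 5]    ("..." includes the end value)
    #   "'a'..'d'" -> ['a', 'b', 'c']    (character ranges go through ord/chr)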
def get_token_match(self, start_matcher, end_matcher, token_stream):
tokens = []
tokens_checked = 0
for token in token_stream:
tokens_checked += 1
if token[1] == end_matcher:
return [tokens, tokens_checked - 1]
else:
tokens.append(token)
return False
def parse_loop(self, token_stream, isNested):
# for x :: x < 10 :: x++ {
tokens_checked = 0
keyword = ""
condition = ""
value = ""
increment = ""
var_decl = False
ast = {'loop': []}
while tokens_checked < len(token_stream):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SCOPE_DEFINIER" and token_value == "{":
break
if tokens_checked == 0:
ast['loop'].append({'keyword': token_value})
keyword = token_value
if tokens_checked == 1 and keyword == "for":
tokens = self.get_token_match("::", "{", token_stream)
inner_tokens = [i[1] for i in tokens[0]]
if "in" in inner_tokens:
array = ""
data_type = self.get_token_type(inner_tokens[3])
ast['loop'].append({'name': inner_tokens[1]})
ast['loop'].append({'type': data_type})
ast['loop'].append({'array': ''.join(inner_tokens[3:])})
self.symbol_table.append([data_type, inner_tokens[1], inner_tokens[3:]])
else:
if len([i for i, x in enumerate(inner_tokens) if x == "::"]) != 2:
self.error_message("SyntaxError:\nSymbol '::' is missing in a for loop", token_stream, tokens_checked)
inner_tokens[:] = [x for x in inner_tokens if x != '::']
ast['loop'].append({'name': inner_tokens[1]})
ast['loop'].append({'start_value': self.get_token_value(inner_tokens[2])})
ast['loop'].append({'end_value': inner_tokens[4]})
if "++" in inner_tokens[5]:
ast['loop'].append({'increment': "1"})
elif "--" in inner_tokens[5]:
ast['loop'].append({'increment': "-1"})
tokens_checked += tokens[1]
break
elif keyword == "while":
if tokens_checked == 1: condition = token_value
elif tokens_checked == 2 and token_type != "FACTORIAL":
condition += token_value
elif tokens_checked == 2 and token_type == "FACTORIAL":
math = MathModule()
condition = str(math.factorial(int(token_value)))
elif tokens_checked > 2 and token_type == "FACTORIAL":
math = MathModule()
condition += str(math.factorial(int(token_value)))
elif tokens_checked > 2 and token_type != "FACTORIAL":
condition += token_value.replace("mod", "%")
tokens_checked += 1
self.token_index += tokens_checked
scope_tokens = self.get_scope(token_stream[tokens_checked + 1:len(token_stream)])
if keyword == "while": ast['loop'].append({'condition': condition})
if isNested == False:
self.parse_scope(scope_tokens[0], ast, 'loop', False, False)
else:
self.parse_scope(scope_tokens[0], ast, 'loop', True, False)
tokens_checked += scope_tokens[1]
return [ast, tokens_checked]
def parse_func(self, token_stream, isNested):
tokens_checked = 0
value = ""
ast = {'function_declaration': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SCOPE_DEFINIER" and token_value == "{": break
if token == 1 and token_type in ["IDENTIFIER", "INNER_FUNC"]:
ast['function_declaration'].append({'name': token_value})
elif token == 2 and token_type != "COLON":
msg = "SyntaxError at line "+ str(self.lines) +":\n':' is missing"
self.error_message(msg, token_stream, token)
elif token == 3 and token_value == "0":
value = token_value
elif token == 3 and token_type in ["IDENTIFIER", "COMMA"]:
value = token_value
elif token > 3 and token_type in ["IDENTIFIER", "COMMA"]:
value += token_value
tokens_checked += 1
ast['function_declaration'].append({'argument': value})
self.token_index += tokens_checked - 1
scope_tokens = self.get_scope(token_stream[tokens_checked + 1:len(token_stream)])
if isNested == False:
self.parse_scope(scope_tokens[0], ast, 'function_declaration', False, False)
else:
self.parse_scope(scope_tokens[0], ast, 'function_declaration', True, False)
tokens_checked += scope_tokens[1]
self.symbol_table.append(['function', ast['function_declaration'][0]['name'], ast['function_declaration'][1]['argument']])
return [ast, tokens_checked]
def parse_class(self, token_stream, isNested):
tokens_checked = 0
value = ""
ast = {'class': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SCOPE_DEFINIER" and token_value == "{": break
if token == 1 and token_type == "IDENTIFIER":
ast['class'].append({'name': token_value})
elif token == 2 and token_type != "COLON":
msg = f"SyntaxError at line {self.lines}\n':' is missing."
self.error_message(msg, token_stream, token)
elif token == 3 and token_value == "object":
ast['class'].append({'argument': token_value})
decl = True
tokens_checked += 1
scope_tokens = self.get_scope(token_stream[tokens_checked + 1:len(token_stream)])
self.token_index += tokens_checked - 1
if isNested == False:
self.parse_scope(scope_tokens[0], ast, 'class', False, False)
else:
self.parse_scope(scope_tokens[0], ast, 'class', True, False)
tokens_checked += scope_tokens[1]
self.symbol_table.append(['function', ast['class'][0]['name'], ast['class'][1]['argument']])
return [ast, tokens_checked]
def parse_single_line_comment(self, token_stream, inScope):
tokens_checked = 0
comment_str = ""
ast = {'comment': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "NEWLINE": break
if token >= 1:
comment_str += str(token_value) + " "
tokens_checked += 1
ast['comment'].append({'Comment_str': comment_str})
if inScope == False:
self.ast['main_scope'].append(ast)
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_multi_line_comment(self, token_stream, inScope):
tokens_checked = 0
comment_str = ""
ast = {'comment': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "COMMENT" and token_value == "**|": break
if token >= 1:
comment_str += str(token_value) + " "
tokens_checked += 1
ast['comment'].append({'Comment_str': comment_str})
if inScope == False:
self.ast['main_scope'].append(ast)
self.token_index += tokens_checked
return [ast, tokens_checked]
def parse_match(self, token_stream, isNested):
"""
var stdin;
input stdin;
match stdin {
1 -> echo "One";
}
"""
tokens_checked = 0
ast = {'match': []}
scope_ast = {'scope': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SCOPE_DEFINIER" and token_value == "{": break
if token == 1:
ast['match'].append({'variable': token_value})
tokens_checked += 1
self.token_index += tokens_checked - 1
scope_tokens = self.get_scope(token_stream[tokens_checked + 1:len(token_stream)])
if isNested == False:
self.parse_scope(scope_tokens[0], ast, 'match', False, False, True)
else:
self.parse_scope(scope_tokens[0], ast, 'match', True, False, True)
tokens_checked += scope_tokens[1]
def parse_case(self, token_stream):
tokens_checked = 0
value = ""
ast = {'current_case' : []}
while tokens_checked < len(token_stream):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
print(tokens_checked, token_type, token_value)
if token_type == "SEMIC" and token_value == ";": break
if tokens_checked == 0:
ast['current_case'].append({'case': token_value})
elif tokens_checked == 1 and token_type != "ARROW":
msg = f"SyntaxError at line {self.lines}\n{token_type, token_value} !='->' symbol is missing."
self.error_message(msg, token_stream, tokens_checked)
break
elif tokens_checked == 2:
value = token_value
elif tokens_checked > 2:
value += f" {token_value}"
tokens_checked += 1
self.token_index += tokens_checked
ast['current_case'].append({'command' : value})
return [ast, tokens_checked]
def parse_macros(self, token_stream):
"""
macros
{
define x, 10;
redefine @echo, "print";
}
"""
tokens_checked = 0
ast = {'macros': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SCOPE_DEFINIER" and token_value == "{": break
tokens_checked += 1
scope_tokens = self.get_scope(token_stream[tokens_checked + 1:len(token_stream)])
self.parse_scope(scope_tokens[0], ast, 'macros', False, True)
def parse_macros_define(self, token_stream, inScope):
tokens_checked = 0
ast = {'define': []}
value = ""
for token in range(len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SEMIC":break
if token == 0:
ast['define'].append({'function': token_value})
elif token == 1 and token_type == "IDENTIFIER":
ast['define'].append({'name': token_value})
elif token == 2 and token_type in ["IDENTIFIER", "STRING", "INTEGER", "BOOLEAN", "COMPLEX_NUMBER"]:
value = str(token_value)
elif token > 2:
value += str(token_value)
tokens_checked += 1
self.token_index += tokens_checked
ast['define'].append({"value": value})
if inScope == False:
self.ast['main_scope'].append(ast)
self.symbol_table.append([type(ast['define'][2]['value']), ast['define'][1]['name'], ast['define'][2]['value']])
return [ast, tokens_checked]
# ---------------------------BROWSER------------------------------------
# -------------------------------CALL FUNCTION------------------------------
def call_func(self, token_stream, inScope):
tokens_checked = 0
name = ""
argument = ""
ast = {'call_function': []}
for token in range(0, len(token_stream)):
token_type = token_stream[tokens_checked][0]
token_value = token_stream[tokens_checked][1]
if token_type == "SEMIC": break
if token == 0:
ast['call_function'].append({'name': token_value})
elif token == 1 and token_type != "COLON":
self.error_message("SyntaxError at line {}: ':' is missing".format(self.lines))
elif token == 2:
if token_value == "()": argument = ""
else: argument = token_value
elif token > 2 and token_type in ['COMMA', 'INTEGER', 'STRING', 'BOOL']:
argument += token_value
tokens_checked += 1
self.token_index += tokens_checked
ast['call_function'].append({'argument': argument})
self.ast['main_scope'].append(ast)
return [ast, tokens_checked]
# --------------------------------------------------------------------------
    def error_message(self, msg, token_stream=None, token=None):
        # token_stream and token are optional so callers that only have a
        # message (e.g. undefined-token errors) can still report it.
        print(msg)
        self.error = True
        if not token_stream:
            return
        tokens_checked = 1
        length = 0
        # Count how many tokens belong to the offending source line
        for idx in range(len(token_stream)):
            if token_stream[idx][0] in ["SEMIC", "NEWLINE"]: break
            tokens_checked += 1
        # Join those tokens back into a readable error line
        error_msg = " ".join(str(tok[1]) for tok in token_stream[:tokens_checked] if tok[0] not in ["SEMIC", "NEWLINE"])
        print("".join(error_msg[:-2] + ";" if error_msg[-1:] == ";" else error_msg))
        # Print a caret under the token that triggered the error
        for i in range(len(token_stream)):
            if i == token: break
            else: length += len(token_stream[i][1])
        print(" " * length + "^")
|
import configparser
import pathlib
from . import cli
from .log import logger
_here = pathlib.Path(__file__).resolve().parent
_root = _here.parent
SAMPLE_CONFIG = '''\
[common]
# Instagram user handle.
username = instagram
# Data directory for database and downloaded media; can be overridden by more
# specific configurations.
#
# The path undergoes tilde expansion. If a relative path is given, it is deemed
# as relative to the parent directory of the `insta` package. The same path
# resolution rules apply to other path settings below.
data_dir = ~/data/instagram
# Path for the SQLite database; defaults to {data_dir}/databases/{username}.db.
# database_path =
# Directory for downloaded images; defaults to {data_dir}/media/{username}/.
# images_dir =
# Directory for downloaded videos; defaults to images_dir.
# videos_dir =
# Whether to download video cover images; defaults to True.
# download_video_covers = False
[feed]
# The URL at which the generated feed will be served; used as atom:id and
# atom:feed with rel=self in atom:feed.
feed_url = https://example.org/instagram.atom
# Path to write the generated feed on the local filesystem.
local_path = /var/www/instafeed/instagram.atom
# Whether to download media of new entries; defaults to False.
# download_media = True
'''
class ConfigError(Exception):
pass
class SubConfig(object):
pass
class Config(object):
# conf is a configparser.ConfigParser object.
def __init__(self, conf):
self._conf = conf
self.username = conf.get('common', 'username', fallback=None)
if not self.username:
raise ConfigError('username required')
self.data_dir = self._resolve_path(
conf.get('common', 'data_dir', fallback=None),
is_dir=True, mkdir=True,
)
default_database_path = ((self.data_dir / 'databases' / f'{self.username}.db')
if self.data_dir is not None else None)
self.database_path = self._resolve_path(
conf.get('common', 'database_path', fallback=default_database_path),
is_dir=False, mkdir=True,
)
if not self.database_path:
raise ConfigError('database_path required')
default_images_dir = ((self.data_dir / 'media' / self.username)
if self.data_dir is not None else None)
self.images_dir = self._resolve_path(
conf.get('common', 'images_dir', fallback=default_images_dir),
is_dir=True, mkdir=True,
)
default_videos_dir = self.images_dir
self.videos_dir = self._resolve_path(
conf.get('common', 'videos_dir', fallback=default_videos_dir),
is_dir=True, mkdir=True,
)
self.download_video_covers = conf.getboolean(
'common', 'download_video_covers', fallback=True)
# The feed section
self.feed = SubConfig()
self.feed.feed_url = conf.get('feed', 'feed_url', fallback=None)
self.feed.local_path = self._resolve_path(
conf.get('feed', 'local_path', fallback=None),
is_dir=False, mkdir=True,
)
self.feed.download_media = conf.getboolean('feed', 'download_media', fallback=False)
@staticmethod
def _resolve_path(path, *, is_dir=False, mkdir=False):
if path is None:
return None
path = pathlib.Path(path).expanduser()
if not path.is_absolute():
path = _root.joinpath(path)
if mkdir:
directory = path if is_dir else path.parent
if not directory.is_dir():
logger.info(f'makedirs: {directory}')
directory.mkdir(parents=True)
return path
def load_config(config_path):
conf = configparser.ConfigParser()
if not conf.read(config_path):
raise RuntimeError(f'{config_path}: not found or failed to parse')
return Config(conf)
def validate_config(config_path):
conf = load_config(config_path)
if not conf.images_dir:
logger.warning('images_dir recommended')
def main():
parser = cli.ArgumentParser(description='Validate or generate sample config file.')
parser.add_argument('config_path', nargs='?',
help='''if specified, validate the config file;
otherwise, print a sample config file to stdout''')
args = parser.parse_args()
cli.adjust_verbosity(args)
if args.config_path:
def validate():
validate_config(args.config_path)
logger.info('valid config')
cli.sandbox(validate)
else:
print(SAMPLE_CONFIG, end='')
if __name__ == '__main__':
    main()
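# Typical consumption sketch (illustrative, not part of the original module; the
# import path below is hypothetical and depends on how the package is laid out):
#
#   from insta.config import load_config
#   conf = load_config('/etc/instafeed.conf')
#   conf.username, conf.database_path            # resolved following the rules above
#   conf.feed.feed_url, conf.feed.download_media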
|
import pytest, fastai
from fastai.gen_doc.doctest import this_tests
def test_has_version():
this_tests('na')
assert fastai.__version__
|
"""
Setup of the perception python codebase
Author: Jeff Mahler
"""
from setuptools import setup
requirements = [
'numpy',
'scipy',
'autolab_core',
'matplotlib',
'multiprocess',
'opencv-python',
'keras',
'cycler',
'Pillow',
'pyserial>=3.4',
'ipython==5.5.0',
'scikit-image',
'scikit-learn',
'scikit-video'
]
exec(open('perception/version.py').read())
setup(name='autolab_perception',
version=__version__,
description='Perception utilities for the Berkeley AutoLab',
author='Jeff Mahler',
author_email='jmahler@berkeley.edu',
license = 'Apache Software License',
url = 'https://github.com/BerkeleyAutomation/perception',
keywords = 'robotics grasping vision perception',
classifiers = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English',
'Topic :: Scientific/Engineering'
],
packages=['perception'],
install_requires = requirements,
extras_require = { 'docs' : [
'sphinx',
'sphinxcontrib-napoleon',
'sphinx_rtd_theme'
],
'ros' : [
'primesense',
'rospkg',
'catkin_pkg',
'empy'
],
}
)
|
from flask import Flask
from flask import render_template
import csv
app = Flask(__name__)
def load_topics():
    f_topics = open('data/topics_scores.csv', 'r')
    reader = csv.reader(f_topics, delimiter=",")
    headers = next(reader)
topics = []
for row in reader:
topic = {}
topic["topic_id"] = row[headers.index("topic_id")]
topic["topic_ngrams"] = row[headers.index("topic_ngrams")]
topic["extrinsic_umass"] = row[headers.index("extrinsic_umass")]
topic["extrinsic_uci"] = row[headers.index("extrinsic_uci")]
topic["intrinsic_umass"] = row[headers.index("intrinsic_umass")]
topic["intrinsic_uci"] = row[headers.index("intrinsic_uci")]
topics.append(topic)
# topic_id = row[0]
# ngrams = row[1]
# print('Topic {}: {}'.format(topic_id,ngrams))
# ngrams_list = ngrams.split(" ")
# extrinsic_umass_result = extrinsic_umass.fit(ngrams_list)
# intrinsic_umass_result = intrinsic_umass.fit(ngrams_list)
# extrinsic_uci_result = extrinsic_uci.fit(ngrams_list)
# intrinsic_uci_result = intrinsic_uci.fit(ngrams_list)
# doc = {
# "n_topics":20,
# "topic_id":int(topic_id),
# "ngrams":ngrams,
# "scores":{
# "intrinsic":{
# "cpp":intrinsic_umass_result,
# "pmi":intrinsic_uci_result
# },
# "extrinsic":{
# "cpp":extrinsic_umass_result,
# "pmi":extrinsic_uci_result
# }}
# }
# scores.append(doc)
return topics
@app.route("/")
def hello():
return render_template('topics.html',topics=load_topics())
if __name__ == "__main__":
app.run()
|
from typing import Dict, Any
import datetime as dt
from weather import arrange_data
class WeatherService:
@classmethod
def get_weather(cls) -> Dict[str, Any]:
return getAndParseData()
def getAndParseData():
forecast = arrange_data.arrange_data()
today = dt.datetime.now()
    tomorrow = today + dt.timedelta(days=1)  # avoids day overflow at month boundaries
days = []
for tempDay in forecast:
for tempHour in tempDay:
if tomorrow.day == tempHour[0].day:
days.append(tempHour)
rainChances = get_rain_chances(days)
temperature = getTemp(days)
sky = getSky(days)
if rainChances > 61:
willRain = True
else:
willRain = False
returnObj = {'temperature': temperature, 'rain': willRain}
return returnObj
def getSky(days):
sky = 0
for day in days:
if day[3][0] == "Despejado":
sky += 1
else:
sky += 3
if sky >= 4:
return "Nublado"
else:
return "Despejado"
def getTemp(days):
temperature = 0
for day in days:
temperature += int(day[1][1])
temperature = temperature / len(days)
return round(float(temperature),2)
def get_rain_chances(days):
humidity = float(get_humidity(days))
sky_desc = get_sky(days)
sky_const = 0
humidity_const = 0
bias = 25
sky = sky_desc.split(",")
if sky[0] == "Despejado":
sky_const = 0
bias = 0
elif sky[1] == " con pocas nubes":
sky_const = 0.5
elif sky[1] == " con nubes aisladas":
sky_const = 0.8
elif sky[1] == " totalmente nublado":
sky_const = 1.1
if humidity > 50:
humidity_const = 0.6
elif humidity > 70:
humidity_const = 0.8
else:
humidity_const = 1
return float(round(sky_const*humidity_const*humidity + bias, 2))
def get_humidity(days):
humidity = 0
for day in days:
humidity += int(day[2])
humidity = humidity / len(days)
return str(humidity)
def get_sky(days):
sky = [0, 0, 0]
shorty = [0, 0]
final = ""
for day in days:
skies = day[3][1]
if skies == "pocas nubes":
sky[0] += 1
elif skies == "ninguna nube":
sky[0] += 1
elif skies == "nubes aisladas":
sky[1] += 1
elif skies == "muchas nubes":
sky[2] += 1
elif skies == "nubes dispersas":
sky[1] += 1
short = day[3][0]
if short == "Despejado":
shorty[0] += 1
elif short == "Nublado":
shorty[1] += 1
if shorty.index(max(shorty)) == 0:
final += "Despejado, "
else:
final += "Nublado, "
if sky.index(max(sky)) == 0:
final += "con pocas nubes"
elif sky.index(max(sky)) == 1:
final += "con nubes aisladas"
else:
final += " totalmente nublado"
return final
|
import json
import os
import re
from django.http import HttpResponse
from django.views import View
class SandboxView(View):
def get(self, request, *args, **kwargs):
base_path = os.path.dirname(__file__)
get_fixture = lambda filename: open(
os.path.join(base_path, "sandbox-responses", filename + ".json")
)
example_postcodes = (
"AA12AA", # station known
"AA12AB", # station not known
"AA13AA", # address picker
)
if "postcode" in kwargs:
postcode = re.sub("[^A-Z0-9]", "", kwargs["postcode"].upper())
if postcode in example_postcodes:
return HttpResponse(
get_fixture(postcode), content_type="application/json", status=200
)
return HttpResponse(
json.dumps({"message": "Could not geocode from any source"}),
content_type="application/json",
status=400,
)
example_slugs = (
"e07000223-524-2-truleigh-way-shoreham-by-sea-west-sussex-bn436hw",
"e07000223-527-5-truleigh-way-shoreham-by-sea-west-sussex-bn436hw",
)
if "slug" in kwargs:
if kwargs["slug"] in example_slugs:
return HttpResponse(
get_fixture(kwargs["slug"]),
content_type="application/json",
status=200,
)
return HttpResponse(
json.dumps({"message": "Address not found"}),
content_type="application/json",
status=404,
)
return HttpResponse(
json.dumps({"message": "Internal Server Error"}),
content_type="application/json",
status=500,
)
|
#coding: utf8
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class serializableModel(db.Model):
__abstract__ = True
def as_dict(self, recursif=False):
if recursif :
return self.as_dict_withrelationships()
else :
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def as_dict_withrelationships(self):
obj = self.as_dict()
for key in self.__mapper__.relationships.keys() :
if self.__mapper__.relationships[key].uselist :
obj[key] = [ item.as_dict() for item in getattr(self, key)]
else :
obj[key] = getattr(self, key).as_dict()
return obj
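# Illustrative usage sketch (not part of the original module): the "User" model
# below is hypothetical and only shows how as_dict() behaves on a subclass.
if __name__ == '__main__':
    class User(serializableModel):
        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(80))

    user = User(id=1, name='Ada')
    print(user.as_dict())                # {'id': 1, 'name': 'Ada'}
    print(user.as_dict(recursif=True))   # also expands relationships, if any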
|
#!/usr/bin/env python
import numpy as np
from itertools import product
from attacks.differential_evolution import differential_evolution
import cma
from attacks.base_attack import Attack
class OurAttack(Attack):
"""
TODO: Write Comment
"""
def __init__(self, args):
"""
TODO: Write Comment
"""
Attack.__init__(self, args)
self.set_ES(args.evolutionary_strategy)
self.POPSIZE = self.args.pop_size
self.MAXITER = self.args.max_iter
self.THRESHOLD = self.args.threshold
def set_ES(self, no=1):
"""
TODO: Write Comment
"""
if no == 0:
self.DE, self.CMAES = True, False
self.attack_name += '_DE'
elif no == 1:
self.DE, self.CMAES = False, True
self.attack_name += '_CMAES'
else: raise Exception('Unknown Evolutionary Strategy, please choose a supported strategy')
def predict_classes(self, xs, target_class):
"""
TODO: Write Comment
"""
predictions = self.model.predict(self.perturb_image(xs))[:,target_class]
return predictions if not self.TARGETED else 1 - predictions
def attack_success(self, xs, target_class):
"""
TODO: Write Comment
"""
predicted_class = np.argmax(self.model.predict(self.perturb_image(xs))[0])
if ((self.TARGETED and predicted_class == target_class) or (not self.TARGETED and predicted_class != target_class)): return True
def attack(self, target_class, th):
"""
TODO: Write Comment
"""
bounds, initial = self.get_bounds(th)
predict_fn = lambda xs: self.predict_classes(xs, target_class)
if self.DE:
callback_fn = lambda x, convergence: self.attack_success(x, target_class)
es = differential_evolution(predict_fn, bounds, disp=self.VERBOSE, maxiter=self.MAXITER, popsize=max(1, self.POPSIZE // len(bounds)), recombination=1, atol=-1, callback=callback_fn, polish=False)
result = es.x
elif self.CMAES:
def callback_fn(x):
if self.attack_success(x.result[0], target_class): raise Exception('Attack Completed :) Earlier than expected')
opts = cma.CMAOptions()
# if not self.VERBOSE:
opts.set('verbose', -9)
opts.set('verb_disp', 40000)
opts.set('verb_log', 40000)
opts.set('verb_time', False)
opts.set('popsize', self.POPSIZE)
opts.set('maxiter', self.MAXITER)
opts.set('bounds', bounds)
if "Pixel" in self.attack_name:
std = 63
elif "Threshold" in self.attack_name:
std = th
es = cma.CMAEvolutionStrategy(initial, std/4, opts)
try:
es.optimize(
predict_fn,
maxfun=max(1, 400 // len(bounds)) * len(bounds) * 100,
callback=callback_fn,
iterations=1,
)
except Exception as exception:
print(exception)
result = es.result[0]
else: raise Exception('Unknown Evolutionary Strategy, please choose a supported strategy')
return result
def attack_image(self, target_class):
"""
TODO: Write Comment
"""
image_results = []
if self.THRESHOLD == -1:
start, end = 1, 255
while True:
threshold = (start + end) // 2
if self.VERBOSE: print(f"[#][.]Attacking {self.model.name} with {self.attack_name} threshold {threshold} -- image {self.img}")
image_result, success = self.start_attack(target_class, threshold)
if success:
end = threshold -1
else:
start = threshold + 1
if end < start: break
image_results += image_result
else:
if self.VERBOSE: print(f"[#][.]Attacking {self.model.name} with {self.attack_name} threshold {self.THRESHOLD} -- image {self.img}")
image_result, success = self.start_attack(target_class, self.THRESHOLD)
image_results += image_result
return image_results
class PixelAttack(OurAttack):
"""
TODO: Write Comment
"""
def __init__(self, args):
"""
TODO: Write Comment
"""
OurAttack.__init__(self, args)
def set_attack_name(self):
"""
TODO: Write Comment
"""
self.attack_name = "Pixel"
def get_bounds(self, th):
"""
TODO: Write Comment
"""
initial = []
if self.DE:
bounds = [(0, self.x.shape[-3]), (0, self.x.shape[-2])]
for _ in range(self.x.shape[-1]):
bounds += [(0,255)]
bounds = bounds * th
elif self.CMAES:
for count, (i, j) in enumerate(product(range(self.x.shape[-3]), range(self.x.shape[-2]))):
initial += [i, j]
for k in range(self.x.shape[-1]):
initial += [self.x[i, j, k]]
if count == th - 1:
break
else:
continue
min_bounds = [0, 0]
for _ in range(self.x.shape[-1]):
min_bounds += [0]
min_bounds = min_bounds * th
max_bounds = [self.x.shape[-3], self.x.shape[-2]]
for _ in range(self.x.shape[-1]):
max_bounds += [255]
max_bounds = max_bounds * th
bounds = [min_bounds, max_bounds]
else: raise Exception('Unknown Evolutionary Strategy, please choose a supported strategy')
return bounds, initial
def perturb_image(self, x):
"""
TODO: Write Comment
"""
if x.ndim < 2:
x = np.array([x])
imgs = np.tile(self.x, [len(x)] + [1] * (x.ndim + 1))
x = x.astype(int)
for adv, image in zip(x, imgs):
for pixel in np.split(adv, len(adv) // (2 + self.x.shape[-1])):
x_pos, y_pos, *rgb = pixel
image[x_pos % self.x.shape[-3], y_pos % self.x.shape[-2]] = rgb
return imgs
class ThresholdAttack(OurAttack):
"""
TODO: Write Comment
"""
def __init__(self, args):
"""
TODO: Write Comment
"""
OurAttack.__init__(self, args)
def set_attack_name(self):
"""
TODO: Write Comment
"""
self.attack_name = "Threshold"
def get_bounds(self, th):
def bound_th(value):
return (np.clip(value - th, 0, 255), np.clip(value + th, 0, 255))
minbounds, maxbounds, bounds, initial = [], [], [], []
for i, j, k in product(range(self.x.shape[-3]), range(self.x.shape[-2]), range(self.x.shape[-1])):
temp = self.x[i, j, k]
initial += [temp]
bound = bound_th(temp)
if self.CMAES:
minbounds += [bound[0]]
maxbounds += [bound[1]]
if self.DE:
bounds += [bound]
if self.CMAES:
bounds = [minbounds, maxbounds]
return bounds, initial
def perturb_image(self, x):
if x.ndim < 2:
x = np.array([x])
imgs = np.tile(self.x, [len(x)] + [1]*(x.ndim+1))
x = x.astype(int)
for adv, image in zip(x, imgs):
for count, (i, j, k) in enumerate(
product(range(image.shape[-3]), range(image.shape[-2]), range(image.shape[-1]))
):
image[i, j, k] = adv[count]
return imgs
|
import unittest
import io
from tokenizer.jsontokenizer import JsonTokenizer
from decoder.streamdecoder import JsonDecoder
class TestJsonDecoderMethods(unittest.TestCase):
def test_decode(self):
json = '{"name1": "value1", "name2": "value2"}'
result = list()
stream = io.StringIO(json)
tokenizer = JsonTokenizer(stream)
JsonDecoder() \
.tokenizer(tokenizer) \
.root_class_name('Example') \
.event_handler(lambda e, p: result.append((e, p))) \
.decode()
self.assertEqual(len(result), 1)
(obj, path) = result[0]
self.assertEqual(path, '')
self.assertEqual(type(obj).__name__, 'Example')
self.assertEqual(obj.name1, 'value1')
self.assertEqual(obj.name2, 'value2')
def test_decode_subscribe_on_nested_object(self):
json = """
[
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
]"""
result = list()
stream = io.StringIO(json)
tokenizer = JsonTokenizer(stream)
JsonDecoder() \
.tokenizer(tokenizer) \
.root_class_name('Example') \
.predicate('batters') \
.event_handler(lambda e, p: result.append((e, p))) \
.decode()
self.assertEqual(len(result), 3)
self.assertEqual(len(result[0][0].batter), 4)
self.assertEqual(len(result[1][0].batter), 1)
self.assertEqual(len(result[2][0].batter), 2)
(obj, path) = result[0]
self.assertEqual(path, 'batters')
self.assertEqual(type(obj).__name__, 'Batters')
self.assertEqual(type(obj.batter).__name__, 'list')
self.assertEqual(len(obj.batter), 4)
self.assertEqual(type(obj.batter[0]).__name__, 'Batter')
self.assertEqual(obj.batter[0].id, '1001')
self.assertEqual(obj.batter[0].type, 'Regular')
self.assertEqual(obj.batter[1].id, '1002')
self.assertEqual(obj.batter[1].type, 'Chocolate')
self.assertEqual(obj.batter[2].id, '1003')
self.assertEqual(obj.batter[2].type, 'Blueberry')
self.assertEqual(obj.batter[3].id, '1004')
self.assertEqual(obj.batter[3].type, "Devil's Food")
def test_decode_subscribe_on_nested_array(self):
json = """
[
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
]"""
result = list()
stream = io.StringIO(json)
tokenizer = JsonTokenizer(stream)
JsonDecoder() \
.tokenizer(tokenizer) \
.root_class_name('Example') \
.predicate('batters.batter') \
.event_handler(lambda e, p: result.append((e, p))) \
.decode()
self.assertEqual(len(result), 7)
self.assertEqual(result[1][1], 'batters.batter')
self.assertEqual(result[6][1], 'batters.batter')
self.assertEqual(type(result).__name__, 'list')
(obj1, path1) = result[0]
self.assertEqual(path1, 'batters.batter')
self.assertEqual(type(obj1).__name__, 'Batter')
self.assertEqual(obj1.id, '1001')
self.assertEqual(obj1.type, 'Regular')
(obj6, path6) = result[6]
self.assertEqual(path6, 'batters.batter')
self.assertEqual(type(obj6).__name__, 'Batter')
self.assertEqual(obj6.id, '1002')
self.assertEqual(obj6.type, 'Chocolate')
def test_decode_subscribe_on_simple_value(self):
json = """
[
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.56,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.57,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
]"""
result = list()
stream = io.StringIO(json)
tokenizer = JsonTokenizer(stream)
JsonDecoder() \
.tokenizer(tokenizer) \
.root_class_name('Example') \
.predicate('ppu') \
.event_handler(lambda e, p: result.append((e, p))) \
.decode()
self.assertEqual(len(result), 3)
self.assertEqual(result[0][0], '0.55')
self.assertEqual(result[1][0], '0.56')
self.assertEqual(result[2][0], '0.57')
def test_decode_with_translators(self):
class MyBatter:
__slots__ = ('my_id', 'my_type')
def __init__(self, id, type):
self.my_id = id
self.my_type = type
class MyBatters:
__slots__ = ('my_batter')
def __init__(self, batter):
self.my_batter = batter
json = """
[
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" },
{ "id": "1003", "type": "Blueberry" },
{ "id": "1004", "type": "Devil's Food" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5007", "type": "Powdered Sugar" },
{ "id": "5006", "type": "Chocolate with Sprinkles" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.56,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5005", "type": "Sugar" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.57,
"batters":
{
"batter":
[
{ "id": "1001", "type": "Regular" },
{ "id": "1002", "type": "Chocolate" }
]
},
"topping":
[
{ "id": "5001", "type": "None" },
{ "id": "5002", "type": "Glazed" },
{ "id": "5003", "type": "Chocolate" },
{ "id": "5004", "type": "Maple" }
]
}
]"""
result = list()
stream = io.StringIO(json)
tokenizer = JsonTokenizer(stream)
JsonDecoder() \
.tokenizer(tokenizer) \
.root_class_name('Example') \
.translator('batters', lambda o: MyBatters(o.batter)) \
.translator('batters.batter', lambda o: MyBatter(o.id, o.type)) \
.event_handler(lambda e, p: result.append(e)) \
.decode()
self.assertEqual(len(result), 3)
self.assertEqual(type(result[0].batters).__name__, 'MyBatters')
self.assertEqual(type(result[0].batters.my_batter[0]).__name__, 'MyBatter')
self.assertEqual(result[0].batters.my_batter[0].my_id, '1001')
self.assertEqual(result[0].batters.my_batter[0].my_type, 'Regular')
def test_decode_array_of_arrays(self):
json = """
{
"id": "123",
"type": "car",
"data":
[
["red", "blue", "green"],
[1,2,3],
[true, false, true]
]
}"""
result = list()
stream = io.StringIO(json)
tokenizer = JsonTokenizer(stream)
JsonDecoder() \
.tokenizer(tokenizer) \
.root_class_name('Example') \
.event_handler(lambda e, p: result.append(e)) \
.decode()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].id, '123')
self.assertEqual(result[0].type, 'car')
self.assertEqual(len(result[0].data), 3)
self.assertEqual(len(result[0].data[0]), 3)
self.assertEqual(result[0].data[0][0], 'red')
self.assertEqual(result[0].data[0][1], 'blue')
self.assertEqual(result[0].data[0][2], 'green')
self.assertEqual(len(result[0].data[1]), 3)
self.assertEqual(result[0].data[1][0], '1')
self.assertEqual(result[0].data[1][1], '2')
self.assertEqual(result[0].data[1][2], '3')
self.assertEqual(len(result[0].data[2]), 3)
self.assertEqual(result[0].data[2][0], True)
self.assertEqual(result[0].data[2][1], False)
self.assertEqual(result[0].data[2][2], True)
suite = unittest.TestLoader().loadTestsFromTestCase(TestJsonDecoderMethods)
unittest.TextTestRunner(verbosity=2).run(suite)
|
import os
from verify import expect
from click.testing import CliRunner
from pyhistory.cli import main
from . import (
load_fixture, get_fixture_content, get_test_file_content, isolated_workdir,
)
@isolated_workdir
def test_add_list_delete_and_clear():
open('HISTORY.rst', 'w').close()
result = _run(['list'])
expect(result.output).to_be_equal(_join_lines(['', '']))
_run(['add', 'some_message'])
result = _run(['list'])
expect(result.output).to_be_equal(
_join_lines(['', '* some_message', ''])
)
_run(['add', 'next message'])
result = _run(['list'])
expect(result.output).to_be_equal(
_join_lines(['', '* some_message', '* next message', ''])
)
result = _run(['delete'])
expect(result.output).to_be_equal(_join_lines([
'', '1. some_message', '2. next message', '',
'(Delete by choosing entries numbers.)',
]))
_run(['delete', '1'])
result = _run(['list'])
expect(result.output).to_be_equal(
_join_lines(['', '* next message', ''])
)
_run(['add', 'some_message'])
_run(['clear', '--yes'])
result = _run(['list'])
expect(result.output).to_be_equal(_join_lines(['', '']))
@isolated_workdir
def test_update():
load_fixture('history.rst', 'HISTORY.rst')
_run(['add', 'some_message'])
_run(['add', 'next message'])
_run(['update', '1.0.6', '--date', 'today'])
content = get_fixture_content('history_after.rst')
file_content = get_test_file_content('HISTORY.rst')
expect(content).to_be_equal(file_content)
@isolated_workdir
def test_update_with_special_headlines():
load_fixture('history_special.rst', 'HISTORY.rst')
_run(['add', 'some_message'])
_run(['add', 'next message'])
_run(['update', '1.0.6', '--date', 'today'])
content = get_fixture_content('history_special_after.rst')
file_content = get_test_file_content('HISTORY.rst')
expect(content).to_be_equal(file_content)
@isolated_workdir
def test_update_at_line():
load_fixture('history.rst', 'HISTORY.rst')
_run(['add', 'some_message'])
_run(['add', 'next message'])
_run(['update', '1.0.6', '--date', 'today', '--at-line', '1'])
content = get_fixture_content('history_at_line_after.rst')
file_content = get_test_file_content('HISTORY.rst')
expect(content).to_be_equal(file_content)
@isolated_workdir
def test_update_at_wrong_line():
load_fixture('history.rst', 'HISTORY.rst')
res = _run(['update', '1.0.6', '--date', 'today', '--at-line', '0'])
expect(res.exit_code).to_be_equal(1)
expect(res.output).to_be_equal(
'"at_line" must be greater or equal to 1.\nAborted!\n'
)
@isolated_workdir
def test_update_at_negative_line():
load_fixture('history.rst', 'HISTORY.rst')
result = _run(['update', '1.0.6', '--date', 'today', '--at-line', '-1'])
expect(result.exit_code).to_be_equal(1)
@isolated_workdir
def test_update_with_line_too_long():
load_fixture('history.rst', 'HISTORY.rst')
_run([
'add', 'some very long and sophisticated message, which is too '
'long to fit 79 characters'
])
_run([
'add', 'next message, which also is very long, but should fit '
'into 79 characters aaaa'
])
_run([
'add', 'let just say Lorem ipsum dolor sit amet consectetur '
'adipisicing elit, sed do eiusmod tempor incididunt ut labore et '
'dolore magna aliqua. Ut enim ad minim veniam, quis nostrud '
'exercitation ullamco'
])
_run(['update', '1.0.6', '--date', 'today'])
content = get_fixture_content('history_update_long_line.rst')
file_content = get_test_file_content('HISTORY.rst')
expect(content).to_be_equal(file_content)
@isolated_workdir
def test_list_long_line():
load_fixture('history.rst', 'HISTORY.rst')
result = _run([
'add', 'some very long and sophisticated message, which is too '
'long to fit 79 characters'
])
result = _run(['list'])
expect(result.output).to_be_equal(
'\n'
'* some very long and sophisticated message, which is too long to '
'fit 79\n'
' characters\n'
'\n'
)
@isolated_workdir
def test_pyhistory_when_not_in_history_file_directory():
load_fixture('history.rst', 'HISTORY.rst')
original_working_dir = os.getcwd()
os.makedirs('one/two')
os.chdir('one/two')
_run(['add', 'some_message'])
_run(['add', 'next message'])
expect(len(os.listdir(os.getcwd()))).to_be_equal(0)
result = _run(['list'])
expect(result.output).to_be_equal(_join_lines(
['', '* some_message', '* next message', ''])
)
os.chdir(original_working_dir)
result = _run(['list'])
expect(result.output).to_be_equal(_join_lines(
['', '* some_message', '* next message', ''])
)
os.chdir('one/two')
_run(['update', '1.0.6', '--date', 'today'])
os.chdir(original_working_dir)
content = get_fixture_content('history_after.rst')
file_content = get_test_file_content('HISTORY.rst')
expect(content).to_be_equal(file_content)
@isolated_workdir
def test_delete_long_lines():
load_fixture('history.rst', 'HISTORY.rst')
_run([
'add', 'some very long and sophisticated message, which is too '
'long to fit 79 characters'
])
messages = [
'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
]
for message in messages:
_run(['add', message])
_run([
'add', 'next message, which also is very long, and should not '
'fit into 79 characters'
])
result = _run(['delete'])
expect(result.output).to_be_equal(
'\n'
'1. some very long and sophisticated message, which is too long '
'to fit 79\n'
' characters\n'
'2. two\n'
'3. three\n'
'4. four\n'
'5. five\n'
'6. six\n'
'7. seven\n'
'8. eight\n'
'9. nine\n'
'10. next message, which also is very long, and should not fit '
'into 79\n'
' characters\n'
'\n'
'(Delete by choosing entries numbers.)\n'
)
@isolated_workdir
def test_delete_in_non_root():
os.makedirs('one/two')
os.chdir('one')
test_delete_long_lines()
@isolated_workdir
def test_history_file_not_found():
result = _run(['update', '1.0.6', '--date', 'today'])
expect(result.exit_code).to_be_equal(1)
def _join_lines(output):
return '\n'.join(output) + '\n'
def _run(command):
runner = CliRunner()
return runner.invoke(main, command)
|
'''
Copyright 2021 Flexera Software LLC
See LICENSE.TXT for full license text
SPDX-License-Identifier: MIT
Author : sgeary
Created On : Fri Dec 03 2021
File : upload_project_codebase.py
'''
import logging
import requests
logger = logging.getLogger(__name__)
#--------------------------------------------------
def upload_archive(projectID, zipFileContents, baseURL, authToken):
apiOptions = "&deleteExistingFileOnServer=true&expansionLevel=1"
apiEndPoint = baseURL + "/codeinsight/api/project/uploadProjectCodebase"
apiEndPoint += "?projectId=" + str(projectID) + apiOptions
logger.debug(" apiEndPoint: %s" %apiEndPoint)
headers = {'Content-Type': 'application/octet-stream', 'Authorization': 'Bearer ' + authToken}
    # Make the request to upload the codebase archive
try:
response = requests.post(apiEndPoint, headers=headers, data=zipFileContents)
except requests.exceptions.RequestException as error: # Just catch all errors
logger.error(error)
return
###############################################################################
# We at least received a response from Code Insight so check the status to see
# what happened if there was an error or the expected data
if response.status_code == 200:
if "File upload successful" in response.json()["Content: "]:
logger.debug("File uploaded")
else:
logger.error(response.text)
return
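# Illustrative usage sketch (not part of the original module); the archive path,
# URL, token and project ID below are placeholders.
if __name__ == '__main__':
    with open('codebase.zip', 'rb') as zip_file:
        upload_archive(
            projectID=123,
            zipFileContents=zip_file.read(),
            baseURL='https://codeinsight.example.com',
            authToken='REPLACE_WITH_TOKEN',
        )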
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-07 09:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0022_auto_20180307_1551'),
]
operations = [
migrations.AddField(
model_name='catalog',
name='catalog_photo',
field=models.CharField(default='', max_length=200, null=True),
),
]
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class SupportApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_message(self, id_lang, email, id_contact, message, **kwargs):
"""
Create new message
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_message(id_lang, email, id_contact, message, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id_lang: Language ID used by user to write his message (required)
:param str email: User email in order to send him a response (required)
:param int id_contact: Contact ID to send the user message (required)
:param str message: User message (required)
:param int id_support: Link the message to a previous message
:param int id_product: Link the message to a product in catalog
:param int id_order: Link the message to an existing order
        :param bool send_mail: Send a confirmation email to the provided email address
:return: Support
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_message_with_http_info(id_lang, email, id_contact, message, **kwargs)
else:
(data) = self.create_message_with_http_info(id_lang, email, id_contact, message, **kwargs)
return data
def create_message_with_http_info(self, id_lang, email, id_contact, message, **kwargs):
"""
Create new message
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_message_with_http_info(id_lang, email, id_contact, message, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id_lang: Language ID used by user to write his message (required)
:param str email: User email in order to send him a response (required)
:param int id_contact: Contact ID to send the user message (required)
:param str message: User message (required)
:param int id_support: Link the message to a previous message
:param int id_product: Link the message to a product in catalog
:param int id_order: Link the message to an existing order
        :param bool send_mail: Send a confirmation email to the provided email address
:return: Support
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id_lang', 'email', 'id_contact', 'message', 'id_support', 'id_product', 'id_order', 'send_mail']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_message" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id_lang' is set
if ('id_lang' not in params) or (params['id_lang'] is None):
raise ValueError("Missing the required parameter `id_lang` when calling `create_message`")
# verify the required parameter 'email' is set
if ('email' not in params) or (params['email'] is None):
raise ValueError("Missing the required parameter `email` when calling `create_message`")
# verify the required parameter 'id_contact' is set
if ('id_contact' not in params) or (params['id_contact'] is None):
raise ValueError("Missing the required parameter `id_contact` when calling `create_message`")
# verify the required parameter 'message' is set
if ('message' not in params) or (params['message'] is None):
raise ValueError("Missing the required parameter `message` when calling `create_message`")
collection_formats = {}
resource_path = '/support'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'id_lang' in params:
form_params.append(('id_lang', params['id_lang']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'id_support' in params:
form_params.append(('id_support', params['id_support']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'email' in params:
form_params.append(('email', params['email']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'id_contact' in params:
form_params.append(('id_contact', params['id_contact']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'message' in params:
form_params.append(('message', params['message']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'id_product' in params:
form_params.append(('id_product', params['id_product']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'id_order' in params:
form_params.append(('id_order', params['id_order']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'send_mail' in params:
form_params.append(('send_mail', params['send_mail']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Support',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_contacts(self, **kwargs):
"""
Get contacts list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contacts(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:return: Contacts
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_contacts_with_http_info(**kwargs)
else:
(data) = self.get_contacts_with_http_info(**kwargs)
return data
def get_contacts_with_http_info(self, **kwargs):
"""
Get contacts list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_contacts_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:return: Contacts
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page', 'sort_by', 'sort_direction']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_contacts" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/support/contacts'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'sort_by' in params:
query_params['sort_by'] = params['sort_by']
if 'sort_direction' in params:
query_params['sort_direction'] = params['sort_direction']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Contacts',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
from PyQt5 import QtCore
from PyQt5.QtCore import QVariant
import sys
from packaging import version as version_mod
python_version = f'{str(sys.version_info.major)}.{str(sys.version_info.minor)}'
if version_mod.parse(python_version) >= version_mod.parse('3.8'): # from version 3.8 this feature is included in the
# standard lib
from importlib import metadata
else:
import importlib_metadata as metadata # pragma: no cover
import pkgutil
import traceback
from collections import OrderedDict
import numpy as np
import datetime
from pathlib import Path
from ctypes import CFUNCTYPE
if 'win32' in sys.platform:
from ctypes import WINFUNCTYPE
import os
import importlib
import toml
import logging
from logging.handlers import TimedRotatingFileHandler
import inspect
import json
plot_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (14, 207, 189), (207, 14, 166), (207, 204, 14)]
Cb = 1.602176e-19 # coulomb
h = 6.626068e-34 # J.s
c = 2.997924586e8 # m.s-1
def get_set_local_dir(basename='pymodaq_local'):
"""Defines, creates abd returns a local folder where configurations files will be saved
Parameters
----------
basename: (str) how the configuration folder will be named
Returns
-------
Path: the local path
"""
local_path = Path.home().joinpath(basename)
if not local_path.is_dir(): # pragma: no cover
try:
local_path.mkdir()
except Exception as e:
local_path = Path(__file__).parent.parent.joinpath(basename)
info = f"Cannot create local folder from your **Home** defined location: {Path.home()}," \
f" using PyMoDAQ's folder as local directory: {local_path}"
print(info)
if not local_path.is_dir():
local_path.mkdir()
return local_path
def get_set_config_path(config_name='config'):
"""Creates a folder in the local config directory to store specific configuration files
Parameters
----------
config_name: (str) name of the configuration folder
Returns
-------
See Also
--------
get_set_local_dir
"""
local_path = get_set_local_dir()
path = local_path.joinpath(config_name)
if not path.is_dir():
path.mkdir() # pragma: no cover
return path
def get_set_log_path():
""" creates and return the config folder path for log files
"""
return get_set_config_path('log')
def set_logger(logger_name, add_handler=False, base_logger=False, add_to_console=False, log_level=None):
"""defines a logger of a given name and eventually add an handler to it
Parameters
----------
    logger_name: (str) the name of the logger (usually it is the module name as returned by get_module_name)
    add_handler: (bool) if True, adds a TimedRotatingFileHandler to the logger instance (should be True if the
    logger is set from the main app)
base_logger: (bool) specify if this is the parent logger (usually where one defines the handler)
Returns
-------
logger: (logging.logger) logger instance
See Also
--------
get_module_name, logging.handlers.TimedRotatingFileHandler
"""
if not base_logger:
logger_name = f'pymodaq.{logger_name}'
logger = logging.getLogger(logger_name)
log_path = get_set_config_path('log')
if add_handler:
if log_level is None:
config = load_config()
log_level = config['general']['debug_level']
logger.setLevel(log_level)
handler = TimedRotatingFileHandler(log_path.joinpath('pymodaq.log'), when='midnight')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
if add_to_console:
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
return logger
logger = set_logger('daq_utils')
def get_version():
with open(str(Path(__file__).parent.parent.joinpath('resources/VERSION')), 'r') as fvers:
version = fvers.read().strip()
return version
def copy_preset(): # pragma: no cover
path = get_set_preset_path().joinpath('preset_default.xml')
    if not path.exists():  # copy the preset_default from pymodaq folder and create one in pymodaq's local folder
with open(str(Path(__file__).parent.parent.joinpath('resources/preset_default.xml')), 'r') as file:
path.write_text(file.read())
def load_config(config_path=None): # pragma: no cover
if not config_path:
config_path = get_set_local_dir().joinpath('config.toml')
config_base = toml.load(Path(__file__).parent.parent.joinpath('resources/config_template.toml'))
    if not config_path.exists():  # copy the template from pymodaq folder and create one in pymodaq's local folder
config_path.write_text(toml.dumps(config_base))
# check if all fields are there
config = toml.load(config_path)
if check_config(config_base, config):
config_path.write_text(toml.dumps(config))
return config
def check_config(config_base, config_local):
status = False
for key in config_base:
if key in config_local:
if isinstance(config_base[key], dict):
status = status or check_config(config_base[key], config_local[key])
else:
config_local[key] = config_base[key]
status = True
return status
config = load_config()
class JsonConverter:
def __init__(self):
super().__init__()
@classmethod
def trusted_types(cls):
return ['float', 'int', 'str', 'datetime', 'date', 'time', 'tuple', 'list', 'bool', 'bytes']
@classmethod
def istrusted(cls, type_name):
return type_name in cls.trusted_types()
@classmethod
def object2json(cls, obj):
dic = dict(module=type(obj).__module__, type=type(obj).__name__, data=repr(obj))
return json.dumps(dic)
@classmethod
def json2object(cls, jsonstring):
try:
dic = json.loads(jsonstring)
if isinstance(dic, dict):
if dic['type'] in cls.trusted_types():
return eval(dic['data'])
else:
return dic
else: # pragma: no cover
return dic
except Exception:
return jsonstring
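# Round-trip illustration for JsonConverter (kept as a comment so nothing runs at
# import time); only the trusted types listed above are eval'ed back into objects:
#   >>> s = JsonConverter.object2json(datetime.datetime(2021, 1, 1))
#   >>> JsonConverter.json2object(s)
#   datetime.datetime(2021, 1, 1, 0, 0)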
def decode_data(encoded_data):
"""
    Decode the QByteArray data generated when dropping items in a table/tree/list view
Parameters
----------
encoded_data: QByteArray
Encoded data of the mime data to be dropped
Returns
-------
data: list
list of dict whose key is the QtRole in the Model, and the value a QVariant
"""
data = []
ds = QtCore.QDataStream(encoded_data, QtCore.QIODevice.ReadOnly)
while not ds.atEnd():
row = ds.readInt32()
col = ds.readInt32()
map_items = ds.readInt32()
item = {}
for ind in range(map_items):
key = ds.readInt32()
value = QVariant()
ds >> value
item[QtCore.Qt.ItemDataRole(key)] = value.value()
data.append(item)
return data
# ###################################
# # Units conversion
def Enm2cmrel(E_nm, ref_wavelength=515):
"""Converts energy in nm to cm-1 relative to a ref wavelength
Parameters
----------
E_nm: float
photon energy in wavelength (nm)
ref_wavelength: float
reference wavelength in nm from which calculate the photon relative energy
Returns
-------
float
photon energy in cm-1 relative to the ref wavelength
Examples
--------
>>> Enm2cmrel(530, 515)
549.551199853453
"""
return 1 / (ref_wavelength * 1e-7) - 1 / (E_nm * 1e-7)
def Ecmrel2Enm(Ecmrel, ref_wavelength=515):
"""Converts energy from cm-1 relative to a ref wavelength to an energy in wavelength (nm)
Parameters
----------
Ecmrel: float
photon energy in cm-1
ref_wavelength: float
reference wavelength in nm from which calculate the photon relative energy
Returns
-------
float
photon energy in nm
Examples
--------
>>> Ecmrel2Enm(500, 515)
528.6117526302285
"""
Ecm = 1 / (ref_wavelength * 1e-7) - Ecmrel
return 1 / (Ecm * 1e-7)
def eV2nm(E_eV):
"""Converts photon energy from electronvolt to wavelength in nm
Parameters
----------
E_eV: float
Photon energy in eV
Returns
-------
float
photon energy in nm
Examples
--------
>>> eV2nm(1.55)
799.898112990037
"""
E_J = E_eV * Cb
E_freq = E_J / h
E_nm = c / E_freq * 1e9
return E_nm
def nm2eV(E_nm):
"""Converts photon energy from wavelength in nm to electronvolt
Parameters
----------
E_nm: float
Photon energy in nm
Returns
-------
float
photon energy in eV
Examples
--------
>>> nm2eV(800)
1.549802593918197
"""
E_freq = c / E_nm * 1e9
E_J = E_freq * h
E_eV = E_J / Cb
return E_eV
def E_J2eV(E_J):
E_eV = E_J / Cb
return E_eV
def eV2cm(E_eV):
"""Converts photon energy from electronvolt to absolute cm-1
Parameters
----------
E_eV: float
Photon energy in eV
Returns
-------
float
photon energy in cm-1
Examples
--------
>>> eV2cm(0.07)
564.5880342655984
"""
E_nm = eV2nm(E_eV)
E_cm = 1 / (E_nm * 1e-7)
return E_cm
def nm2cm(E_nm):
"""Converts photon energy from wavelength to absolute cm-1
Parameters
----------
E_nm: float
Photon energy in nm
Returns
-------
float
photon energy in cm-1
Examples
--------
>>> nm2cm(0.04)
0.000025
"""
return 1 / (E_nm * 1e7)
def cm2nm(E_cm):
"""Converts photon energy from absolute cm-1 to wavelength
Parameters
----------
E_cm: float
photon energy in cm-1
Returns
-------
float
Photon energy in nm
Examples
--------
>>> cm2nm(1e5)
100
"""
return 1 / (E_cm * 1e-7)
def eV2E_J(E_eV):
E_J = E_eV * Cb
return E_J
def eV2radfs(E_eV):
E_J = E_eV * Cb
E_freq = E_J / h
E_radfs = E_freq * 2 * np.pi / 1e15
return E_radfs
def l2w(x, speedlight=300):
"""Converts photon energy in rad/fs to nm (and vice-versa)
Parameters
----------
x: float
photon energy in wavelength or rad/fs
speedlight: float, optional
the speed of light, by default 300 nm/fs
Returns
-------
float
Examples
--------
>>> l2w(800)
2.356194490192345
>>> l2w(800,3e8)
2356194.490192345
"""
y = 2 * np.pi * speedlight / x
return y
#############################
def capitalize(string, Nfirst=1):
"""
Returns same string but with first Nfirst letters upper
Parameters
----------
string: (str)
Nfirst: (int)
Returns
-------
str
"""
return string[:Nfirst].upper() + string[Nfirst:]
def uncapitalize(string, Nfirst=1):
return string[:Nfirst].lower() + string[Nfirst:]
def get_data_dimension(arr, scan_type='scan1D', remove_scan_dimension=False):
dimension = len(arr.shape)
if dimension == 1:
if arr.size == 1:
dimension = 0
if remove_scan_dimension:
if scan_type.lower() == 'scan1d':
dimension -= 1
elif scan_type.lower() == 'scan2d':
dimension -= 2
else:
if dimension > 2:
dimension = 'N'
return arr.shape, f'{dimension}D', arr.size
def scroll_log(scroll_val, min_val, max_val):
"""
Convert a scroll value [0-100] to a log scale between min_val and max_val
Parameters
----------
    scroll_val
min_val
max_val
Returns
-------
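    float
    Examples
    --------
    >>> scroll_log(50, 1, 100)
    10.0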
"""
assert scroll_val >= 0
assert scroll_val <= 100
value = scroll_val * (np.log10(max_val) - np.log10(min_val)) / 100 + np.log10(min_val)
return 10 ** value
def scroll_linear(scroll_val, min_val, max_val):
"""
Convert a scroll value [0-100] to a linear scale between min_val and max_val
Parameters
----------
    scroll_val
min_val
max_val
Returns
-------
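    float
    Examples
    --------
    >>> scroll_linear(50, 0, 100)
    50.0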
"""
assert scroll_val >= 0
assert scroll_val <= 100
value = scroll_val * (max_val - min_val) / 100 + min_val
return value
def getLineInfo():
"""get information about where the Exception has been triggered"""
tb = sys.exc_info()[2]
res = ''
for t in traceback.format_tb(tb):
res += t
return res
class ThreadCommand(object):
""" | Micro class managing the thread commands.
|
| A thread command is composed of a string name defining the command to execute and an attribute list splitable making arguments of the called function.
=============== =============
**Attributes** **Type**
*command* string
*attributes* generic list
=============== =============
"""
def __init__(self, command="", attributes=[]):
self.command = command
self.attributes = attributes
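# Minimal illustration (the command name below is arbitrary): a ThreadCommand just
# bundles a command string with a list of attributes for the receiving thread.
#   >>> cmd = ThreadCommand('update_status', ['Moving stage'])
#   >>> cmd.command, cmd.attributes
#   ('update_status', ['Moving stage'])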
class Axis(dict):
"""
Utility class defining an axis for pymodaq's viewers, attributes can be accessed as dictionary keys
"""
def __init__(self, data=None, label='', units='', **kwargs):
"""
Parameters
----------
data
label
units
"""
if units is None:
units = ''
if label is None:
label = ''
if data is None or isinstance(data, np.ndarray):
self['data'] = data
else:
raise TypeError('data for the Axis class should be a ndarray')
if not isinstance(label, str):
raise TypeError('label for the Axis class should be a string')
self['label'] = label
if not isinstance(units, str):
raise TypeError('units for the Axis class should be a string')
self['units'] = units
self.update(kwargs)
class NavAxis(Axis):
def __init__(self, data=None, label='', units='', nav_index=-1, **kwargs):
super().__init__(data=data, label=label, units=units, **kwargs)
if nav_index < 0:
            raise ValueError('nav_index should be a positive integer representing the index of this axis among all '
'navigation axes')
self['nav_index'] = nav_index
class Data(OrderedDict):
def __init__(self, name='', source='raw', distribution='uniform', x_axis=Axis(), y_axis=Axis(), **kwargs):
"""
Generic class subclassing from OrderedDict defining data being exported from pymodaq's plugin or viewers,
        attributes can be accessed as dictionary keys. Should be subclassed for real data
Parameters
----------
source: (str) either 'raw' or 'roi...' if straight from a plugin or data processed within a viewer
distribution: (str) either 'uniform' or 'spread'
x_axis: (Axis) Axis class defining the corresponding axis (with data either linearly spaced or containing the
x positions of the spread points)
y_axis: (Axis) Axis class defining the corresponding axis (with data either linearly spaced or containing the
        y positions of the spread points)
"""
if not isinstance(name, str):
raise TypeError('name for the DataToExport class should be a string')
self['name'] = name
if not isinstance(source, str):
raise TypeError('source for the DataToExport class should be a string')
elif not ('raw' in source or 'roi' in source):
raise ValueError('Invalid "source" for the DataToExport class')
self['source'] = source
if not isinstance(distribution, str):
raise TypeError('distribution for the DataToExport class should be a string')
elif distribution not in ('uniform', 'spread'):
raise ValueError('Invalid "distribution" for the DataToExport class')
self['distribution'] = distribution
if not isinstance(x_axis, Axis):
if isinstance(x_axis, np.ndarray):
x_axis = Axis(data=x_axis)
else:
raise TypeError('x_axis for the DataToExport class should be a Axis class')
self['x_axis'] = x_axis
elif x_axis['data'] is not None:
self['x_axis'] = x_axis
if not isinstance(y_axis, Axis):
if isinstance(y_axis, np.ndarray):
y_axis = Axis(data=y_axis)
else:
raise TypeError('y_axis for the DataToExport class should be a Axis class')
self['y_axis'] = y_axis
elif y_axis['data'] is not None:
self['y_axis'] = y_axis
for k in kwargs:
self[k] = kwargs[k]
class DataFromPlugins(Data):
def __init__(self, data=None, dim='', labels=[], nav_axes=[], nav_x_axis=Axis(), nav_y_axis=Axis(), **kwargs):
"""
Parameters
----------
dim: (str) data dimensionality (either Data0D, Data1D, Data2D or DataND)
"""
super().__init__(**kwargs)
self['labels'] = labels
if len(nav_axes) != 0:
self['nav_axes'] = nav_axes
if nav_x_axis['data'] is not None:
self['nav_x_axis'] = nav_x_axis
if nav_y_axis['data'] is not None:
self['nav_y_axis'] = nav_y_axis
iscorrect = True
if data is not None:
if isinstance(data, list):
for dat in data:
if not isinstance(dat, np.ndarray):
iscorrect = False
else:
iscorrect = False
if iscorrect:
self['data'] = data
else:
raise TypeError('data for the DataFromPlugins class should be None or a list of numpy arrays')
if dim not in ('Data0D', 'Data1D', 'Data2D', 'DataND') and data is not None:
ndim = len(data[0].shape)
if ndim == 1:
if data[0].size == 1:
dim = 'Data0D'
else:
dim = 'Data1D'
elif ndim == 2:
dim = 'Data2D'
else:
dim = 'DataND'
self['dim'] = dim
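# Minimal usage sketch for the data classes above (values are illustrative only):
#     dfp = DataFromPlugins(name='mydetector', data=[np.zeros((10,))], dim='Data1D',
#                           labels=['channel 0'],
#                           x_axis=Axis(data=np.linspace(0., 9., 10), label='time', units='s'))
#     dfp['dim']     # -> 'Data1D'
#     dfp['x_axis']  # -> the Axis dict defined above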
class DataToExport(Data):
def __init__(self, data=None, dim='', **kwargs):
"""
Utility class defining a data being exported from pymodaq's viewers, attributes can be accessed as dictionary keys
Parameters
----------
data: (ndarray or a scalar)
dim: (str) data dimensionality (either Data0D, Data1D, Data2D or DataND)
"""
super().__init__(**kwargs)
if data is None or isinstance(data, np.ndarray) or isinstance(data, float) or isinstance(data, int):
self['data'] = data
else:
raise TypeError('data for the DataToExport class should be a scalar or a ndarray')
if dim not in ('Data0D', 'Data1D', 'Data2D', 'DataND') or data is not None:
if isinstance(data, np.ndarray):
ndim = len(data.shape)
if ndim == 1:
if data.size == 1:
dim = 'Data0D'
else:
dim = 'Data1D'
elif ndim == 2:
dim = 'Data2D'
else:
dim = 'DataND'
else:
dim = 'Data0D'
self['dim'] = dim
class ScaledAxis(Axis):
def __init__(self, label='', units='', offset=0, scaling=1):
super().__init__(label=label, units=units)
        if not (isinstance(offset, float) or isinstance(offset, int)):
            raise TypeError('offset for the ScaledAxis class should be a float (or int)')
        self['offset'] = offset
        if not (isinstance(scaling, float) or isinstance(scaling, int)):
            raise TypeError('scaling for the ScaledAxis class should be a non null float (or int)')
        if scaling == 0:
            raise ValueError('scaling for the ScaledAxis class should be a non null float (or int)')
self['scaling'] = scaling
class ScalingOptions(dict):
def __init__(self, scaled_xaxis=ScaledAxis(), scaled_yaxis=ScaledAxis()):
assert isinstance(scaled_xaxis, ScaledAxis)
assert isinstance(scaled_yaxis, ScaledAxis)
self['scaled_xaxis'] = scaled_xaxis
self['scaled_yaxis'] = scaled_yaxis
def recursive_find_files_extension(ini_path, ext, paths=None):
    if paths is None:
        paths = []
    with os.scandir(ini_path) as it:
for entry in it:
if os.path.splitext(entry.name)[1][1:] == ext and entry.is_file():
paths.append(entry.path)
elif entry.is_dir():
recursive_find_files_extension(entry.path, ext, paths)
return paths
def recursive_find_expr_in_files(ini_path, exp='make_enum', paths=None,
                                 filters=('.git', '.idea', '__pycache__', 'build', 'egg', 'documentation', '.tox')):
    if paths is None:
        paths = []
    for child in Path(ini_path).iterdir():
if not any(filt in str(child) for filt in filters):
if child.is_dir():
recursive_find_expr_in_files(child, exp, paths, filters)
else:
try:
with child.open('r') as f:
for ind, line in enumerate(f.readlines()):
if exp in line:
paths.append([child, ind])
except Exception:
pass
return paths
def count_lines(ini_path, count=0, filters=['lextab', 'yacctab','pycache', 'pyc']):
# if Path(ini_path).is_file():
# with Path(ini_path).open('r') as f:
# count += len(f.readlines())
# return count
for child in Path(ini_path).iterdir():
if child.is_dir():
count = count_lines(child, count)
else:
try:
if not any([filt in child.name for filt in filters]):
if '.py' in child.name:
with child.open('r') as f:
count += len(f.readlines())
else:
print(child.stem)
except Exception:
pass
return count
def remove_spaces(string):
"""
return a string without any white spaces in it
Parameters
----------
string
Returns
-------
"""
return ''.join(string.split())
def rint(x):
"""
almost same as numpy rint function but return an integer
Parameters
----------
x: (float or integer)
Returns
-------
nearest integer
"""
return int(np.rint(x))
def elt_as_first_element(elt_list, match_word='Mock'):
if not hasattr(elt_list, '__iter__'):
raise TypeError('elt_list must be an iterable')
if elt_list:
ind_elt = 0
for ind, elt in enumerate(elt_list):
if not isinstance(elt, str):
raise TypeError('elt_list must be a list of str')
if match_word in elt:
ind_elt = ind
break
plugin_match = elt_list[ind_elt]
elt_list.remove(plugin_match)
plugins = [plugin_match]
plugins.extend(elt_list)
else:
plugins = []
return plugins
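# Behaviour sketch: the element containing match_word is moved to the front, e.g.
#     elt_as_first_element(['PluginA', 'Mock', 'PluginB'])  # -> ['Mock', 'PluginA', 'PluginB']
# note that the input list is modified in place (the matched element is removed from it).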
def elt_as_first_element_dicts(elt_list, match_word='Mock', key='name'):
if not hasattr(elt_list, '__iter__'):
raise TypeError('elt_list must be an iterable')
if elt_list:
ind_elt = 0
for ind, elt in enumerate(elt_list):
if not isinstance(elt, dict):
raise TypeError('elt_list must be a list of dicts')
if match_word in elt[key]:
ind_elt = ind
break
plugin_match = elt_list[ind_elt]
elt_list.remove(plugin_match)
plugins = [plugin_match]
plugins.extend(elt_list)
else:
plugins = []
return plugins
def get_extensions():
"""
Get pymodaq extensions as a list
Returns
-------
    list: list of dicts containing the name and module of each found extension
"""
extension_import = []
entry_points = metadata.entry_points()
if 'pymodaq.extensions' in entry_points:
discovered_extension = entry_points['pymodaq.extensions']
for pkg in discovered_extension:
try:
module = importlib.import_module(pkg.value)
if hasattr(module, 'NICE_NAME'):
name = module.NICE_NAME
else:
name = pkg.value
extension = {'name': name, 'module': module}
extension_import.append(extension)
except Exception as e: # pragma: no cover
logger.warning(f'Impossible to import the {pkg.value} extension: {str(e)}')
return extension_import
def find_dict_if_matched_key_val(dict_tmp, key, value):
"""
    Check if a key/value pair matches in a given dictionary
    Parameters
    ----------
    dict_tmp: (dict) the dictionary to be tested
key: (str) a key string to look for in dict_tmp
value: (object) any python object
Returns
-------
bool: True if the key/value pair has been found in dict_tmp
"""
if key in dict_tmp:
if dict_tmp[key] == value:
return True
return False
def find_dict_in_list_from_key_val(dicts, key, value, return_index=False):
""" lookup within a list of dicts. Look for the dict within the list which has the correct key, value pair
Parameters
----------
    dicts: (list) list of dictionaries
    key: (str) specific key to look for in each dict
    value: value to match
    return_index: (bool) if True, also return the index of the matching dict (-1 if not found)
Returns
-------
dict: if found otherwise returns None
"""
for ind, dict_tmp in enumerate(dicts):
if find_dict_if_matched_key_val(dict_tmp, key, value):
if return_index:
return dict_tmp, ind
else:
return dict_tmp
if return_index:
return None, -1
else:
return None
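# Usage sketch:
#     find_dict_in_list_from_key_val([{'name': 'a'}, {'name': 'b'}], 'name', 'b')
#     # -> {'name': 'b'}
#     find_dict_in_list_from_key_val([{'name': 'a'}], 'name', 'z', return_index=True)
#     # -> (None, -1)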
def get_models(model_name=None):
"""
    Get PID Models as a list to instantiate Control Actuators per degree of freedom in the model
Returns
-------
    list: list of dicts containing the name and python module of each found model
"""
from pymodaq.pid.utils import PIDModelGeneric
models_import = []
entry_points = metadata.entry_points()
if 'pymodaq.pid_models' in entry_points:
discovered_models = entry_points['pymodaq.pid_models']
for pkg in discovered_models:
try:
module = importlib.import_module(pkg.value)
module_name = pkg.value
for mod in pkgutil.iter_modules([str(Path(module.__file__).parent.joinpath('models'))]):
try:
model_module = importlib.import_module(f'{module_name}.models.{mod.name}', module)
classes = inspect.getmembers(model_module, inspect.isclass)
for name, klass in classes:
if klass.__base__ is PIDModelGeneric:
models_import.append({'name': mod.name, 'module': model_module, 'class': klass})
break
except Exception as e: # pragma: no cover
logger.warning(str(e))
except Exception as e: # pragma: no cover
logger.warning(f'Impossible to import the {pkg.value} extension: {str(e)}')
if model_name is None:
return models_import
else:
return find_dict_in_list_from_key_val(models_import, 'name', model_name)
def get_plugins(plugin_type='daq_0Dviewer'): # pragma: no cover
"""
Get plugins names as a list
Parameters
----------
plugin_type: (str) plugin type either 'daq_0Dviewer', 'daq_1Dviewer', 'daq_2Dviewer', 'daq_NDviewer' or 'daq_move'
    Returns
    -------
    list: list of dicts containing the name and module of each importable plugin
    """
plugins_import = []
discovered_plugins = metadata.entry_points()['pymodaq.plugins']
for module in discovered_plugins:
try:
if plugin_type == 'daq_move':
submodule = importlib.import_module(f'{module.value}.daq_move_plugins', module.value)
else:
submodule = importlib.import_module(f'{module.value}.daq_viewer_plugins.plugins_{plugin_type[4:6]}',
module.value)
plugin_list = [{'name': mod[len(plugin_type) + 1:],
'module': submodule} for mod in [mod[1] for
mod in pkgutil.iter_modules([submodule.path.parent])]
if plugin_type in mod]
# check if modules are importable
for mod in plugin_list:
try:
if plugin_type == 'daq_move':
importlib.import_module(f'{submodule.__package__}.daq_move_{mod["name"]}')
else:
importlib.import_module(f'{submodule.__package__}.daq_{plugin_type[4:6]}viewer_{mod["name"]}')
plugins_import.append(mod)
except Exception: # pragma: no cover
pass
except Exception: # pragma: no cover
pass
#add utility plugin for PID
if plugin_type == 'daq_move':
try:
submodule = importlib.import_module('pymodaq.pid')
plugins_import.append({'name': 'PID', 'module': submodule})
except Exception: # pragma: no cover
pass
plugins_import = elt_as_first_element_dicts(plugins_import, match_word='Mock', key='name')
return plugins_import
def check_vals_in_iterable(iterable1, iterable2):
assert len(iterable1) == len(iterable2)
iterable1 = list(iterable1) # so the assertion below is valid for any kind of iterable, list, tuple, ndarray...
iterable2 = list(iterable2)
for val1, val2 in zip(iterable1, iterable2):
assert val1 == val2
def get_set_preset_path():
""" creates and return the config folder path for managers files
"""
return get_set_config_path('preset_configs')
def get_set_batch_path():
""" creates and return the config folder path for managers files
"""
return get_set_config_path('batch_configs')
def get_set_pid_path():
""" creates and return the config folder path for PID files
"""
return get_set_config_path('pid_configs')
def get_set_layout_path():
""" creates and return the config folder path for layout files
"""
return get_set_config_path('layout_configs')
def get_set_remote_path():
""" creates and return the config folder path for remote (shortcuts or joystick) files
"""
return get_set_config_path('remote_configs')
def get_set_overshoot_path():
""" creates and return the config folder path for overshoot files
"""
return get_set_config_path('overshoot_configs')
def get_set_roi_path():
""" creates and return the config folder path for managers files
"""
return get_set_config_path('roi_configs')
def get_module_name(module__file__path):
"""from the full path of a module extract its name"""
path = Path(module__file__path)
return path.stem
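# Usage sketch (path is illustrative):
#     get_module_name('/path/to/daq_utils.py')  # -> 'daq_utils'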
def caller_name(skip=2):
"""Get a name of a caller in the format module.class.method
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height
"""
stack = inspect.stack()
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start][0]
name = []
module = inspect.getmodule(parentframe)
# `modname` can be None when frame is executed directly in console
# TODO(techtonik): consider using __main__
if module:
name.append(module.__name__)
# detect classname
if 'self' in parentframe.f_locals:
# I don't know any way to detect call from the object method
# XXX: there seems to be no way to detect static method call - it will
# be just a function call
name.append(parentframe.f_locals['self'].__class__.__name__)
codename = parentframe.f_code.co_name
if codename != '<module>': # top level usually
name.append(codename) # function or a method
del parentframe
return ".".join(name)
def zeros_aligned(n, align, dtype=np.uint32):
"""
    Get aligned memory array with alignment align.
    Parameters
    ----------
    n: (int) number of elements of type dtype to allocate
align: (int) memory alignment
dtype: (numpy.dtype) type of the stored memory elements
    Returns
    -------
    ndarray: zero-initialised array of n elements of type dtype whose data pointer is aligned on align bytes
    """
dtype = np.dtype(dtype)
nbytes = n * dtype.itemsize
buff = np.zeros(nbytes + align, dtype=np.uint8)
start_index = -buff.ctypes.data % align
return buff[start_index:start_index + nbytes].view(dtype)
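# Usage sketch:
#     buf = zeros_aligned(1024, 4096, dtype=np.uint16)
#     buf.size                # -> 1024
#     buf.ctypes.data % 4096  # -> 0, i.e. the buffer start is 4096-byte aligned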
def cfunc(name, dll, result, *args):
"""build and apply a ctypes prototype complete with parameter flags
Parameters
----------
name: (str) function name in the dll
dll: (ctypes.windll) dll object
result : result is the type of the result (c_int,..., python function handle,...)
args: list of tuples with 3 or 4 elements each like (argname, argtype, in/out, default) where argname is the
name of the argument, argtype is the type, in/out is 1 for input and 2 for output, and default is an optional
default value.
Returns
-------
python function
"""
atypes = []
aflags = []
for arg in args:
atypes.append(arg[1])
aflags.append((arg[2], arg[0]) + arg[3:])
return CFUNCTYPE(result, *atypes)((name, dll), tuple(aflags))
def winfunc(name, dll, result, *args):
"""build and apply a ctypes prototype complete with parameter flags
Parameters
----------
name:(str) function name in the dll
dll: (ctypes.windll) dll object
result: result is the type of the result (c_int,..., python function handle,...)
args: list of tuples with 3 or 4 elements each like (argname, argtype, in/out, default) where argname is the
name of the argument, argtype is the type, in/out is 1 for input and 2 for output, and default is an optional
default value.
Returns
-------
python function
"""
atypes = []
aflags = []
for arg in args:
atypes.append(arg[1])
aflags.append((arg[2], arg[0]) + arg[3:])
return WINFUNCTYPE(result, *atypes)((name, dll), tuple(aflags))
def set_param_from_param(param_old, param_new):
"""
Walk through parameters children and set values using new parameter values.
"""
for child_old in param_old.children():
# try:
path = param_old.childPath(child_old)
child_new = param_new.child(*path)
param_type = child_old.type()
if 'group' not in param_type: # covers 'group', custom 'groupmove'...
# try:
if 'list' in param_type: # check if the value is in the limits of the old params (limits are usually set at initialization)
if child_new.value() not in child_old.opts['limits']:
child_old.opts['limits'].append(child_new.value())
child_old.setValue(child_new.value())
elif 'str' in param_type or 'browsepath' in param_type or 'text' in param_type:
if child_new.value() != "": # to make sure one doesnt overwrite something
child_old.setValue(child_new.value())
else:
child_old.setValue(child_new.value())
# except Exception as e:
# print(str(e))
else:
set_param_from_param(child_old, child_new)
# except Exception as e:
# print(str(e))
# ########################
# #File management
def get_new_file_name(base_path=Path(config['data_saving']['h5file']['save_path']), base_name='tttr_data'):
if isinstance(base_path, str):
base_path = Path(base_path)
today = datetime.datetime.now()
date = today.strftime('%Y%m%d')
year = today.strftime('%Y')
year_dir = base_path.joinpath(year)
if not year_dir.is_dir():
year_dir.mkdir()
curr_dir = base_path.joinpath(year, date)
if not curr_dir.is_dir():
curr_dir.mkdir()
files = []
for entry in curr_dir.iterdir():
if entry.name.startswith(base_name) and entry.is_file():
files.append(entry.stem)
files.sort()
if not files:
index = 0
else:
index = int(files[-1][-3:]) + 1
file = f'{base_name}_{index:03d}'
return file, curr_dir
# ##############
# Math utilities
def my_moment(x, y):
"""Returns the moments of a distribution y over an axe x
Parameters
----------
x: list or ndarray
vector of floats
y: list or ndarray
vector of floats corresponding to the x axis
Returns
-------
m: list
        Contains the mean and the standard deviation (rms width) of the distribution y
"""
dx = np.mean(np.diff(x))
norm = np.sum(y) * dx
m = [np.sum(x * y) * dx / norm]
m.extend([np.sqrt(np.sum((x - m[0]) ** 2 * y) * dx / norm)])
return m
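# Usage sketch: for a gaussian of intensity FWHM 2 centred on 1 (see gauss1D below),
# the returned moments are approximately its centre and rms width:
#     x = np.linspace(-9., 11., 2001)
#     my_moment(x, gauss1D(x, 1., 2.))  # -> [~1.0, ~1.2]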
def normalize(x):
x = x - np.min(x)
x = x / np.max(x)
return x
def odd_even(x):
"""
odd_even tells if a number is odd (return True) or even (return False)
Parameters
----------
x: the integer number to test
Returns
-------
bool : boolean
"""
if not isinstance(x, int):
raise TypeError(f'{x} should be an integer')
    return int(x) % 2 != 0
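# Usage sketch:
#     odd_even(3)    # -> True
#     odd_even(4)    # -> False
#     odd_even(2.0)  # raises TypeError, only plain integers are accepted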
def greater2n(x):
"""
    return the smallest power of 2 greater than or equal to int(x)
Parameters
----------
x: (int or float) a number
Returns
-------
    int: the smallest power of 2 greater than or equal to int(x) (element-wise for iterables)
"""
if isinstance(x, bool):
raise TypeError(f'{x} should be an integer or a float')
if hasattr(x, '__iter__'):
res = []
for el in x:
if isinstance(el, bool):
raise TypeError(f'{el} should be an integer or a float')
if not (isinstance(el, int) or isinstance(el, float)):
raise TypeError(f'{x} elements should be integer or float')
res.append(1 << (int(el) - 1).bit_length())
if isinstance(x, np.ndarray):
return np.array(res)
else:
return res
else:
if not (isinstance(x, int) or isinstance(x, float)):
raise TypeError(f'{x} should be an integer or a float')
return 1 << (int(x) - 1).bit_length()
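# Usage sketch:
#     greater2n(5)         # -> 8
#     greater2n([3, 9.5])  # -> [4, 16]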
def linspace_step(start, stop, step):
"""
Compute a regular linspace_step distribution from start to stop values.
=============== =========== ======================================
**Parameters** **Type** **Description**
*start* scalar the starting value of distribution
*stop* scalar the stopping value of distribution
*step* scalar the length of a distribution step
=============== =========== ======================================
Returns
-------
scalar array
The computed distribution axis as an array.
"""
if np.abs(step) < 1e-12 or np.sign(stop - start) != np.sign(step) or start == stop:
        raise ValueError('Invalid parameters: step must be non zero, have the same sign as (stop - start), and start must differ from stop')
Nsteps = int(np.ceil((stop - start) / step))
new_stop = start + (Nsteps - 1) * step
if np.abs(new_stop + step - stop) < 1e-12:
Nsteps += 1
new_stop = start + (Nsteps - 1) * step
return np.linspace(start, new_stop, Nsteps)
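# Usage sketch (values shown approximately):
#     linspace_step(0, 1, 0.25)  # -> array([0.  , 0.25, 0.5 , 0.75, 1.  ])
#     linspace_step(0, 1, 0.3)   # -> array([0. , 0.3, 0.6, 0.9]); stop is only included
#                                #    when it falls on the step grid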
def find_index(x, threshold):
"""
    find_index finds the index ix such that x[ix] is the closest to threshold
Parameters
----------
x : vector
threshold : list of scalar
Returns
-------
out : list of 2-tuple containing ix,x[ix]
out=[(ix0,xval0),(ix1,xval1),...]
"""
if not hasattr(threshold, '__iter__'):
threshold = [threshold]
out = []
for value in threshold:
ix = int(np.argmin(np.abs(x - value)))
out.append((ix, x[ix]))
return out
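# Usage sketch (values shown approximately):
#     find_index(np.linspace(0., 1., 11), 0.32)          # -> [(3, 0.3)]
#     find_index(np.linspace(0., 1., 11), [0.32, 0.98])  # -> [(3, 0.3), (10, 1.0)]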
def find_common_index(x, y, x0, y0):
vals = x + 1j * y
val = x0 + 1j * y0
ind = int(np.argmin(np.abs(vals - val)))
return ind, x[ind], y[ind]
def gauss1D(x, x0, dx, n=1):
"""
    compute the gaussian function along a vector x, centered in x0 and with a
    FWHM in intensity of dx. n=1 is for the standard gaussian while n>1 defines
    a hypergaussian
Parameters
----------
    x: (ndarray) the axis along which the gaussian is computed
    x0: (float) the central position of the gaussian
    dx: (float) the FWHM of the gaussian
n=1 : an integer to define hypergaussian, n=1 by default for regular gaussian
Returns
-------
out : vector
the value taken by the gaussian along x axis
"""
if dx <= 0:
raise ValueError('dx should be strictly positive')
if not isinstance(n, int):
raise TypeError('n should be a positive integer')
elif n < 0:
raise ValueError('n should be a positive integer')
out = np.exp(-2 * np.log(2) ** (1 / n) * (((x - x0) / dx)) ** (2 * n))
return out
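# Usage sketch:
#     g = gauss1D(np.linspace(-5., 5., 1001), 0., 2.)
#     g.max()  # -> 1.0, reached at x = x0
# the squared modulus g**2 drops to one half at x = x0 +/- dx/2, consistent with dx
# being the FWHM of the intensity profile.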
# def rotate_2D_array(arr, angle):
# theta = np.radians(angle)
# c, s = np.cos(theta), np.sin(theta)
# R = np.array(((c, -s), (s, c)))
# (x0r, y0r) = tuple(R.dot(np.array([x0, y0])))
#
# data = np.zeros((len(y), len(x)))
#
# for indx, xtmp in enumerate(x):
# for indy, ytmp in enumerate(y):
# rotatedvect = R.dot(np.array([xtmp, ytmp]))
# data[indy, indx] = np.exp(
# -2 * np.log(2) ** (1 / n) * ((rotatedvect[0] - x0r) / dx) ** (2 * n)) * np.exp(
# -2 * np.log(2) ** (1 / n) * ((rotatedvect[1] - y0r) / dy) ** (2 * n))
#
# return data
def gauss2D(x, x0, dx, y, y0, dy, n=1, angle=0):
"""
    compute the 2D gaussian function along a vector x, centered in x0 and with a
    FWHM in intensity of dx, and the same along the y axis. n=1 is for the standard gaussian while n>1 defines
    a hypergaussian. Optionally rotate it by an angle in degrees
Parameters
----------
    x: (ndarray) first axis of the 2D gaussian
    x0: (float) the central position of the gaussian along x
    dx: (float) the FWHM of the gaussian along x
    y: (ndarray) second axis of the 2D gaussian
    y0: (float) the central position of the gaussian along y
    dy: (float) the FWHM of the gaussian along y
    n=1 : an integer to define hypergaussian, n=1 by default for regular gaussian
angle: (float) a float to rotate main axes, in degree
Returns
-------
out : ndarray 2 dimensions
"""
if angle == 0:
data = np.transpose(np.outer(gauss1D(x, x0, dx, n), gauss1D(y, y0, dy, n)))
else:
theta = np.radians(angle)
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s), (s, c)))
(x0r, y0r) = tuple(R.dot(np.array([x0, y0])))
data = np.zeros((len(y), len(x)))
for indx, xtmp in enumerate(x):
for indy, ytmp in enumerate(y):
rotatedvect = R.dot(np.array([xtmp, ytmp]))
data[indy, indx] = gauss1D(rotatedvect[0], x0r, dx, n) * gauss1D(rotatedvect[1], y0r, dy, n)
return data
def ftAxis(Npts, omega_max):
"""
Given two numbers Npts,omega_max, return two vectors spanning the temporal
and spectral range. They are related by Fourier Transform
Parameters
----------
Npts: (int)
A number of points defining the length of both grids
omega_max: (float)
The maximum circular frequency in the spectral domain. its unit defines
the temporal units. ex: omega_max in rad/fs implies time_grid in fs
Returns
-------
omega_grid: (ndarray)
The spectral axis of the FFT
time_grid: (ndarray))
The temporal axis of the FFT
See Also
--------
ftAxis, ftAxis_time, ift, ft2, ift2
"""
    if not isinstance(Npts, int):
        raise TypeError('Npts should be a positive integer, if possible a power of 2')
    elif Npts < 1:
        raise ValueError('Npts should be a strictly positive integer')
dT = 2 * np.pi / (2 * omega_max)
omega_grid = np.linspace(-omega_max, omega_max, Npts)
time_grid = dT * np.linspace(-(Npts - 1) / 2, (Npts - 1) / 2, Npts)
return omega_grid, time_grid
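# Usage sketch:
#     omega, t = ftAxis(512, 10.)
#     omega[-1]    # -> 10.0, the requested maximum circular frequency
#     t[1] - t[0]  # -> ~np.pi / 10, the temporal sampling imposed by omega_max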
def ftAxis_time(Npts, time_max):
"""
Given two numbers Npts,omega_max, return two vectors spanning the temporal
and spectral range. They are related by Fourier Transform
Parameters
----------
Npts : number
A number of points defining the length of both grids
time_max : number
        The maximum temporal window
Returns
-------
omega_grid : vector
The spectral axis of the FFT
time_grid : vector
The temporal axis of the FFT
See Also
--------
ftAxis, ftAxis_time, ift, ft2, ift2
"""
    if not isinstance(Npts, int):
        raise TypeError('Npts should be a positive integer, if possible a power of 2')
    elif Npts < 1:
        raise ValueError('Npts should be a strictly positive integer')
dT = time_max / Npts
omega_max = (Npts - 1) / 2 * 2 * np.pi / time_max
omega_grid = np.linspace(-omega_max, omega_max, Npts)
time_grid = dT * np.linspace(-(Npts - 1) / 2, (Npts - 1) / 2, Npts)
return omega_grid, time_grid
def ft(x, dim=-1):
"""
    Process the 1D fast Fourier transform and swap the axes to get correct results when used with ftAxis
Parameters
----------
x: (ndarray) the array on which the FFT should be done
dim: the axis over which is done the FFT (default is the last of the array)
Returns
-------
See Also
--------
ftAxis, ftAxis_time, ift, ft2, ift2
"""
if not isinstance(dim, int):
raise TypeError('dim should be an integer specifying the array dimension over which to do the calculation')
assert isinstance(x, np.ndarray)
assert dim >= -1
assert dim <= len(x.shape) - 1
out = np.fft.fftshift(np.fft.fft(np.fft.fftshift(x, axes=dim), axis=dim), axes=dim)
return out
def ift(x, dim=0):
"""
Process the inverse 1D fast fourier transform and swaps the axis to get correct results using ftAxis
Parameters
----------
x: (ndarray) the array on which the FFT should be done
    dim: the axis over which the inverse FFT is done (default is the first axis of the array)
Returns
-------
See Also
--------
ftAxis, ftAxis_time, ift, ft2, ift2
"""
if not isinstance(dim, int):
raise TypeError('dim should be an integer specifying the array dimension over which to do the calculation')
assert isinstance(x, np.ndarray)
assert dim >= -1
assert dim <= len(x.shape) - 1
out = np.fft.fftshift(np.fft.ifft(np.fft.fftshift(x, axes=dim), axis=dim), axes=dim)
return out
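# Usage sketch: for an even number of samples the two transforms are inverses of each other
#     x = np.random.rand(256)
#     np.allclose(ift(ft(x)), x)  # -> True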
def ft2(x, dim=(-2, -1)):
"""
Process the 2D fast fourier transform and swaps the axis to get correct results using ftAxis
Parameters
----------
x: (ndarray) the array on which the FFT should be done
dim: the axis over which is done the FFT (default is the last of the array)
Returns
-------
See Also
--------
ftAxis, ftAxis_time, ift, ft2, ift2
"""
assert isinstance(x, np.ndarray)
if hasattr(dim, '__iter__'):
for d in dim:
if not isinstance(d, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert d <= len(x.shape)
else:
if not isinstance(dim, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert dim <= len(x.shape)
out = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(x, axes=dim)), axes=dim)
return out
def ift2(x, dim=(-2, -1)):
"""
Process the inverse 2D fast fourier transform and swaps the axis to get correct results using ftAxis
Parameters
----------
x: (ndarray) the array on which the FFT should be done
dim: the axis (or a tuple of axes) over which is done the FFT (default is the last of the array)
Returns
-------
See Also
--------
ftAxis, ftAxis_time, ift, ft2, ift2
"""
assert isinstance(x, np.ndarray)
if hasattr(dim, '__iter__'):
for d in dim:
if not isinstance(d, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert d <= len(x.shape)
else:
if not isinstance(dim, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert dim <= len(x.shape)
out = np.fft.fftshift(np.fft.ifft2(np.fft.fftshift(x, axes=dim)), axes=dim)
return out
if __name__ == '__main__':
#paths = recursive_find_expr_in_files('C:\\Users\\weber\\Labo\\Programmes Python\\PyMoDAQ_Git', 'visa')
# for p in paths:
# print(str(p))
# v = get_version()
# pass
#plugins = get_plugins() # pragma: no cover
    #extensions = get_extensions()
    #models = get_models()
    count = count_lines('C:\\Users\\weber\\Labo\\Programmes Python\\PyMoDAQ_Git\\pymodaq\\src')
pass
|
### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXMPLY(gxapi_cy.WrapMPLY):
"""
GXMPLY class.
The `GXMPLY <geosoft.gxapi.GXMPLY>` object contains the definitions for one or more
PPLY.
"""
def __init__(self, handle=0):
super(GXMPLY, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXMPLY <geosoft.gxapi.GXMPLY>`
:returns: A null `GXMPLY <geosoft.gxapi.GXMPLY>`
:rtype: GXMPLY
"""
return GXMPLY()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
@classmethod
def create(cls):
"""
Creates a Multi Polygon Object.
:returns: `GXMPLY <geosoft.gxapi.GXMPLY>` Handle
:rtype: GXMPLY
.. versionadded:: 9.5
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapMPLY._create(GXContext._get_tls_geo())
return GXMPLY(ret_val)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms.spatial.array import GridPatch
from tests.utils import TEST_NDARRAYS, assert_allclose
A = np.arange(16).repeat(3).reshape(4, 4, 3).transpose(2, 0, 1)
A11 = A[:, :2, :2]
A12 = A[:, :2, 2:]
A21 = A[:, 2:, :2]
A22 = A[:, 2:, 2:]
TEST_CASE_0 = [{"patch_size": (2, 2)}, A, [A11, A12, A21, A22]]
TEST_CASE_1 = [{"patch_size": (2, 2), "num_patches": 3}, A, [A11, A12, A21]]
TEST_CASE_2 = [{"patch_size": (2, 2), "num_patches": 5}, A, [A11, A12, A21, A22, np.zeros((3, 2, 2))]]
TEST_CASE_3 = [{"patch_size": (2, 2), "offset": (0, 0)}, A, [A11, A12, A21, A22]]
TEST_CASE_4 = [{"patch_size": (2, 2), "offset": (0, 0)}, A, [A11, A12, A21, A22]]
TEST_CASE_5 = [{"patch_size": (2, 2), "offset": (2, 2)}, A, [A22]]
TEST_CASE_6 = [{"patch_size": (2, 2), "offset": (0, 2)}, A, [A12, A22]]
TEST_CASE_7 = [{"patch_size": (2, 2), "offset": (2, 0)}, A, [A21, A22]]
TEST_CASE_8 = [{"patch_size": (2, 2), "num_patches": 3, "sort_fn": "max"}, A, [A22, A21, A12]]
TEST_CASE_9 = [{"patch_size": (2, 2), "num_patches": 4, "sort_fn": "min"}, A, [A11, A12, A21, A22]]
TEST_CASE_10 = [{"patch_size": (2, 2), "overlap": 0.5, "num_patches": 3}, A, [A11, A[:, :2, 1:3], A12]]
TEST_CASE_11 = [
{"patch_size": (3, 3), "num_patches": 2, "constant_values": 255},
A,
[A[:, :3, :3], np.pad(A[:, :3, 3:], ((0, 0), (0, 0), (0, 2)), mode="constant", constant_values=255)],
]
TEST_CASE_12 = [
{"patch_size": (3, 3), "offset": (-2, -2), "num_patches": 2},
A,
[np.zeros((3, 3, 3)), np.pad(A[:, :1, 1:4], ((0, 0), (2, 0), (0, 0)), mode="constant")],
]
TEST_CASE_13 = [{"patch_size": (2, 2), "threshold": 50.0}, A, [A11]]
TEST_SINGLE = []
for p in TEST_NDARRAYS:
TEST_SINGLE.append([p, *TEST_CASE_0])
TEST_SINGLE.append([p, *TEST_CASE_1])
TEST_SINGLE.append([p, *TEST_CASE_2])
TEST_SINGLE.append([p, *TEST_CASE_3])
TEST_SINGLE.append([p, *TEST_CASE_4])
TEST_SINGLE.append([p, *TEST_CASE_5])
TEST_SINGLE.append([p, *TEST_CASE_6])
TEST_SINGLE.append([p, *TEST_CASE_7])
TEST_SINGLE.append([p, *TEST_CASE_8])
TEST_SINGLE.append([p, *TEST_CASE_9])
TEST_SINGLE.append([p, *TEST_CASE_10])
TEST_SINGLE.append([p, *TEST_CASE_11])
TEST_SINGLE.append([p, *TEST_CASE_12])
TEST_SINGLE.append([p, *TEST_CASE_13])
class TestGridPatch(unittest.TestCase):
@parameterized.expand(TEST_SINGLE)
def test_grid_patch(self, in_type, input_parameters, image, expected):
input_image = in_type(image)
splitter = GridPatch(**input_parameters)
output = list(splitter(input_image))
self.assertEqual(len(output), len(expected))
for output_patch, expected_patch in zip(output, expected):
assert_allclose(output_patch[0], expected_patch, type_test=False)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 2 16:06:59 2019
@author: rainfall
"""
import settings
import numpy as np
import pandas as pd
import os, sys
import logging
from PRE_PROCESS_TRAINING.PCA import HelloFunction
class BRain:
'''
    This is my custom rain classifier embedded with an automatic screening method
:param infile: sets the input file path (string)
:param file: file name of the input CSV data (string)
:param outfile: sets the output file of the network (string)
'''
def __init__(self):
self.IN_CSV_LIST = settings.IN_CSV_LIST
self.OUT_CSV_LIST = settings.OUT_CSV_LIST
self.LAT_LIMIT = settings.LAT_LIMIT
self.LON_LIMIT = settings.LON_LIMIT
self.THRESHOLD_RAIN = settings.THRESHOLD_RAIN
self.RAIN_CSV = settings.RAIN_CSV
self.NORAIN_CSV = settings.NORAIN_CSV
self.COLUMN_TYPES = settings.COLUMN_TYPES
def LoadCSV(self, path, file):
'''
Load CSV files (original)
:param path: sets the csv files path (string)
:param file: file name or file list (string)
:return: dataframe (DataFrame)
'''
if file.startswith(".", 0, len(file)):
print("File name starts with point: {} - Skipping...".format(file))
elif file.endswith(".csv"):
try:
dataframe = pd.DataFrame()
dataframe = pd.read_csv(os.path.join(path, file), sep=',', header=5, skipinitialspace=True, decimal='.', dtype=self.COLUMN_TYPES)
print('Dataframe {} was loaded'.format(file))
except:
print('Unexpected error:', sys.exc_info()[0])
return dataframe
def ExtractRegion(self, dataframe):
'''
Extract regional areas from the global dataset (original)
:param dataframe: original global dataframe (DataFrame)
:return: dataframe_regional (DataFrame)
'''
print("Extracting region from dataframe using LAT limits: '{}' and LON limits: '{}'".format(
self.LAT_LIMIT,
self.LON_LIMIT))
subset = np.where(
(dataframe['lat']<=self.LAT_LIMIT[1]) &
(dataframe['lat']>=self.LAT_LIMIT[0]) &
(dataframe['lon']<=self.LON_LIMIT[1]) &
(dataframe['lon']>=self.LON_LIMIT[0]))
        dataframe_regional = dataframe.iloc[subset].copy()
        dataframe_regional.drop(['numpixs'], axis=1, inplace=True)
print("Extraction completed!")
return dataframe_regional
def ThresholdRainNoRain(self, dataframe_regional):
'''
Defines the minimum threshold to consider in the Rain Dataset
:param dataframe_regional: the regional dataset with all pixels (rain and no rain)(DataFrame)
:return: rain and norain dataframes (DataFrame)
'''
# Rain/No Rain threshold(th):
threshold_rain = self.THRESHOLD_RAIN
rain_pixels = np.where((dataframe_regional['sfcprcp']>=threshold_rain))
norain_pixels = np.where((dataframe_regional['sfcprcp']<threshold_rain))
df_reg_copy = dataframe_regional.copy()
dataframe_rain = df_reg_copy.iloc[rain_pixels]
dataframe_norain = df_reg_copy.iloc[norain_pixels]
print("Dataframes Rain and NoRain created!")
return dataframe_rain, dataframe_norain
def PrintSettings(self):
'''
Shows the settings of the main parameters necessary to process the algorithm.
'''
print(self.__dict__)
def ConcatenationMonthlyDF(self, path, dataframe_name):
'''
Concatenate the monthly rain and norain dataframes into yearly dataframes.
'''
frames = []
for idx, file in enumerate(os.listdir(path)):
if file.startswith(".", 0, len(file)):
print("File name starts with point: ", file)
else:
logging.debug(file)
print("posicao do loop: {} | elemento da pasta: {}".format(idx, file))
df = pd.read_csv(os.path.join(path, file), sep=',', decimal='.', encoding="utf8")
df.reset_index(drop=True, inplace=True)
frames.append(df)
logging.debug(frames)
#------
# Concatenation of the monthly Dataframes into the yearly Dataframe:
try:
dataframe_yrly = pd.concat(frames, sort=False, ignore_index=True, verify_integrity=True)
except ValueError as e:
print("ValueError:", e)
#------
        # Repairing the additional column wrongly generated in concatenation
        # (the original test used np.where(...) directly, which is always truthy;
        # checking for any finite value in column 34 reflects the apparent intent):
        if np.isfinite(dataframe_yrly.iloc[:, 34]).any():
            dataframe_yrly["correto"] = dataframe_yrly.iloc[:, 34]
        else:
            dataframe_yrly["correto"] = dataframe_yrly.iloc[:, 33]
dataframe_yrly_name=dataframe_name
#------
# Saving the new output DB's (rain and no rain):
dataframe_yrly.to_csv(os.path.join(path, dataframe_yrly_name),index=False,sep=",",decimal='.')
print("The file ", dataframe_yrly_name ," was genetared!")
return dataframe_yrly
def FitConcatenationDF(self, path, file):
dataframe = pd.read_csv(os.path.join(path, file), sep=',', decimal='.', encoding="utf8")
pos33=np.where(np.isnan(dataframe.iloc[:,33]))
val34=dataframe.iloc[:,34].iloc[pos33]
vec_correto=dataframe.iloc[:,33].fillna(val34)
dataframe["emis190V_OK"]=""
dataframe["emis190V_OK"]=vec_correto
dataframe_copy_OK=dataframe[['lat', 'lon', 'sfccode', 'T2m', 'tcwv', 'skint', 'sfcprcp',
'cnvprcp', '10V', '10H', '18V', '18H', '23V', '36V', '36H', '89V',
'89H', '166V', '166H', '186V', '190V', 'emis10V', 'emis10H', 'emis18V',
'emis18H', 'emis23V', 'emis36V', 'emis36H', 'emis89V', 'emis89H',
'emis166V', 'emis166H', 'emis186V',]].copy()
dataframe_copy_OK["emis190V"]=vec_correto
file_name=os.path.splitext(file)[0]+"_OK.csv"
dataframe_copy_OK.to_csv(os.path.join(path, file_name),index=False,sep=",",decimal='.')
print("The file ", file_name ," was genetared!")
return dataframe_copy_OK
mybrain = BRain()
#dataframe_yrly = mybrain.ConcatenationMonthlyDF(settings.RAIN_CSV, "Yearly_BR_rain_var2d.csv")
#dataframe_OK = mybrain.FitConcatenationDF(settings.RAIN_CSV, "Yearly_BR_rain_var2d.csv")
HelloFunction()
### Loop for CREATION of the regional and rain and norain dataframes.
# You can change the INPUT/OUTPUT PATH depending on your need:
#------------------------------------------------------------------------------
#for idx, elemento in enumerate(os.listdir(mybrain.IN_CSV_LIST)):
# print("posicao do loop: {} | elemento da pasta: {}".format(idx, elemento))
# dataframe_original = mybrain.LoadCSV(mybrain.IN_CSV_LIST, elemento)
# #-------------------------------------------------------------------------
# dataframe_regional = mybrain.ExtractRegion(dataframe_original)
# data=elemento[9:15]
# dataframe_reg_name="Regional_BR_"+data+"_var2d.csv"
# dataframe_regional.to_csv(os.path.join(mybrain.OUT_CSV_LIST, dataframe_reg_name),index=False,sep=",",decimal='.')
# #-------------------------------------------------------------------------
# dataframe_rain, dataframe_norain = mybrain.ThresholdRainNoRain(dataframe_regional)
# dataframe_rain_name="Regional_BR_rain_"+data+"_var2d.csv"
# dataframe_norain_name="Regional_BR_norain_"+data+"_var2d.csv"
# dataframe_rain.to_csv(os.path.join(mybrain.RAIN_CSV, dataframe_rain_name),index=False,sep=",",decimal='.')
# dataframe_norain.to_csv(os.path.join(mybrain.NORAIN_CSV, dataframe_norain_name),index=False,sep=",",decimal='.')
# print("The file ", dataframe_rain ," was genetared!")
# print("The file ", dataframe_norain ," was genetared!")
# dataframe_norain.to_csv(os.path.join(pathnorain, norainDB),index=False,sep=",",decimal='.')
# print("The file ", norainDB ," was genetared!")
#------------------------------------------------------------------------------
### Loop for CONCATENATION of the rain and norain dataframes in Yearly Dataframes:
# You can change the INPUT/OUTPUT PATH depending on your need:
#------------------------------------------------------------------------------
|
import json
import logging
from collections import defaultdict
from typing import Optional, Dict, List, Tuple
from model.jriver.common import get_channel_idx, user_channel_indexes
from model.jriver.filter import MixType, Mix, CompoundRoutingFilter, Filter, Gain, XOFilter
logger = logging.getLogger('jriver.routing')
LFE_ADJUST_KEY = 'l'
ROUTING_KEY = 'r'
EDITORS_KEY = 'e'
EDITOR_NAME_KEY = 'n'
UNDERLYING_KEY = 'u'
WAYS_KEY = 'w'
SYM_KEY = 's'
LFE_IN_KEY = 'x'
class NoMixChannelError(Exception):
pass
class Route:
def __init__(self, i: int, w: int, o: int, mt: Optional[MixType] = None):
self.i = i
self.w = w
self.o = o
self.mt = mt
def __repr__(self):
return f"{self.i}.{self.w} -> {self.o} {self.mt.name if self.mt else ''}"
class Matrix:
def __init__(self, inputs: Dict[str, int], outputs: List[str]):
self.__inputs = inputs
self.__outputs = outputs
self.__row_keys = self.__make_row_keys()
# input channel -> way -> output channel -> enabled
self.__ways: Dict[str, Dict[int, Dict[str, bool]]] = self.__make_default_ways()
def get_active_routes(self) -> List[Route]:
'''
:return: The active links from inputs to outputs defined by this matrix.
'''
return [Route(get_channel_idx(k1), k2, get_channel_idx(k3))
for k1, v1 in self.__ways.items() for k2, v2 in v1.items() for k3, v3 in v2.items() if v3]
def __make_default_ways(self) -> Dict[str, Dict[int, Dict[str, bool]]]:
return {i: {w: {c: False for c in self.__outputs} for w in range(ways)} for i, ways in self.__inputs.items()}
def __make_row_keys(self) -> List[Tuple[str, int]]:
return [(c, w) for c, ways in self.__inputs.items() for w in range(ways)]
@property
def rows(self):
return len(self.__row_keys)
def row_name(self, idx: int):
c, w = self.__row_keys[idx]
suffix = '' if self.__inputs[c] < 2 else f" - {w+1}"
return f"{c}{suffix}"
@property
def columns(self):
return len(self.__outputs)
def column_name(self, idx: int) -> str:
return self.__outputs[idx]
def toggle(self, row: int, column: int) -> str:
c, w = self.__row_keys[row]
output_channel = self.__outputs[column]
now_enabled = not self.__ways[c][w][output_channel]
self.__ways[c][w][output_channel] = now_enabled
error_msg = None
if now_enabled:
# TODO verify
# try:
# self.get_routes()
# except ValueError as e:
# logger.exception(f"Unable to activate route from {c}{w} to {output_channel}: circular dependency")
# error_msg = 'Unable to route, circular dependency'
# self.__ways[c][w][output_channel] = False
pass
return error_msg
def enable(self, channel: str, way: int, output: str):
self.__ways[channel][way][output] = True
def is_routed(self, row: int, column: int) -> bool:
c, w = self.__row_keys[row]
return self.__ways[c][w][self.__outputs[column]]
def __repr__(self):
return f"{self.__ways}"
def clone(self):
clone = Matrix(self.__inputs, self.__outputs)
clone.__copy_matrix_values(self.__ways)
return clone
def __copy_matrix_values(self, source: Dict[str, Dict[int, Dict[str, bool]]]):
for k1, v1 in source.items():
for k2, v2 in v1.items():
for k3, v3 in v2.items():
self.__ways[k1][k2][k3] = v3
def resize(self, channel: str, ways: int):
old_len = self.__inputs[channel]
if ways < old_len:
self.__inputs[channel] = ways
self.__row_keys = self.__make_row_keys()
for i in range(ways, old_len):
del self.__ways[channel][i]
elif ways > old_len:
self.__inputs[channel] = ways
self.__row_keys = self.__make_row_keys()
old_ways = self.__ways
self.__ways = self.__make_default_ways()
self.__copy_matrix_values(old_ways)
def get_mapping(self) -> Dict[str, Dict[int, str]]:
'''
:return: channel mapping as input channel -> way -> output channel
'''
mapping = defaultdict(dict)
for input_channel, v1 in self.__ways.items():
for way, v2 in v1.items():
for output_channel, routed in v2.items():
if routed:
prefix = f"{mapping[input_channel][way]};" if way in mapping[input_channel] else ''
mapping[input_channel][way] = f"{prefix}{get_channel_idx(output_channel)}"
return mapping
def encode(self) -> List[str]:
'''
:return: currently stored routings in encoded form.
'''
routings = []
for input_channel, v1 in self.__ways.items():
for way, v2 in v1.items():
for output_channel, routed in v2.items():
if routed:
routings.append(f"{input_channel}/{way}/{output_channel}")
return routings
def decode(self, routings: List[str]) -> None:
'''
Reloads the routing generated by encode.
:param routings: the routings.
'''
for input_channel, v1 in self.__ways.items():
for way, v2 in v1.items():
for output_channel in v2.keys():
v2[output_channel] = False
for r in routings:
i, w, o = r.split('/')
self.__ways[i][int(w)][o] = True
def is_input(self, channel: str):
return channel in self.__inputs.keys()
def get_free_output_channels(self) -> List[int]:
'''
:return: the output channels which have no assigned inputs.
'''
used_outputs = set([k3 for k1, v1 in self.__ways.items() for k2, v2 in v1.items() for k3, v3 in v2.items()
if v3])
return [get_channel_idx(o) for o in self.__outputs if o not in used_outputs]
def get_input_channels(self) -> List[int]:
return [get_channel_idx(i) for i in self.__inputs]
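# Encoding sketch: Matrix.encode() emits one 'input/way/output' string per active route,
# e.g. 'L/0/SW' for way 0 of input L feeding output SW (the channel names here are purely
# illustrative), and Matrix.decode() reloads exactly such a list.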
def __reorder_routes(routes: List[Route]) -> List[Route]:
'''
Reorders routing to ensure inputs are not overridden with outputs. Attempts to break circular dependencies
using user channels if possible.
:param routes: the routes.
:return: the reordered routes.
'''
ordered_routes: List[Tuple[Route, int]] = []
u1_channel_idx = user_channel_indexes()[0]
for r in routes:
def repack() -> Tuple[Route, int]:
return r, -1
# just add the first route as there is nothing to reorder
if not ordered_routes or not r.mt:
ordered_routes.append(repack())
else:
insert_at = -1
for idx, o_r in enumerate(ordered_routes):
# if a route wants to write to this input, make sure this route comes first
if o_r[0].o == r.i:
insert_at = idx
break
if insert_at == -1:
# the normal case, nothing to reorder so just add it
ordered_routes.append(repack())
else:
# search for circular dependencies, i.e. if this route wants to write to the input of a later route
# (and hence overwriting that input channel)
broke_circular: Optional[Route] = None
for o_r in ordered_routes[insert_at:]:
if o_r[0].i == r.o:
                        inserted_route = ordered_routes[insert_at]
                        # make sure we only copy to the user channel once
                        # (the original comparison indexed a 2-tuple out of range; comparing the
                        # already inserted route's input/output reflects the apparent intent)
                        if inserted_route[0].i != r.i or inserted_route[0].o != u1_channel_idx:
                            ordered_routes.insert(insert_at, (Route(r.i, r.w, u1_channel_idx, MixType.COPY), r.o))
# cache the copy from the user channel to the actual output
broke_circular = Route(u1_channel_idx, r.w, r.o, r.mt if r.mt else MixType.COPY)
break
if broke_circular:
# append the route after the last use of the route output as an input
candidate_idx = -1
for idx, o_r in enumerate(ordered_routes):
if o_r[0].i == broke_circular.o:
candidate_idx = idx + 1
if candidate_idx == -1:
raise ValueError(f"Logical error, circular dependency detected but now missing")
elif candidate_idx == len(ordered_routes):
ordered_routes.append((broke_circular, -1))
else:
ordered_routes.insert(candidate_idx, (broke_circular, -1))
else:
# no circular dependency but make sure we insert before any copies to the user channel
if insert_at > 0:
inserted = ordered_routes[insert_at - 1]
if inserted[1] > -1 and inserted[0].i == r.i:
insert_at -= 1
ordered_routes.insert(insert_at, repack())
# validate that the proposed routes make sense
u1_in_use_for = -1
failed = False
output: List[Route] = []
for r, target in ordered_routes:
if target > -1:
if u1_in_use_for == -1:
u1_in_use_for = target
else:
if target != u1_in_use_for:
failed = True
if r.i == u1_channel_idx:
if r.o != u1_in_use_for:
failed = True
else:
u1_in_use_for = -1
output.append(r)
if failed:
# TODO this does not make sense with how bass management is implemented for stereo subs
logger.info(f'Unresolvable circular dependencies found in {ordered_routes}')
return output
def collate_routes(summed_routes_by_output: Dict[int, List[Route]]) -> List[Tuple[List[int], List[Route]]]:
'''
Collates output channels that are fed by identical sets of inputs.
:param summed_routes_by_output: the summed routes.
:return: the collated routes.
'''
summed_routes: List[Tuple[List[int], List[Route]]] = []
for output_channel, summed_route in summed_routes_by_output.items():
route_inputs = sorted([f"{r.i}_{r.w}" for r in summed_route])
matched = False
for alt_c, alt_r in summed_routes_by_output.items():
if alt_c != output_channel and not matched:
alt_inputs = sorted([f"{r.i}_{r.w}" for r in alt_r])
if alt_inputs == route_inputs:
matched = True
found = False
if summed_routes:
for s_r in summed_routes:
if alt_c in s_r[0] and not found:
found = True
s_r[0].append(output_channel)
if not found:
summed_routes.append(([output_channel], summed_route))
if not matched:
summed_routes.append(([output_channel], summed_route))
return summed_routes
def group_routes_by_output(matrix: Matrix):
'''
:param matrix: the matrix.
:return: the direct routes (1 input to 1 output), the summed routes grouped by output channel (multiple inputs going to a single output).
'''
summed_routes: Dict[int, List[Route]] = defaultdict(list)
for r in matrix.get_active_routes():
summed_routes[r.o].append(r)
direct_routes = [Route(v1.i, v1.w, v1.o, MixType.COPY)
for v in summed_routes.values() if len(v) == 1 for v1 in v if v1.i != v1.o]
summed_routes = {k: v for k, v in summed_routes.items() if len(v) > 1}
return direct_routes, summed_routes
def convert_to_routes(matrix: Matrix):
'''
:param matrix: the matrix.
:return: the routes.
'''
simple_routes, summed_routes_by_output = group_routes_by_output(matrix)
collated_summed_routes: List[Tuple[List[int], List[Route]]] = collate_routes(summed_routes_by_output)
return simple_routes, collated_summed_routes
def __create_summed_output_channel_for_shared_lfe(output_channels: List[int],
routes: List[Route],
main_adjust: int,
lfe_channel_idx: int,
empty_channels: List[int],
input_channels: List[int]) -> Tuple[List[Filter], List[Route]]:
'''
converts routing of an lfe based mix to a set of filters where the LFE channel has been included in more than 1
distinct combination of main channels.
:param output_channels: the output channels.
:param routes: the routes.
:param main_adjust: main level adjustment.
:param lfe_channel_idx: the lfe channel idx.
:param empty_channels: channels which can be used to stage outputs.
:return: the filters, additional direct routes (to copy the summed output to other channels).
'''
# calculate a target channel as any output channel which is not an input channel
target_channel: Optional[int] = next((c for c in output_channels if c not in input_channels), None)
    if target_channel is None:
# if no such channel can be found then take the next empty channel (unlikely situation in practice)
if empty_channels:
target_channel = empty_channels.pop(0)
else:
# if no free channels left then blow up
raise NoMixChannelError()
filters: List[Filter] = []
direct_routes: List[Route] = []
# add all routes to the output
for r in routes:
vals = {
**Mix.default_values(),
'Source': str(r.i),
'Destination': str(target_channel),
'Mode': str(MixType.COPY.value if r.i in output_channels else MixType.ADD.value)
}
if r.i != lfe_channel_idx and main_adjust != 0:
vals['Gain'] = f"{main_adjust:.7g}"
mix = Mix(vals)
if mix.mix_type == MixType.COPY and filters:
filters.insert(0, mix)
else:
filters.append(mix)
# copy from the mix target channel to the actual outputs
for c in output_channels:
if c != target_channel:
direct_routes.append(Route(target_channel, 0, c, MixType.COPY))
return filters, direct_routes
def __create_summed_output_channel_for_dedicated_lfe(output_channels: List[int], routes: List[Route], main_adjust: int,
lfe_channel_idx: int) -> Tuple[List[Filter], List[Route]]:
'''
converts routing of an lfe based mix to a set of filters when the LFE channel is not shared across multiple outputs.
:param output_channels: the output channels.
:param routes: the routes.
:param main_adjust: main level adjustment.
:param lfe_channel_idx: the lfe channel idx.
:return: the filters, additional direct routes (to copy the summed output to other channels).
'''
filters: List[Filter] = []
direct_routes: List[Route] = []
# accumulate main channels into the LFE channel with an appropriate adjustment
for r in routes:
if r.i != lfe_channel_idx:
vals = {
**Mix.default_values(),
'Source': str(r.i),
'Destination': str(lfe_channel_idx),
'Mode': str(MixType.ADD.value)
}
if r.i != lfe_channel_idx and main_adjust != 0:
vals['Gain'] = f"{main_adjust:.7g}"
filters.append(Mix(vals))
# copy the LFE channel to any required output channel
for c in output_channels:
if c != lfe_channel_idx:
direct_routes.append(Route(lfe_channel_idx, 0, c, MixType.COPY))
return filters, direct_routes
def calculate_compound_routing_filter(matrix: Matrix, editor_meta: Optional[List[dict]] = None,
xo_filters: List[XOFilter] = None, main_adjust: int = 0, lfe_adjust: int = 0,
lfe_channel_idx: Optional[int] = None) -> CompoundRoutingFilter:
'''
Calculates the filters required to route and bass manage, if necessary, the input channels.
:param matrix: the routing matrix.
:param editor_meta: extra editor metadata.
:param xo_filters: the XO filters.
:param main_adjust: the gain adjustment for a main channel when bass management is required.
:param lfe_adjust: the gain adjustment for the LFE channel when bass management is required.
:param lfe_channel_idx: the lfe channel index.
:return: the filters.
'''
direct_routes, summed_routes = group_routes_by_output(matrix)
empty_channels = user_channel_indexes() + matrix.get_free_output_channels()
summed_routes_by_output_channels: List[Tuple[List[int], List[Route]]] = collate_routes(summed_routes)
lfe_route_count = __count_lfe_routes(lfe_channel_idx, direct_routes, summed_routes_by_output_channels)
# Scenarios handled
# 1) Standard bass management
# = LFE channel is routed to the SW output with 1 or more other channels added to it
# - do nothing to the LFE channel (gain already reduced by lfe_adjust)
# - add other channels to the LFE channel
# 2) Standard bass management with LFE routed to some other output channel
# - as 1 with additional direct route to move the SW output to the additional channel
# 3) Standard bass management with multiple SW outputs
# - as 1 with additional direct route to copy the SW output to the additional channel
# 4) Stereo bass
# = LFE channel routed to >1 channel and combined differing sets of main channels
# - find a free channel to mix into
# - add all inputs to the channel
# - copy from here to the output channels
# 5) Non LFE based subwoofers
# = >1 main channels to be summed into some output channel and copied to some other channels
# - do direct routes first to copy the input channels to their target channels
# - mix into each summed channel
filters: List[Filter] = []
if lfe_adjust != 0 and lfe_channel_idx:
filters.append(Gain({
'Enabled': '1',
'Type': Gain.TYPE,
'Gain': f"{lfe_adjust:.7g}",
'Channels': str(lfe_channel_idx)
}))
for output_channels, summed_route in summed_routes_by_output_channels:
includes_lfe = lfe_channel_idx and any((r.i == lfe_channel_idx for r in summed_route))
if includes_lfe:
if lfe_route_count > 1:
route_filters, extra_routes = \
__create_summed_output_channel_for_shared_lfe(output_channels, summed_route, main_adjust,
lfe_channel_idx, empty_channels,
matrix.get_input_channels())
else:
route_filters, extra_routes = \
__create_summed_output_channel_for_dedicated_lfe(output_channels, summed_route, main_adjust,
lfe_channel_idx)
if extra_routes:
direct_routes.extend(extra_routes)
if route_filters:
filters.extend(route_filters)
else:
for c in output_channels:
for r in summed_route:
if r.i != r.o:
direct_routes.append(Route(r.i, r.w, c, MixType.ADD))
ordered_routes = __reorder_routes(direct_routes)
for o_r in ordered_routes:
filters.append(Mix({
**Mix.default_values(),
'Source': str(o_r.i),
'Destination': str(o_r.o),
'Mode': str(o_r.mt.value)
}))
meta = __create_routing_metadata(matrix, editor_meta, lfe_channel_idx, lfe_adjust)
return CompoundRoutingFilter(json.dumps(meta), filters, xo_filters)
def __count_lfe_routes(lfe_channel_idx, direct_routes, summed_routes_by_output_channels) -> int:
lfe_route_count = 0
if lfe_channel_idx:
direct_lfe_routes = len([r.i for r in direct_routes if r.i == lfe_channel_idx])
grouped_lfe_routes = len([1 for _, rs in summed_routes_by_output_channels
if any(r.i == lfe_channel_idx for r in rs)])
lfe_route_count += direct_lfe_routes + grouped_lfe_routes
if lfe_route_count > 1:
logger.debug(f"LFE is included in {lfe_route_count} routes")
return lfe_route_count
def __create_routing_metadata(matrix: Matrix, editor_meta: Optional[List[dict]], lfe_channel: Optional[int],
lfe_adjust: Optional[int]) -> dict:
meta = {
EDITORS_KEY: editor_meta if editor_meta else [],
ROUTING_KEY: matrix.encode(),
}
if lfe_channel:
meta[LFE_IN_KEY] = lfe_channel
if lfe_adjust:
meta[LFE_ADJUST_KEY] = lfe_adjust
return meta
|
import meshio
mesh = meshio.read('I:/Program/Pix2Vox-master/voxel_log/voxel_process/gv_mha_000000_up.vtu')
mesh.write('I:/Program/Pix2Vox-master/voxel_log/voxel_process/gv_mha_000000_up.obj')
|
from flask import Blueprint, jsonify, render_template, abort, request
import os
name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
maths = Blueprint(name, __name__, template_folder='templates')
import config
# Is value an int?
@maths.route('/is/int/<value>', methods=['GET'])
def is_int(value):
"""Receives value and returns if it looks and smells
like an int
Args:
value: the value to test for int-ness
Returns:
bool: JSON with True or False
"""
if config.debug: print('is_int', value, request.method, request.args)
    try:
        int(value)
        return jsonify({'response': True})
    except ValueError:
        return jsonify({'response': False})
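# Illustrative requests against this endpoint (values are examples only):
#     GET /is/int/42   -> {"response": true}
#     GET /is/int/abc  -> {"response": false}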
# Return module docs
@maths.route('/docs/{}'.format(name), methods=['GET'])
def docs():
return render_template('{}.html'.format(name))
|
import os
from twisted.trial import unittest
from twisted.internet.defer import Deferred
from zope.interface.verify import verifyObject
from scrapyd.interfaces import IPoller
from scrapyd.config import Config
from scrapyd.poller import QueuePoller
from scrapyd.utils import get_spider_queues
from mock import Mock
class QueuePollerTest(unittest.TestCase):
def setUp(self):
d = self.mktemp()
eggs_dir = os.path.join(d, 'eggs')
dbs_dir = os.path.join(d, 'dbs')
os.makedirs(eggs_dir)
os.makedirs(dbs_dir)
os.makedirs(os.path.join(eggs_dir, 'mybot1'))
os.makedirs(os.path.join(eggs_dir, 'mybot2'))
config = Config(values={'eggs_dir': eggs_dir, 'dbs_dir': dbs_dir})
self.queues = get_spider_queues(config)
self.poller = QueuePoller(config)
        self.launcher_mock = Mock()
        self.launcher_mock.processes = {}
def test_interface(self):
verifyObject(IPoller, self.poller)
def test_poll_next(self):
self.queues['mybot1'].add('spider1')
self.queues['mybot2'].add('spider2')
d1 = self.poller.next()
d2 = self.poller.next()
        self.assertIsInstance(d1, Deferred)
        self.assertFalse(hasattr(d1, 'result'))
        self.poller.poll(self.launcher_mock)
        self.queues['mybot1'].pop()
        self.poller.poll(self.launcher_mock)
        self.assertEqual(d1.result, {'_project': 'mybot1', '_spider': 'spider1'})
        self.assertEqual(d2.result, {'_project': 'mybot2', '_spider': 'spider2'})
|
from deepbgc.commands.base import BaseCommand
from deepbgc.converter import SequenceToPfamCSVConverter
class PfamCommand(BaseCommand):
command = 'pfam'
help = """Convert genomic BGCs sequence into a pfam domain CSV file by detecting proteins and pfam domains.
Examples:
# Detect proteins and pfam domains in a FASTA sequence and save the result as csv file
deepbgc pfam --pfam Pfam-A.hmm inputSequence.fa outputPfamSequence.csv
"""
def __init__(self, args):
super().__init__(args)
self.input_path = args.input
self.output_path = args.output
self.converter = SequenceToPfamCSVConverter(db_path=args.pfam)
@classmethod
def add_subparser(cls, subparsers):
parser = super().add_subparser(subparsers)
# parser.add_argument('--mode', default='auto', choices=['auto', 'nucl', 'prot', 'pfam'],
# help="Input modes: \n"
# "--mode auto: Automatic based on file extension.\n"
# "--mode nucl: Nucleotide sequence without annotated genes. Will detect genes and pfam domains. \n"
# "--mode prot: Protein sequence. Will detect pfam domains.)")
parser.add_argument('-p', '--pfam', required=True, help="Pfam DB (Pfam-A.hmm) file path.")
parser.add_argument(dest='input', help="Input sequence file path.")
parser.add_argument(dest='output', help="Output pfam CSV file path.")
def run(self):
self.converter.convert(self.input_path, self.output_path)
print()
print('Saved Pfam CSV to: {}'.format(self.output_path))
|
from .dataset_source import DatasetSource
from .dataset_writer import DatasetWriter
|
#!/usr/bin/env python3
import pytest
from report_generator.partner import sponsor
class TestSponsor:
@pytest.fixture(scope="class")
def sponsors(self):
return sponsor.get_all_sponsors("test/data/packages.yaml", "test/data/sponsors.yaml")
@pytest.fixture(scope="class")
def platinum_partner(self, sponsors):
return [sponsor for sponsor in sponsors if sponsor.name == "PSF"][0]
def test_sponsor_number(self, sponsors):
assert len(sponsors) == 1
def test_sponsor_name(self, platinum_partner):
assert platinum_partner.name == "PSF"
def test_sponsor_promotion_web_click(self, platinum_partner):
assert platinum_partner.web_click == 999
def test_sponsor_promotion_web_click_rank_platinum(self, platinum_partner):
assert platinum_partner.web_click_rank == 1
@pytest.mark.skip("No bronze sponsor in test case")
def test_sponsor_promotion_web_click_rank_bronze(self):
answer = sponsor.NA_CONTENT_MESSAGE
self.assertEqual(self.bronze_sponsor.web_click_rank, answer)
@pytest.mark.skip("No bronze sponsor in test case")
def test_sponsor_promotion_fb_len(self):
self.assertEqual(len(self.platinum_partner.facebook_url), 3)
@pytest.mark.skip("No bronze sponsor in test case")
def test_sponsor_promotion_fb_url_reach(self):
url_link = "https://www.facebook.com/pycontw/posts/1657384737713695"
target_url = self.platinum_partner.facebook_url[url_link]
self.assertEqual(target_url["reach"], 1000)
@pytest.mark.skip("No bronze sponsor in test case")
def test_sponsor_promotion_fb_url_engagement(self):
url_link = "https://www.facebook.com/pycontw/posts/1657384737713695"
target_url = self.platinum_partner.facebook_url[url_link]
self.assertEqual(target_url["engagement"], 2000)
|
# Generated by Django 3.0.5 on 2020-04-11 22:34
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='OitsParams',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('status', models.CharField(choices=[('N', 'New'), ('P', 'Processing'), ('C', 'Complete')], default='N', max_length=1)),
('parameters', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddIndex(
model_name='oitsparams',
index=models.Index(fields=['created_at'], name='oits_params_created_706c87_idx'),
),
]
|
import numpy as np
class DynSys():
"""
The dynamical system class.
Author: Haimin Hu (haiminh@princeton.edu)
Reference: Ellipsoidal Toolbox (MATLAB) by Dr. Alex Kurzhanskiy.
Supports:
DTLTI: Discrete-time linear time-invariant system.
x[k+1] = A x[k] + B u[k] + c + G d[k]
DTLTV: Discrete-time linear time-varying system.
x[k+1] = A[k] x[k] + B[k] u[k] + c[k] + G[k] d[k]
CTLTI: Continuous-time linear time-invariant system (not yet implemented).
dx/dt = A x(t) + B u(t) + c + G d(t)
CTLTV: Continuous-time linear time-varying system (not yet implemented).
dx/dt = A(t) x(t) + B(t) u(t) + c(t) + G(t) d(t)
NNCS: Neural-network control system (not yet implemented).
x - state, vector in R^n.
u - control, vector in R^m.
c - constant offset, vector in R^n.
d - disturbance, vector in R^l.
A - system matrix, in R^(nxn).
B - control matrix, in R^(nxm).
G - disturbance matrix, in R^(nxl).
Todo list:
    - Account for output map and noise: y(t) = C(t) x(t) + w(t).
"""
def __init__(self, sys_type, A, B, c=np.array([]), G=np.array([]), T=0):
"""
Constructor for dynamical system object.
Args:
sys_type (str): system type.
A (np.ndarray or a list of np.ndarray): system matrix.
B (np.ndarray or a list of np.ndarray): control matrix.
c (np.ndarray or a list of np.ndarray, optional): offset vector.
G (np.ndarray or a list of np.ndarray, optional): disturbance matrix.
T (int): time horizon (for time-varying systems).
"""
# Discrete-time linear time-invariant system (DTLTI).
if sys_type == 'DTLTI':
self.sys_type = 'DTLTI'
# A matrix
if not isinstance(A, np.ndarray):
raise ValueError(
"[ellReach-DynSys] A must be an np.ndarray for DTLTI systems."
)
n = A.shape[0]
if n != A.shape[1]:
raise ValueError("[ellReach-DynSys] A must be a square matrix.")
self.A = A
# B matrix
if np.size(B) > 0:
if not isinstance(B, np.ndarray):
raise ValueError(
"[ellReach-DynSys] B must be an np.ndarray for DTLTI systems."
)
if n != B.shape[0]:
raise ValueError(
"[ellReach-DynSys] Dimensions of A and B do not match."
)
                self.B = B
            else:
                self.B = np.array([])
# c vector
if np.size(c) == 0:
self.c = np.zeros((n, 1))
else:
if not isinstance(c, np.ndarray):
raise ValueError(
"[ellReach-DynSys] c must be an np.ndarray for DTLTI systems."
)
if n != c.shape[0]:
raise ValueError(
"[ellReach-DynSys] Dimensions of A and c do not match."
)
self.c = c
# G matrix
if np.size(G) > 0:
if not isinstance(G, np.ndarray):
raise ValueError(
"[ellReach-DynSys] G must be an np.ndarray for DTLTI systems."
)
if n != G.shape[0]:
raise ValueError(
"[ellReach-DynSys] Dimensions of A and G do not match."
)
                self.G = G
            else:
                self.G = np.array([])
# Discrete-time linear time-varying system (DTLTV).
elif sys_type == 'DTLTV':
self.sys_type = 'DTLTV'
if not isinstance(T, int) or not T > 0:
raise ValueError("[ellReach-DynSys] T must be a positive integer.")
self.T = T
# A matrices
if not isinstance(A, list):
raise ValueError(
"[ellReach-DynSys] A must be a list for DTLTV systems."
)
if len(A) != T-1:
raise ValueError("[ellReach-DynSys] T and length of A do not match.")
n = A[0].shape[0]
self.A = A
# B matrices
if np.size(B) > 0:
if not isinstance(B, list):
raise ValueError(
"[ellReach-DynSys] B must be a list for DTLTV systems."
)
if len(B) != T-1:
raise ValueError("[ellReach-DynSys] T and length of B do not match.")
                self.B = B
            else:
                self.B = []
# c vectors
if np.size(c) == 0:
self.c = [np.zeros((n, 1))] * T
else:
if not isinstance(c, list):
raise ValueError(
"[ellReach-DynSys] c must be a list for DTLTV systems."
)
if len(c) != T-1:
raise ValueError("[ellReach-DynSys] T and length of c do not match.")
self.c = c
# G matrices
if np.size(G) > 0:
if not isinstance(G, list):
raise ValueError(
"[ellReach-DynSys] G must be a list for DTLTV systems."
)
if len(G) != T-1:
raise ValueError("[ellReach-DynSys] T and length of G do not match.")
                self.G = G
            else:
                self.G = []
else:
raise ValueError("[ellReach-DynSys] Unsupported system type.")
def display(self):
"""
Displays information of the DynSys object.
"""
print("\n")
print("System type: ", self.sys_type)
if self.sys_type == 'DTLTI':
print("A matrix: \n", self.A)
if not self.autonomous():
print("B matrix: \n", self.B)
else:
print("This is an autonomous system.")
print("c vector: \n", self.c)
if not self.no_dstb():
print("G matrix: \n", self.G)
else:
print("This system has no disturbance.")
elif self.sys_type == 'DTLTV':
print("Horizon T =", self.T)
if self.autonomous():
print("This is an autonomous system.")
if self.no_dstb():
print("This system has no disturbance.")
print("\n")
def time_varying(self):
"""
Check if the system is time-varying.
"""
if self.sys_type == 'DTLTV' or self.sys_type == 'CTLTV':
return True
else:
return False
def autonomous(self):
"""
Check if the system is autonomous (empty B matrix).
"""
if np.size(self.B) == 0:
return True
else:
return False
def no_dstb(self):
"""
        Check if the system has no disturbances (empty G matrix).
"""
if np.size(self.G) == 0:
return True
else:
return False
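# A minimal usage sketch (illustrative only, not part of the original module):
# per the class docstring, a DTLTI system follows x[k+1] = A x[k] + B u[k] + c + G d[k],
# so a discrete-time double integrator with sampling time dt can be wrapped as below.
if __name__ == '__main__':
    dt = 0.1
    A_demo = np.array([[1.0, dt], [0.0, 1.0]])
    B_demo = np.array([[0.5 * dt ** 2], [dt]])
    G_demo = np.array([[0.0], [dt]])
    demo_sys = DynSys('DTLTI', A_demo, B_demo, G=G_demo)
    demo_sys.display()  # prints A, B, the default zero offset c, and the disturbance matrix G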
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float64MultiArray
import math
pub = rospy.Publisher('/adaptive_lighting/light_driver/pwm', Float64MultiArray, queue_size=10)
rospy.init_node('sin_test')
t = 0.0
intensity = 1.0
rate = 200.0
r = rospy.Rate(rate)
while not rospy.is_shutdown():
msg = Float64MultiArray()
    val = intensity * abs(math.sin(t))
    val_shift = intensity * abs(math.cos(t))
currents = [0, 0, val, val_shift]
msg.data = currents
pub.publish(msg)
    t = t + 1.0/rate
r.sleep()
|
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2018 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from elasticsearch import Elasticsearch
from pygeoapi.provider.base import BaseProvider
class ElasticsearchProvider(BaseProvider):
"""Elasticsearch Provider"""
def __init__(self, definition):
"""initializer"""
BaseProvider.__init__(self, definition)
url_tokens = self.url.split('/')
self.index_name = url_tokens[-2]
self.type_name = url_tokens[-1]
self.es_host = url_tokens[2]
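        # For instance, a definition URL of http://localhost:9200/my_index/my_type
        # splits into es_host 'localhost:9200', index_name 'my_index' and
        # type_name 'my_type' (the host and port here are illustrative only).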
self.es = Elasticsearch(self.es_host)
def query(self, startindex=0, count=10, resulttype='results'):
"""
query ES
:returns: dict of 0..n GeoJSON features
"""
feature_collection = {
'type': 'FeatureCollection',
'features': []
}
results = self.es.search(index=self.index_name, from_=startindex,
size=count)
if resulttype == 'hits':
feature_collection['numberMatched'] = results['hits']['total']
return feature_collection
for feature in results['hits']['hits']:
id_ = feature['_source']['properties']['identifier']
feature['_source']['ID'] = id_
feature_collection['features'].append(feature['_source'])
return feature_collection
def get(self, identifier):
"""
Get ES document by id
:param identifier: feature id
:returns: dict of single GeoJSON feature
"""
try:
result = self.es.get(self.index_name, doc_type=self.type_name,
id=identifier)
id_ = result['_source']['properties']['identifier']
result['_source']['ID'] = id_
        except Exception:
return None
return result['_source']
def __repr__(self):
return '<ElasticsearchProvider> {}'.format(self.url)
|
## Copyright (c) 2010, Coptix, Inc. All rights reserved.
## See the LICENSE file for license terms and warranty disclaimer.
"""prelude -- extra builtins"""
from __future__ import absolute_import
import os, __builtin__ as py, contextlib
import abc, functools as fn, logging, collections as coll, itertools as it
__all__ = (
'abc', 'log', 'setattrs', 'basename', 'dirname',
'Sequence', 'deque', 'first', 'chain', 'groupby', 'imap', 'izip', 'ichain',
'ifilter', 'filter', 'append', 'extend',
'Mapping', 'ddict', 'namedtuple', 'items', 'keys', 'values', 'chain_items',
'get', 'setitems', 'update', 'setdefault', 'ipop', 'pop',
'partial', 'wraps', 'thunk', 'contextmanager'
)
### General
dirname = os.path.dirname
basename = os.path.basename
def setattrs(obj, items=None, **kwargs):
for (key, val) in chain_items(items, kwargs):
setattr(obj, key, val)
return obj
### Logging
log = logging.getLogger(basename(dirname(__file__)))
log.addHandler(logging.StreamHandler())
### Sequences
Sequence = coll.Sequence
deque = coll.deque
chain = it.chain
groupby = it.groupby
imap = it.imap
def first(seq, default=None):
return next(seq, default)
def filter(pred, seq=None):
return py.filter(None, pred) if seq is None else py.filter(pred, seq)
def ifilter(pred, seq=None):
return it.ifilter(bool, pred) if seq is None else it.ifilter(pred, seq)
def izip(*args, **kwargs):
return (it.izip_longest if kwargs else it.izip)(*args, **kwargs)
def ichain(sequences):
return (x for s in sequences for x in s)
def append(obj, seq):
for item in seq:
obj.append(item)
return obj
def extend(obj, seq):
obj.extend(seq)
return obj
### Mappings
Mapping = coll.Mapping
ddict = coll.defaultdict
namedtuple = coll.namedtuple
def keys(seq):
if isinstance(seq, Mapping):
return seq.iterkeys()
return (k for (k, _) in items(seq))
def values(seq):
if isinstance(seq, Mapping):
return seq.itervalues()
return (v for (_, v) in items(seq))
def items(obj):
if isinstance(obj, Mapping):
return obj.iteritems()
return obj
def chain_items(*obj):
return ichain(items(o) for o in obj if o is not None)
def setitems(obj, items=None, **kwargs):
for (key, val) in chain_items(items, kwargs):
obj[key] = val
return obj
def get(obj, key, default=None):
if hasattr(obj, 'get'):
return obj.get(key, default)
return next((v for (k, v) in obj if k == key), default)
def update(obj, *args, **kwargs):
obj.update(*args, **kwargs)
return obj
def setdefault(obj, items=None, **kwargs):
for (key, val) in chain_items(items, kwargs):
obj.setdefault(key, val)
return obj
def ipop(obj, *keys, **kwargs):
default = kwargs.get('default')
return ((k, obj.pop(k, default)) for k in keys)
def pop(obj, *keys, **kwargs):
default = kwargs.get('default')
if len(keys) == 1:
return obj.pop(keys[0], default)
return (obj.pop(k, default) for k in keys)
### Procedures
partial = fn.partial
wraps = fn.wraps
contextmanager = contextlib.contextmanager
class thunk(object):
"""Like partial, but ignores any new arguments."""
__slots__ = ('func', 'args', 'keywords')
def __init__(self, func, *args, **keywords):
self.func = func
self.args = args
self.keywords = keywords
def __repr__(self):
return '<%s %r args=%r kwargs=%r>' % (
type(self).__name__,
self.func,
self.args,
self.keywords
)
def __call__(self, *args, **kwargs):
return self.func(*self.args, **self.keywords)
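# A small sketch (not from the original module) of the behaviour documented above:
# unlike functools.partial, a thunk discards whatever is passed at call time.
if __name__ == '__main__':
    def _demo_add(a, b):
        return a + b
    _add_one_two = thunk(_demo_add, 1, 2)
    assert _add_one_two() == 3
    assert _add_one_two(10, 20) == 3  # the extra arguments are ignored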
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ATI_fragment_shader'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ATI_fragment_shader',error_checker=_errors._error_checker)
GL_2X_BIT_ATI=_C('GL_2X_BIT_ATI',0x00000001)
GL_4X_BIT_ATI=_C('GL_4X_BIT_ATI',0x00000002)
GL_8X_BIT_ATI=_C('GL_8X_BIT_ATI',0x00000004)
GL_ADD_ATI=_C('GL_ADD_ATI',0x8963)
GL_BIAS_BIT_ATI=_C('GL_BIAS_BIT_ATI',0x00000008)
GL_BLUE_BIT_ATI=_C('GL_BLUE_BIT_ATI',0x00000004)
GL_CND0_ATI=_C('GL_CND0_ATI',0x896B)
GL_CND_ATI=_C('GL_CND_ATI',0x896A)
GL_COLOR_ALPHA_PAIRING_ATI=_C('GL_COLOR_ALPHA_PAIRING_ATI',0x8975)
GL_COMP_BIT_ATI=_C('GL_COMP_BIT_ATI',0x00000002)
GL_CON_0_ATI=_C('GL_CON_0_ATI',0x8941)
GL_CON_10_ATI=_C('GL_CON_10_ATI',0x894B)
GL_CON_11_ATI=_C('GL_CON_11_ATI',0x894C)
GL_CON_12_ATI=_C('GL_CON_12_ATI',0x894D)
GL_CON_13_ATI=_C('GL_CON_13_ATI',0x894E)
GL_CON_14_ATI=_C('GL_CON_14_ATI',0x894F)
GL_CON_15_ATI=_C('GL_CON_15_ATI',0x8950)
GL_CON_16_ATI=_C('GL_CON_16_ATI',0x8951)
GL_CON_17_ATI=_C('GL_CON_17_ATI',0x8952)
GL_CON_18_ATI=_C('GL_CON_18_ATI',0x8953)
GL_CON_19_ATI=_C('GL_CON_19_ATI',0x8954)
GL_CON_1_ATI=_C('GL_CON_1_ATI',0x8942)
GL_CON_20_ATI=_C('GL_CON_20_ATI',0x8955)
GL_CON_21_ATI=_C('GL_CON_21_ATI',0x8956)
GL_CON_22_ATI=_C('GL_CON_22_ATI',0x8957)
GL_CON_23_ATI=_C('GL_CON_23_ATI',0x8958)
GL_CON_24_ATI=_C('GL_CON_24_ATI',0x8959)
GL_CON_25_ATI=_C('GL_CON_25_ATI',0x895A)
GL_CON_26_ATI=_C('GL_CON_26_ATI',0x895B)
GL_CON_27_ATI=_C('GL_CON_27_ATI',0x895C)
GL_CON_28_ATI=_C('GL_CON_28_ATI',0x895D)
GL_CON_29_ATI=_C('GL_CON_29_ATI',0x895E)
GL_CON_2_ATI=_C('GL_CON_2_ATI',0x8943)
GL_CON_30_ATI=_C('GL_CON_30_ATI',0x895F)
GL_CON_31_ATI=_C('GL_CON_31_ATI',0x8960)
GL_CON_3_ATI=_C('GL_CON_3_ATI',0x8944)
GL_CON_4_ATI=_C('GL_CON_4_ATI',0x8945)
GL_CON_5_ATI=_C('GL_CON_5_ATI',0x8946)
GL_CON_6_ATI=_C('GL_CON_6_ATI',0x8947)
GL_CON_7_ATI=_C('GL_CON_7_ATI',0x8948)
GL_CON_8_ATI=_C('GL_CON_8_ATI',0x8949)
GL_CON_9_ATI=_C('GL_CON_9_ATI',0x894A)
GL_DOT2_ADD_ATI=_C('GL_DOT2_ADD_ATI',0x896C)
GL_DOT3_ATI=_C('GL_DOT3_ATI',0x8966)
GL_DOT4_ATI=_C('GL_DOT4_ATI',0x8967)
GL_EIGHTH_BIT_ATI=_C('GL_EIGHTH_BIT_ATI',0x00000020)
GL_FRAGMENT_SHADER_ATI=_C('GL_FRAGMENT_SHADER_ATI',0x8920)
GL_GREEN_BIT_ATI=_C('GL_GREEN_BIT_ATI',0x00000002)
GL_HALF_BIT_ATI=_C('GL_HALF_BIT_ATI',0x00000008)
GL_LERP_ATI=_C('GL_LERP_ATI',0x8969)
GL_MAD_ATI=_C('GL_MAD_ATI',0x8968)
GL_MOV_ATI=_C('GL_MOV_ATI',0x8961)
GL_MUL_ATI=_C('GL_MUL_ATI',0x8964)
GL_NEGATE_BIT_ATI=_C('GL_NEGATE_BIT_ATI',0x00000004)
GL_NUM_FRAGMENT_CONSTANTS_ATI=_C('GL_NUM_FRAGMENT_CONSTANTS_ATI',0x896F)
GL_NUM_FRAGMENT_REGISTERS_ATI=_C('GL_NUM_FRAGMENT_REGISTERS_ATI',0x896E)
GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI=_C('GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI',0x8973)
GL_NUM_INSTRUCTIONS_PER_PASS_ATI=_C('GL_NUM_INSTRUCTIONS_PER_PASS_ATI',0x8971)
GL_NUM_INSTRUCTIONS_TOTAL_ATI=_C('GL_NUM_INSTRUCTIONS_TOTAL_ATI',0x8972)
GL_NUM_LOOPBACK_COMPONENTS_ATI=_C('GL_NUM_LOOPBACK_COMPONENTS_ATI',0x8974)
GL_NUM_PASSES_ATI=_C('GL_NUM_PASSES_ATI',0x8970)
GL_QUARTER_BIT_ATI=_C('GL_QUARTER_BIT_ATI',0x00000010)
GL_RED_BIT_ATI=_C('GL_RED_BIT_ATI',0x00000001)
GL_REG_0_ATI=_C('GL_REG_0_ATI',0x8921)
GL_REG_10_ATI=_C('GL_REG_10_ATI',0x892B)
GL_REG_11_ATI=_C('GL_REG_11_ATI',0x892C)
GL_REG_12_ATI=_C('GL_REG_12_ATI',0x892D)
GL_REG_13_ATI=_C('GL_REG_13_ATI',0x892E)
GL_REG_14_ATI=_C('GL_REG_14_ATI',0x892F)
GL_REG_15_ATI=_C('GL_REG_15_ATI',0x8930)
GL_REG_16_ATI=_C('GL_REG_16_ATI',0x8931)
GL_REG_17_ATI=_C('GL_REG_17_ATI',0x8932)
GL_REG_18_ATI=_C('GL_REG_18_ATI',0x8933)
GL_REG_19_ATI=_C('GL_REG_19_ATI',0x8934)
GL_REG_1_ATI=_C('GL_REG_1_ATI',0x8922)
GL_REG_20_ATI=_C('GL_REG_20_ATI',0x8935)
GL_REG_21_ATI=_C('GL_REG_21_ATI',0x8936)
GL_REG_22_ATI=_C('GL_REG_22_ATI',0x8937)
GL_REG_23_ATI=_C('GL_REG_23_ATI',0x8938)
GL_REG_24_ATI=_C('GL_REG_24_ATI',0x8939)
GL_REG_25_ATI=_C('GL_REG_25_ATI',0x893A)
GL_REG_26_ATI=_C('GL_REG_26_ATI',0x893B)
GL_REG_27_ATI=_C('GL_REG_27_ATI',0x893C)
GL_REG_28_ATI=_C('GL_REG_28_ATI',0x893D)
GL_REG_29_ATI=_C('GL_REG_29_ATI',0x893E)
GL_REG_2_ATI=_C('GL_REG_2_ATI',0x8923)
GL_REG_30_ATI=_C('GL_REG_30_ATI',0x893F)
GL_REG_31_ATI=_C('GL_REG_31_ATI',0x8940)
GL_REG_3_ATI=_C('GL_REG_3_ATI',0x8924)
GL_REG_4_ATI=_C('GL_REG_4_ATI',0x8925)
GL_REG_5_ATI=_C('GL_REG_5_ATI',0x8926)
GL_REG_6_ATI=_C('GL_REG_6_ATI',0x8927)
GL_REG_7_ATI=_C('GL_REG_7_ATI',0x8928)
GL_REG_8_ATI=_C('GL_REG_8_ATI',0x8929)
GL_REG_9_ATI=_C('GL_REG_9_ATI',0x892A)
GL_SATURATE_BIT_ATI=_C('GL_SATURATE_BIT_ATI',0x00000040)
GL_SECONDARY_INTERPOLATOR_ATI=_C('GL_SECONDARY_INTERPOLATOR_ATI',0x896D)
GL_SUB_ATI=_C('GL_SUB_ATI',0x8965)
GL_SWIZZLE_STQ_ATI=_C('GL_SWIZZLE_STQ_ATI',0x8977)
GL_SWIZZLE_STQ_DQ_ATI=_C('GL_SWIZZLE_STQ_DQ_ATI',0x8979)
GL_SWIZZLE_STRQ_ATI=_C('GL_SWIZZLE_STRQ_ATI',0x897A)
GL_SWIZZLE_STRQ_DQ_ATI=_C('GL_SWIZZLE_STRQ_DQ_ATI',0x897B)
GL_SWIZZLE_STR_ATI=_C('GL_SWIZZLE_STR_ATI',0x8976)
GL_SWIZZLE_STR_DR_ATI=_C('GL_SWIZZLE_STR_DR_ATI',0x8978)
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glAlphaFragmentOp1ATI(op,dst,dstMod,arg1,arg1Rep,arg1Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glAlphaFragmentOp2ATI(op,dst,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glAlphaFragmentOp3ATI(op,dst,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod,arg3,arg3Rep,arg3Mod):pass
@_f
@_p.types(None,)
def glBeginFragmentShaderATI():pass
@_f
@_p.types(None,_cs.GLuint)
def glBindFragmentShaderATI(id):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glColorFragmentOp1ATI(op,dst,dstMask,dstMod,arg1,arg1Rep,arg1Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glColorFragmentOp2ATI(op,dst,dstMask,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint,_cs.GLuint)
def glColorFragmentOp3ATI(op,dst,dstMask,dstMod,arg1,arg1Rep,arg1Mod,arg2,arg2Rep,arg2Mod,arg3,arg3Rep,arg3Mod):pass
@_f
@_p.types(None,_cs.GLuint)
def glDeleteFragmentShaderATI(id):pass
@_f
@_p.types(None,)
def glEndFragmentShaderATI():pass
@_f
@_p.types(_cs.GLuint,_cs.GLuint)
def glGenFragmentShadersATI(range):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum)
def glPassTexCoordATI(dst,coord,swizzle):pass
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLenum)
def glSampleMapATI(dst,interp,swizzle):pass
@_f
@_p.types(None,_cs.GLuint,arrays.GLfloatArray)
def glSetFragmentShaderConstantATI(dst,value):pass
|
import sys
import os
from .constants import VERSION, DEFAULT_PROCESSES, CONFIG_FILE_NAMES, PROJECT_BOUNDARIES
from .backports import Backports
from .features import Features
from .config import Config
from .printing import nprint
from . import formats
class Arguments:
def __init__(self, args):
self.__args = args
@staticmethod
def print_usage(full=False):
print("Vermin {}".format(VERSION))
print("Usage: {} [options] <python source files and folders..>".format(sys.argv[0]))
print("\nConcurrently detect the minimum Python versions needed to run code.")
if not full:
print("\nFor full help and options, use `-h` or `--help`.")
print("\nHeuristics are employed to determine which files to analyze:\n"
" - 'py', 'py3', 'pyw', 'pyj', 'pyi' are always scanned\n"
" - 'pyc', 'pyd', 'pxd', 'pyx', 'pyo' are ignored (including various other files)\n"
" - Magic lines with 'python' are accepted, like: #!/usr/bin/env python\n"
" - Files that cannot be opened for reading as text devices are ignored")
print("\nHowever, files directly specified are always attempted parsing, even without\n"
"accepted extensions or heuristics.")
print("\nResults interpretation:")
print(" ~2 No known reason it won't work with py2.")
print(" !2 It is known that it won't work with py2.")
print(" 2.5, !3 Works with 2.5+ but it is known it won't work with py3.")
print(" ~2, 3.4 No known reason it won't work with py2, works with 3.4+")
print("\nIncompatible versions notices mean that several files were detected incompatible\n"
"with py2 and py3 simultaneously. In such cases the results might be inconclusive.")
print("\nA config file is automatically tried detected from the current working directory\n"
"where Vermin is run, following parent folders until either the root or project\n"
"boundary files/folders are reached. However, if --config-file is specified, no config\n"
"is auto-detected and loaded.")
if full:
print("\nConfig file names being looked for: {}\n"
"Project boundary files/folders: {}".
format(", ".join(["'{}'".format(fn) for fn in CONFIG_FILE_NAMES]),
", ".join(["'{}'".format(pb) for pb in PROJECT_BOUNDARIES])))
print("\nOptions:")
print(" --quiet | -q\n"
" Quiet mode. If used together with --violations, quiet mode is preserved\n"
" while showing only violations: no descriptive text, tips, or verdicts.\n")
print(" --no-quiet (default)\n"
" Disable quiet mode.\n")
print(" -v.. Verbosity level 1 to 4. -v, -vv, -vvv, and -vvvv shows increasingly more\n"
" information.\n"
" -v will show the individual versions required per file.\n"
" -vv will also show which modules, functions etc. that constitutes\n"
" the requirements.\n"
" -vvv will also show line/col numbers.\n"
" -vvvv will also show user-defined symbols being ignored.\n")
print(" --target=V | -t=V\n"
" Target version that files must abide by. Can be specified once or twice.\n"
" A '-' can be appended to match target version or smaller, like '-t=3.5-'.\n"
" If not met Vermin will exit with code 1. Note that the amount of target\n"
" versions must match the amount of minimum required versions detected.\n"
" However, if used in conjunction with --violations, and no rules are\n"
" triggered, it will exit with code 0.\n")
print(" --no-target (default)\n"
" Don't expect certain target version(s).\n")
print(" --processes=N | -p=N\n"
" Use N concurrent processes to detect and analyze files. Defaults to all\n"
" cores ({}).\n".format(DEFAULT_PROCESSES))
print(" --ignore | -i\n"
" Ignore incompatible versions and warnings. However, if no compatible\n"
" versions are found then incompatible versions will be shown in the end to\n"
" not have an absence of results.\n")
print(" --no-ignore (default)\n"
" Don't ignore incompatible versions and warnings.\n")
print(" --dump | -d\n"
" Dump AST node visits.\n")
print(" --no-dump (default)\n"
" Don't dump AST node visits.")
print("\n --help | -h\n"
" Shows this information and exists.")
print("\n --version | -V\n"
" Shows version number and exits.")
print("\n --config-file <path> | -c <path>\n"
" Loads config file unless --no-config-file is specified. Any additional\n"
" arguments supplied are applied on top of that config. See configuration\n"
" section above for more information.")
print("\n --no-config-file\n"
" No automatic config file detection and --config-file argument is disallowed.")
print("\n --hidden\n"
" Analyze 'hidden' files and folders starting with '.'.")
print("\n --no-hidden (default)\n"
" Don't analyze hidden files and folders unless specified directly.")
print("\n --versions\n"
" In the end, print all unique versions required by the analysed code.")
print("\n --show-tips (default)\n"
" Show helpful tips at the end, like those relating to backports or usage of\n"
" unevaluated generic/literal annotations.")
print("\n --no-tips\n"
" Don't show tips.")
print("\n --violations | --lint\n"
" Show only results that violate versions described by --target arguments,\n"
" which are required to be specified. Verbosity mode is automatically set to\n"
" at least 2 in order to show violations in output text, but can be increased\n"
" if necessary.\n\n"
" If no rules are triggered while used in conjunction with --target, an exit\n"
" code 0 will still be yielded due to inconclusivity.\n\n"
" Can be used together with --quiet such that only the violations are shown:\n"
" no descriptive text, tips, or verdicts.")
print("\n --no-violations | --no-lint (default)\n"
" Show regular results.")
print("\n --pessimistic\n"
" Pessimistic mode: syntax errors are interpreted as the major Python version\n"
" in use being incompatible.")
print("\n --no-pessimistic (default)\n"
" Disable pessimistic mode.")
print("\n --eval-annotations\n"
" Instructs parser that annotations will be manually evaluated in code, which\n"
" changes minimum versions in certain cases. Otherwise, function and variable\n"
" annotations are not evaluated at definition time. Apply this argument if\n"
" code uses `typing.get_type_hints` or `eval(obj.__annotations__)` or\n"
" otherwise forces evaluation of annotations.")
print("\n --no-eval-annotations (default)\n"
" Disable annotations evaluation.")
print("\n --parse-comments (default)\n"
" Parse for comments to influence exclusion of code for analysis via\n"
" \"# novm\" and \"# novermin\".")
print("\n --no-parse-comments\n"
" Don't parse for comments. Not parsing comments can sometimes yield a speedup\n"
" of 30-40%+.")
print("\n --scan-symlink-folders\n"
" Scan symlinks to folders to include in analysis.")
print("\n --no-symlink-folders (default)\n"
" Don't scan symlinks to folders to include in analysis. Symlinks\n"
" to non-folders or top-level folders will always be scanned.")
print("\n --format <name> | -f <name>\n"
" Format to show results and output in.\n"
" Supported formats:\n{}".format(formats.help_str(10)))
print("\n [--exclude <name>] ...\n"
" Exclude full names, like 'email.parser.FeedParser', from analysis. Useful to\n"
" ignore conditional logic that can trigger incompatible results.\n\n"
" Examples:\n"
" Exclude 'foo.bar.baz' module/member: --exclude 'foo.bar.baz'\n"
" Exclude 'foo' kwarg: --exclude 'somemodule.func(foo)'\n"
" Exclude 'bar' codecs error handler: --exclude 'ceh=bar'\n"
" Exclude 'baz' codecs encoding: --exclude 'ce=baz'")
print("\n [--exclude-file <file name>] ...\n"
" Exclude full names like --exclude but from a specified file instead. Each\n"
" line constitutes an exclusion with the same format as with --exclude.")
print("\n --no-exclude (default)\n"
" Use no excludes. Clears any excludes specified before this.")
print("\n [--backport <name>] ...\n"
" Some features are sometimes backported into packages, in repositories such\n"
" as PyPi, that are widely used but aren't in the standard language. If such a\n"
" backport is specified as being used, the results will reflect that instead."
"\n\n"
" Supported backports:\n{}".format(Backports.str(10)))
print("\n --no-backport (default)\n"
" Use no backports. Clears any backports specified before this.")
print("\n [--feature <name>] ...\n"
" Some features are disabled by default due to being unstable:\n{}".
format(Features.str(10)))
print("\n --no-feature (default)\n"
" Use no features. Clears any features specified before this.")
def parse(self, config, detect_folder=None):
    assert config is not None
if len(self.__args) == 0:
return {"code": 1, "usage": True, "full": False}
path_pos = 0
versions = False
fmt = None
detected_config = Config.detect_config_file(detect_folder)
argument_config = None
no_config_file = False
# Preparsing step. Help and version arguments quit immediately and config file parsing must be
# done first such that other arguments can override its settings.
for i in range(len(self.__args)):
arg = self.__args[i]
if arg in ("--help", "-h"):
return {"code": 0, "usage": True, "full": True}
if arg in ("--version", "-V"):
print(VERSION)
sys.exit(0)
if arg == "--no-config-file":
no_config_file = True
detected_config = None
if arg in ("--config-file", "-c"):
if (i + 1) >= len(self.__args):
print("Requires config file path! Example: --config-file /path/to/vermin.ini")
return {"code": 1}
argument_config = os.path.abspath(self.__args[i + 1])
if no_config_file and argument_config:
print("--config-file cannot be used together with --no-config-file!")
return {"code": 1}
# Load potential config file if detected or specified as argument, but prefer config by
# argument.
config_candidate = argument_config or detected_config
loaded_config = False
if config_candidate:
c = Config.parse_file(config_candidate)
if c is None:
return {"code": 1}
loaded_config = True
config.override_from(c)
# Main parsing step.
for i in range(len(self.__args)):
arg = self.__args[i]
if arg in ("--config-file", "-c"):
# Config file parsed again only to ensure path position is correctly increased: reaching
# this point means a well-formed config file was specified and parsed.
path_pos += 2
elif arg in ("--quiet", "-q"):
config.set_quiet(True)
path_pos += 1
elif arg == "--no-quiet":
config.set_quiet(False)
path_pos += 1
elif arg.startswith("-v"):
config.set_verbose(arg.count("v"))
path_pos += 1
elif arg.startswith("-t=") or arg.startswith("--target="):
value = arg.split("=")[1]
if not config.add_target(value):
print("Invalid target: {}".format(value))
return {"code": 1}
path_pos += 1
elif arg == "--no-target":
config.clear_targets()
path_pos += 1
elif arg in ("--ignore", "-i"):
config.set_ignore_incomp(True)
path_pos += 1
elif arg == "--no-ignore":
config.set_ignore_incomp(False)
path_pos += 1
elif arg.startswith("-p=") or arg.startswith("--processes="):
value = arg.split("=")[1]
try:
processes = int(value)
if processes <= 0:
print("Non-positive number: {}".format(processes))
return {"code": 1}
config.set_processes(processes)
except ValueError:
print("Invalid value: {}".format(value))
return {"code": 1}
path_pos += 1
elif arg == "--no-dump":
config.set_print_visits(False)
path_pos += 1
elif arg in ("--dump", "-d"):
config.set_print_visits(True)
path_pos += 1
elif arg == "--hidden":
config.set_analyze_hidden(True)
path_pos += 1
elif arg == "--no-hidden":
config.set_analyze_hidden(False)
path_pos += 1
elif arg == "--versions":
versions = True
path_pos += 1
elif arg == "--show-tips":
config.set_show_tips(True)
path_pos += 1
elif arg == "--no-tips":
config.set_show_tips(False)
path_pos += 1
elif arg in ("--format", "-f"):
if (i + 1) >= len(self.__args):
print("Format requires a name! Example: --format parsable")
return {"code": 1}
fmt_str = self.__args[i + 1].lower()
fmt = formats.from_name(fmt_str)
if fmt is None:
print("Unknown format: {}".format(fmt_str))
return {"code": 1}
path_pos += 2
elif arg == "--exclude":
if (i + 1) >= len(self.__args):
print("Exclusion requires a name! Example: --exclude email.parser.FeedParser")
return {"code": 1}
config.add_exclusion(self.__args[i + 1])
path_pos += 2
elif arg == "--exclude-file":
if (i + 1) >= len(self.__args):
print("Exclusion requires a file name! Example: --exclude-file '~/exclusions.txt'")
return {"code": 1}
config.add_exclusion_file(self.__args[i + 1])
path_pos += 2
elif arg == "--no-exclude":
config.clear_exclusions()
path_pos += 1
elif arg == "--backport":
if (i + 1) >= len(self.__args):
print("Requires a backport name! Example: --backport typing")
return {"code": 1}
name = self.__args[i + 1]
if not config.add_backport(name):
print("Unknown backport: {}".format(name))
return {"code": 1}
path_pos += 2
elif arg == "--no-backport":
config.clear_backports()
path_pos += 1
elif arg == "--feature":
if (i + 1) >= len(self.__args):
print("Requires a feature name! Example: --feature fstring-self-doc")
return {"code": 1}
name = self.__args[i + 1]
if not config.enable_feature(name):
print("Unknown feature: {}".format(name))
return {"code": 1}
path_pos += 2
elif arg == "--no-feature":
config.clear_features()
path_pos += 1
elif arg == "--pessimistic":
config.set_pessimistic(True)
path_pos += 1
elif arg == "--no-pessimistic":
config.set_pessimistic(False)
path_pos += 1
elif arg == "--eval-annotations":
config.set_eval_annotations(True)
path_pos += 1
elif arg == "--no-eval-annotations":
config.set_eval_annotations(False)
path_pos += 1
elif arg in ("--violations", "--lint"):
config.set_only_show_violations(True)
path_pos += 1
elif arg in ("--no-violations", "--no-lint"):
config.set_only_show_violations(False)
path_pos += 1
elif arg == "--parse-comments":
config.set_parse_comments(True)
path_pos += 1
elif arg == "--no-parse-comments":
config.set_parse_comments(False)
path_pos += 1
elif arg == "--scan-symlink-folders":
config.set_scan_symlink_folders(True)
path_pos += 1
elif arg == "--no-symlink-folders":
config.set_scan_symlink_folders(False)
path_pos += 1
if fmt is not None:
config.set_format(fmt)
if config.only_show_violations():
if len(config.targets()) == 0:
print("Showing violations requires target(s) to be specified!")
return {"code": 1}
# Automatically set minimum verbosity mode 2 for violations mode.
if config.verbose() < 2:
config.set_verbose(2)
if config.quiet() and config.verbose() > 0 and not config.only_show_violations():
print("Cannot use quiet and verbose modes together!")
return {"code": 1}
parsable = config.format().name() == "parsable"
if parsable:
versions = False
if loaded_config and detected_config and not argument_config and not parsable:
nprint("Using detected config: {}".format(detected_config), config)
paths = self.__args[path_pos:]
return {"code": 0,
"paths": paths,
"versions": versions}
|
from . import util
from .vectors import VectorMap
def load(name=None, via=None):
package = util.get_package_by_name(name, via=via)
vector_map = VectorMap(128)
vector_map.load(package.path)
return vector_map
|
import cv2
import math
import pandas as pd
import numpy as np
import time, sys, os, shutil
import yaml
from multiprocessing import Process, Queue
from Queue import Empty
import random
import imageFeatures as imf
import pickle
from sklearn import gaussian_process
"""
# This script collects data
if len(sys.argv) < 2:
print "No configuration file specified"
collectData = False
config = None
else:
collectData = True
try:
with open(sys.argv[1]) as f:
config = yaml.load(f.read())
except:
print "Error:", sys.exc_info()[0]
raise
"""
def currentTimestamp():
return pd.Timestamp(time.time()*1000000000)
def imageSaver(foldername, q):
while True:
toSave = None
try:
toSave = q.get(True, 1)
except Empty:
pass
        if toSave is not None:
            if toSave is False:
print "Done"
break
name, frame = toSave
cv2.imwrite(foldername + '/' + name, frame, [cv2.IMWRITE_PNG_COMPRESSION, 9])
print "Wrote", foldername + '/' + name
"""
if collectData:
# Parse the configuration file
if 'settingsFile' in config:
rdf = pd.read_csv(config['settingsFile'])
totalFrames = len(rdf)
gains0 = rdf['Gain 0']
shutters0 = rdf['Shutter 0']
gains1 = rdf['Gain 1']
shutters1 = rdf['Shutter 1']
timestamps = pd.Series([currentTimestamp()] * totalFrames)
features = pd.Series([0] * totalFrames)
imageFiles0 = pd.Series([''] * totalFrames)
imageFiles1 = pd.Series([''] * totalFrames)
frames = rdf['Frame']
"""
frames = pd.Series([], dtype=int, name='Frame')
data = pd.DataFrame(index=frames)
params = {}
def setParam(name, x):
params[name] = x
print 'Run name:',
shortname = raw_input()
cv2.namedWindow('frame')
while True:
print 'Parameter name (empty to terminate):',
name = raw_input()
if name != '':
params[name] = 0
print 'max:',
pmax = int(raw_input())
cv2.createTrackbar(name, 'frame', 0, pmax, lambda x: setParam(name, x))
else:
break
# Change 0 to the index that works
cap0 = cv2.VideoCapture(0)
cap1 = cv2.VideoCapture(1)
# Create the output directory and copy over stuff
for i in range(100):
foldername = 'data/' + shortname + '_' + str(i)
if not os.path.exists(foldername):
os.makedirs(foldername)
break
"""
shutil.copy(sys.argv[1], foldername)
if 'settingsFile' in config:
shutil.copy(config['settingsFile'], foldername)
"""
def setCap0Exposure(x):
cap0.set(15,x)
def setCap1Exposure(x):
cap1.set(15,x)
def setCap0Gain(x):
cap0.set(14,x)
def setCap1Gain(x):
cap1.set(14,x)
def setCap0Auto(x):
cap0.set(21,x)
def setCap1Auto(x):
cap1.set(21,x)
def findMeanLumSettings(oldSettings, oldFeatures, newFeatures):
oldShutter, oldGain = oldSettings
newShutter = 1.0
newGain = 16.0
oldMeanLum = oldFeatures
newMeanLum = newFeatures
oldExposure = imf.settingsToExposure(oldShutter, oldGain)
newExposure = 111.2148 + 0.6940*oldExposure - 2.7011*oldMeanLum + 2.6972*newMeanLum
newShutter, newGain = imf.exposureToSettings(newExposure)
return newShutter, newGain
def findLinearFeatureLumSettings(oldSettings, oldFeatures, newFeatures):
oldShutter, oldGain = oldSettings
oldBlurLum = oldFeatures
newBlurLum = newFeatures
oldExposure = imf.settingsToExposure(oldShutter, oldGain)
newExposure = -35.4155 + 0.7933*oldExposure - 2.1544*oldBlurLum + 2.856*newBlurLum
newShutter, newGain = imf.exposureToSettings(newExposure)
return np.clip(newShutter,1.0,531.0), np.clip(newGain,16.0,64.0)
gp = pickle.load(open('gp_mean.p', 'rb'))
#params = ['Exposure 0', 'Contrast 0', 'Contrast 1', 'Blur Luminance 0', 'Blur Luminance 1', 'Mean Foreground Illumination 0', 'Mean BackGround Illumination 0', 'Mean Foreground Illumination 1', 'Mean BackGround Illumination 1']
def findGPSettings(params):
newExposure = gp.predict(params)
newShutter, newGain = imf.exposureToSettings(newExposure)
return np.clip(newShutter,1.0,531.0), np.clip(newGain,16.0,64.0)
def usableMatch(matches, keypoints, keypointsBaseline):
correctMatches = []
    minAmount = 5
    for m, n in matches:
        if m.distance < 0.75 * n.distance:
            correctMatches.append(m)
    if len(correctMatches) > minAmount:
        dst_pts = np.float32([keypoints[m.trainIdx].pt for m in correctMatches])
        src_pts = np.float32([keypointsBaseline[m.queryIdx].pt for m in correctMatches])
        ransacMatches, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        matchesMask = np.array(matchesMask)
        numMatches = (matchesMask > .5).sum()
efficiency = [numMatches, len(keypoints)]
else:
efficiency = [0, len(keypoints)]
return efficiency
"""
if not collectData:
cv2.createTrackbar('Shutter Baseline', 'frame', 1, 531, setCap0Exposure)
cv2.createTrackbar('Gain Baseline', 'frame', 16, 64, setCap0Gain)
cv2.createTrackbar('Shutter Compared', 'frame', 1, 531, setCap1Exposure)
cv2.createTrackbar('Gain Compared', 'frame', 16, 64, setCap1Gain)
"""
# Helper variables
t = 0
i = 0
runNum = 0
startT = 0
expCam0 = True
writing = False
resetRun = False
index_params = dict(algorithm = 0, trees = 5)
search_params = dict(checks=50)
surf = cv2.SURF()
def surfDetectAndMatch(name, q, dq):
surf = cv2.SURF()
flann = cv2.FlannBasedMatcher(index_params, search_params)
oldFrame = None
oldKp = None
oldDesc = None
while True:
newFrame = None
try:
newFrame = q.get(True, 1)
print name + ": " + str(q.qsize()) + " left"
except Empty:
if oldFrame != None:
print name + ": Resetting"
oldFrame = None
if newFrame != None:
if newFrame == False:
dq.close()
kp = None
print name + ": Done"
break
if newFrame[2] == False:
kp, desc = surf.detectAndCompute(newFrame[1], None)
else:
kp_temp, desc = newFrame[1]
kp = [cv2.KeyPoint(x=p[0][0], y=p[0][1], _size=p[1], _angle=p[2], _response=p[3],
_octave=p[4], _class_id=p[5]) for p in kp_temp]
if oldFrame != None:
if newFrame[0] == oldFrame[0]:
print name + ": New run detected"
elif newFrame[0]-oldFrame[0] > 1:
print name + ": Warning, t mismatch!"
succTrackFeatures = 0
            if desc is not None and oldDesc is not None:
matches = flann.knnMatch(oldDesc, desc, k=2)
efficiency = usableMatch(matches, kp, oldKp)
succTrackFeatures = efficiency[0]
dq.put((newFrame[0], succTrackFeatures))
oldFrame = newFrame
oldKp = kp
oldDesc = desc
oldParams = None
collectingGP = True
oldMeanLum = None
if cap0.isOpened() and cap1.isOpened():
q = Queue()
p = Process(target=imageSaver, args=(foldername, q,))
q0 = Queue()
dq0 = Queue()
p0 = Process(target=surfDetectAndMatch, args=("SDAM 0", q0, dq0,))
q1 = Queue()
dq1 = Queue()
p1 = Process(target=surfDetectAndMatch, args=("SDAM 1", q1, dq1,))
p.start()
p0.start()
p1.start()
# Turn off white balance
cap0.set(17, -4)
cap0.set(26, -4)
cap1.set(17, -4)
cap1.set(26, -4)
"""
if not collectData:
cv2.setTrackbarPos('Shutter Baseline', 'frame', int(cap0.get(15)))
cv2.setTrackbarPos('Gain Baseline', 'frame', int(cap0.get(14)))
cv2.setTrackbarPos('Shutter Compared', 'frame', int(cap1.get(15)))
cv2.setTrackbarPos('Gain Compared', 'frame', int(cap1.get(14)))
"""
while True:
i += 1
ret0, frame0 = cap0.read()
ret1, frame1 = cap1.read()
if ret0 and ret1:
frame0 = cv2.cvtColor(frame0, cv2.COLOR_BAYER_BG2BGR)
frame1 = cv2.cvtColor(frame1, cv2.COLOR_BAYER_BG2BGR)
disp = np.concatenate((frame0, frame1), axis=1)
try:
t0, succTrackFeatures0 = dq0.get_nowait()
data.loc[t0, 'Succesfully Tracked Features 0'] = succTrackFeatures0
except Empty:
pass
try:
t1, succTrackFeatures1 = dq1.get_nowait()
data.loc[t1, 'Succesfully Tracked Features 1'] = succTrackFeatures1
except Empty:
pass
if writing and i > 6:
gray0 = cv2.cvtColor(frame0, cv2.COLOR_BGR2GRAY)
gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
# Calculate image features
if expCam0:
kp, desc = surf.detectAndCompute(gray0, None)
kp_temp = [(p.pt, p.size, p.angle, p.response, p.octave, p.class_id) for p in kp]
q0.put((t, (kp_temp, desc), True))
q1.put((t, gray1, False))
meanLum = imf.meanLuminance(gray0)
blurLum = imf.gaussianBlurfeatureLuminance(gray0, kp)
meanFg, meanBg = imf.weightedLuminance(gray0)
contrast = imf.contrast(gray0)
camSettings = (cap0.get(15), cap0.get(14))
else:
kp, desc = surf.detectAndCompute(gray1, None)
kp_temp = [(p.pt, p.size, p.angle, p.response, p.octave, p.class_id) for p in kp]
q1.put((t, (kp_temp, desc), True))
q0.put((t, gray0, False))
meanLum = imf.meanLuminance(gray1)
blurLum = imf.gaussianBlurfeatureLuminance(gray1, kp)
meanFg, meanBg = imf.weightedLuminance(gray1)
contrast = imf.contrast(gray1)
camSettings = (cap1.get(15), cap1.get(14))
newParams = (imf.settingsToExposure(camSettings[0], camSettings[1]),
contrast, blurLum, meanFg, meanBg)
                if oldGray0 is not None:
# Save raw data
data.loc[t, 'Timestamp'] = currentTimestamp()
data.loc[t, 'Run Number'] = runNum
data.loc[t, 'Baseline'] = 1 if expCam0 else 0
data.loc[t, 'Experimental Mean Luminance'] = meanLum
data.loc[t, 'Shutter 0'] = cap0.get(15)
data.loc[t, 'Gain 0'] = cap0.get(14)
data.loc[t, 'Shutter 1'] = cap1.get(15)
data.loc[t, 'Gain 1'] = cap1.get(14)
imgname0 = shortname + '_0_{:0>4d}.png'.format(t)
data.loc[t, 'Image File 0'] = imgname0
imgname1 = shortname + '_1_{:0>4d}.png'.format(t)
data.loc[t, 'Image File 1'] = imgname1
q.put((imgname0, frame0))
q.put((imgname1, frame1))
if collectingGP:
data.loc[t, 'Experimental Method'] = 'GP'
params = np.array([oldParams[0], oldMeanLum, meanLum])
newShutter, newGain = findGPSettings(params)
else:
data.loc[t, 'Experimental Method'] = 'linear_blur'
newShutter, newGain = findLinearFeatureLumSettings(oldCamSettings, oldBlurLum, blurLum)
# Determine new image settings
if expCam0:
cap0.set(14, newGain)
cap0.set(15, newShutter)
else:
cap1.set(14, newGain)
cap1.set(15, newShutter)
t += 1
oldGray0 = gray0
oldGray1 = gray1
oldParams = newParams
oldBlurLum = blurLum
oldCamSettings = camSettings
oldMeanLum = meanLum
i = 0
cv2.putText(disp, "Frame: " + str(t-startT), (50,50),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
cv2.putText(disp, "Baseline: " + ("1" if expCam0 else "0"), (50,80),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
cv2.putText(disp, "GP" if collectingGP else "linear_blur", (50,110),
cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255))
cv2.imshow('frame', disp)
else:
cap0.grab()
cap1.grab()
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
# The order is to press 'w' when starting a run, then press 'r' to do it again in a pair
elif key == ord('w'):
expCam0 = random.choice((True, False))
resetRun = True
elif key == ord('e'):
resetRun = True
elif key == ord('r'):
expCam0 = not expCam0
resetRun = True
elif key == ord('s'):
writing = False
runNum += 1
elif key == ord('g'):
collectingGP = not collectingGP
if resetRun:
resetRun = False
writing = True
startT = t
oldGray0 = None
oldGray1 = None
oldParams = None
i = 0
# To start off, set auto-exposure
cap0.set(14, -2)
cap0.set(15, -2)
cap1.set(14, -2)
cap1.set(15, -2)
q.put(False)
q0.put(False)
q1.put(False)
q.close()
dq0.close()
dq1.close()
q0.close()
q1.close()
#p.join()
#p0.join()
#p1.join()
if len(data) > 0:
data.to_csv(foldername + '/' + shortname + '_rawdata.csv')
|
from rest_framework import serializers
from core.models import Recipe, Ingredient
class IngredientSerializer(serializers.ModelSerializer):
class Meta:
model = Ingredient
fields = ("name",)
class RecipeSerializer(serializers.ModelSerializer):
ingredients = IngredientSerializer(many=True, required=True)
class Meta:
model = Recipe
fields = ("id", "name", "description", "ingredients")
read_only_fields = ("id",)
def create(self, validated_data):
        ingredients = validated_data.pop("ingredients", [])
recipe = Recipe.objects.create(**validated_data)
for ingredient in ingredients:
Ingredient.objects.create(recipe=recipe, **ingredient)
return recipe
def update(self, instance, validated_data):
"""Update attributes for Recipe and remove old ingredients"""
        new_ingredients = validated_data.pop("ingredients", [])
old_ingredients = Ingredient.objects.all().filter(recipe=instance)
old_ingredients.delete()
instance.name = validated_data["name"]
instance.description = validated_data["description"]
instance.save()
for ingredient in new_ingredients:
Ingredient.objects.create(recipe=instance, **ingredient)
return instance
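# Illustrative payload only (an assumption, not from the original code): the nested
# "ingredients" list is popped off in create()/update() above and each entry becomes
# its own Ingredient row linked to the recipe, e.g.
# {"name": "Pancakes", "description": "Mix and fry.",
#  "ingredients": [{"name": "flour"}, {"name": "milk"}]}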
|
from rich import print
from rich.table import Table
from .ui import loading
from rich.console import Console
from .ui import Prompt
import time
class Mode():
def __init__(self, mode, about, theme):
self.mode = mode
self.about = about
self.theme = theme
self.products = None
self.table = Table(title=mode, title_justify=self.mode, title_style=self.theme,
caption=self.about, caption_justify="left", caption_style=self.theme, padding=(0, 5))
self.console = Console()
self.prompt = Prompt(self.theme)
def get_mode(self):
"""user is required to use this using rich.print"""
return self.mode
def get_about(self):
return self.about
def mode_details(self):
print(
f"[bold yellow]\n{self.mode} Mode[/bold yellow]\n[bold yellow]{self.about}[/bold yellow]")
def get_all_products(self, products):
loading("Loading All Products")
self.products = products
print()
self.console.print("All Products\n", justify="left", style="bold red")
for product in self.products:
self.data = f"[bold {self.theme}]ID: [bold red]{product.id}\n[bold {self.theme}]Company: [bold red]{product.company}\n[bold {self.theme}]Product: [bold red]{product.name}\n[bold {self.theme}]Date: [bold red]{product.date}\n"
print(self.data)
time.sleep(5)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 17:18:07 2018
@author: dawnstear
"""
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
#from plots import ecgplot
ecg = '/Users/dawnstear/desktop/PWC44/ecg.csv'
fnirs = '/Users/dawnstear/desktop/PWC44/PWC44_Oxy.txt'
def ecgplot(filename):
df = pd.read_csv(filename)
clipped_df = df[93184:108000]
y = clipped_df.values
return y
y = pd.read_fwf(fnirs)
y.columns = ['1','2','3','4','5','6','7','8','9','10','11',
'12','13','14','15','16','17','18','19','20']
channel = y['1'].values
ecgdata = ecgplot(ecg)
# -------------------------------------------
win = pg.GraphicsWindow()
win.setWindowTitle('Scrolling fNIRS data')
chunkSize = 100
maxChunks = 10
startTime = pg.ptime.time()
p5 = win.addPlot(colspan=2)
p5.setLabel('bottom', 'time', 's','oxy concentration')
p5.setXRange(-10, 0)
curves = []
data5 = channel
ptr5 = 0
def update5():
global p5, data5, ptr5, curves
now = pg.ptime.time()
for c in curves:
c.setPos(-(now-startTime), 0)
i = ptr5 % chunkSize
if i == 0:
curve = p5.plot()
curves.append(curve)
last = data5[-1]
data5 = np.empty((chunkSize+1,2))
data5[0] = last
while len(curves) > maxChunks:
c = curves.pop(0)
p5.removeItem(c)
else:
curve = curves[-1]
data5[i+1,0] = now - startTime
data5[i+1,1] = np.random.normal()
curve.setData(x=data5[:i+2, 0], y=data5[:i+2, 1])
ptr5 += 1
timer = pg.QtCore.QTimer()
timer.timeout.connect(update5)
timer.start(50)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
'''
# update all plots
def update():
update5()
#update6()'''
'''
# plot ECG data
win.nextRow()
p6 = win.addPlot(colspan=2)
p6.setLabel('bottom', 'Time', 's')
p6.setXRange(-10, 0)
curves = []
data6 = ecgdata#np.empty((chunkSize+1,2))
ptr6 = 0
def update6():
global p5, data6, ptr6, curves
now = pg.ptime.time()
for c in curves:
c.setPos(-(now-startTime), 0)
i = ptr6 % chunkSize
if i == 0:
curve = p6.plot()
curves.append(curve)
last = data6[-1]
data6 = np.empty((chunkSize+1,2))
data6[0] = last
while len(curves) > maxChunks:
c = curves.pop(0)
p5.removeItem(c)
else:
curve = curves[-1]
data6[i+1,0] = now - startTime
data6[i+1,1] = np.random.normal()
curve.setData(x=data6[:i+2, 0], y=data6[:i+2, 1])
ptr6 += 1
'''
|
# interface to run Psi4 from the embedded system.
# a modified Psi4 needs to be compiled from source to read the embedding potential;
# a compiled version can be found in /home/goodpast/shared/Xuelan/psi4.
# the change was made to psi4/psi4/src/psi4/libscf_solver/hf.cc to read embpot.dat.
# add the following Psi4 executable path and scratch directory to your ~/.bashrc:
# export PATH=/home/goodpast/shared/Xuelan/psi4/psi4/objdir/stage/bin:$PATH
# export PSI_SCRATCH=/scratch.global/${yourusername}/
from string import Template
import numpy as np
import os
import subprocess
class Psi4Ext:
def __init__(self, mol, method, ext_pot, core_ham, filename, work_dir, scr_dir, nproc, pmem, hl_dict, hl_excited_dict):
self.mol = mol # mol object passed from PySCF
self.method = method # excited-state method name
self.ext_pot = ext_pot # external potential/embedding potential
self.core_ham = core_ham # core_hamiltonian (unmodified)
self.filename = filename
self.work_dir = work_dir
self.scr_dir = scr_dir
self.nproc = nproc
self.pmem = pmem # in MB
        if hl_excited_dict:
            # # of roots for EOM-CCSD (suggest > cc3_root)
            self.nroots = hl_excited_dict.get('nroots')
            # only 1 root is allowed per eomcc3 calculation
            self.cc3_root = hl_excited_dict.get('cc3_root')
        else:
            # default to a ground-state calculation when no excited-state options are given
            self.nroots = None
            self.cc3_root = None
def generate_psi4_input(self):
if self.nproc is not None and self.pmem is not None:
memory = self.pmem * self.nproc # total memory assigned in psi4 input
else:
memory = 20
nao = self.mol.nao_nr()
mol_geom = self.pyscf2psi4_geom()
mol_basis = self.pyscf2psi4_basis()
if self.mol.cart == True:
basis_type = "cartesian"
else:
basis_type = "spherical"
# mol.spin = 2S == nelec_alpha - nelec_beta
# SPIN = 2S+ 1
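        # e.g. a triplet (S = 1, two unpaired electrons) has mol.spin = 2 in PySCF
        # and is passed to Psi4 below as the multiplicity SPIN = 2 + 1 = 3.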
# generate ground-state coupled cluster input
if 'cc' in self.method and self.nroots is None:
inp_str = psi4_cc_template.substitute(MEMORY=memory,
BASIS=mol_basis, GEOM=mol_geom, CHARGE=self.mol.charge,
SPIN=self.mol.spin+1, METHOD=self.method,
BASIS_TYPE=basis_type, NCORE=self.nproc)
        # generate excited-state coupled cluster input
if 'cc' in self.method and self.nroots is not None:
inp_str = psi4_eomcc_template.substitute(MEMORY=memory,
BASIS=mol_basis, GEOM=mol_geom, CHARGE=self.mol.charge,
SPIN=self.mol.spin+1, METHOD='eom-'+self.method,
NROOTS=self.nroots, CC3_ROOT=self.cc3_root,
BASIS_TYPE=basis_type, NCORE=self.nproc)
return inp_str
def generate_psi4_embpot(self, emb_ham):
nao = self.mol.nao_nr()
#emb_ham = self.core_ham + self.ext_pot
f = open('embpot.dat', 'w')
for i in range(nao):
for j in range(nao):
f.write("%15.12f\n" % (emb_ham)[i, j])
f.close()
print('embedding potential is written to embpot.dat')
def get_energy(self):
# generate PSI4 input file
input_file = self.generate_psi4_input()
with open(self.work_dir + '/' + self.filename + '_psi4.dat', 'w') as f:
f.write(input_file)
f.close()
# generate embpot.dat, core_Hamiltonian should be included in embpot
emb_ham = self.core_ham + (self.ext_pot[0] + self.ext_pot[1])/2.
nao = self.mol.nao_nr()
## debug: print pyscf embpot
#h0 = emb_ham.reshape(nao,nao)
#h0_string = ''
#for x in range(nao):
# i=0
# for y in h0[x]:
# i += 1
# h0_string += "%15.8f" % y
# if i % 5 == 0:
# h0_string += "\n"
# if nao % 5 != 0:
# h0_string += "\n"
#with open(self.work_dir + '/embpot_pyscf.dat', 'w') as f:
# print('embpot_pyscf.dat is saved to {}'.format(f))
# f.write(h0_string)
#f.close()
I_Pyscf2Psi4 = ShPyscf2Psi4(self.mol)
emb_ham = emb_ham[I_Pyscf2Psi4, :]
emb_ham = emb_ham[: ,I_Pyscf2Psi4]
nao = self.mol.nao_nr()
h0 = emb_ham.reshape(nao,nao)
self.generate_psi4_embpot(h0)
# Run psi4
file_path = self.work_dir + '/' + self.filename + '_psi4.dat'
#print('embpot.dat is saved to {}'.format(file_path))
cmd = ' '.join(["psi4 -n", str(self.nproc),file_path])
#print(cmd)
proc_results = subprocess.getoutput(cmd)
print(proc_results)
# Open and extract from output.
energy = []
outfile = self.work_dir + '/' + self.filename + '_psi4.out'
with open(outfile, 'r') as fin:
wf_method = None
dat = fin.read()
dat1 = dat.splitlines()
# return highest-level ground state energy for embedding energy
for num, line in enumerate(dat1):
if 'RHF Final Energy' in line:
energy.insert(0, float(line.split()[-1]))
wf_method = 'RHF'
if 'MP2 total energy' in line:
energy.insert(0, float(line.split()[-1]))
wf_method = 'MP2'
if 'CCSD total energy' in line:
energy.insert(0, float(line.split()[-1]))
wf_method = 'CCSD'
if 'CC3 total energy' in line:
energy.insert(0, float(line.split()[-1]))
wf_method = 'CC3'
# print out excited-state energy to file
if 'Completed EOM_CCSD' in line:
from pyscf.data import nist
for i in range(self.nroots):
e_eomccsd = float(dat1[num-self.nroots+i].split()[1])
print(f'EOM-CCSD for root {i+1} is {e_eomccsd*nist.HARTREE2EV:.3f} eV')
if 'EOM State 1' in line:
e_eomcc3 = float(line.split()[3])
print(f'EOM-CC3 for root {self.cc3_root} is {e_eomcc3} eV')
print(f'{wf_method} is used for high-level energy')
return energy
def pyscf2psi4_geom(self):
# set unit to angstrom, which is default in Psi4
data = [[0 for x in range(4)] for y in range(self.mol.natm)]
atom_geom = [0 for x in range(self.mol.natm)]
gh_num = 0
for ia in range(self.mol.natm):
symb = self.mol.atom_pure_symbol(ia)
if (symb.upper() == "GHOST"):
symb = self.mol.ghosts[gh_num] + str(gh_num + 1)
gh_num += 1
coord = self.mol.atom_coord(ia, unit='ANG')
data[ia][0] = symb
data[ia][1] = str(coord[0])
data[ia][2] = str(coord[1])
data[ia][3] = str(coord[2])
atom_geom[ia] = " ".join(data[ia])
geom = '\n'.join(atom_geom)
return geom
def pyscf2psi4_basis(self):
# combine basis set for different atoms
basis_string = '****\n'
for basis_symb in self.mol._basis.keys():
if (basis_symb.split(":")[0].upper() == 'GHOST'):
ghost_basis = self.mol._basis[basis_symb]
                basis_symb = self.mol.ghosts[int(basis_symb.split(
                    ':')[1])-1] + basis_symb.split(":")[1]
basis_string += convert_basis_to_psi4(basis_symb, ghost_basis)
else:
basis_string += convert_basis_to_psi4(
basis_symb, self.mol._basis[basis_symb])
# if ('hf' not in self.method) and ('hartree-fock' not in self.method):
# basis_string += '\nset,mp2fit\ndefault, %s/mp2fit'%basis
return basis_string
def convert_basis_to_psi4(symb, basis):
    '''Convert pyscf internal basis format to a Gaussian-style format string.
    Psi4 uses the Gaussian 94 basis format.'''
    from pyscf.gto.mole import _std_symbol
res = []
symb = _std_symbol(symb)
SPDF = ('S', 'P', 'D', 'F', 'G', 'H', 'I', 'J')
MAXL = 8
MAPSPDF = {'S': 0,
'P': 1,
'D': 2,
'F': 3,
'G': 4,
'H': 5,
'I': 6,
'J': 7}
# element name
res.append('%-2s 0' % (symb))
# gaussian formatting
for bas in basis:
for i in range(len(bas[1])-1):
res.append('%s %s 1.00' % (SPDF[bas[0]], len(bas[1:])))
for dat in bas[1:]:
res.append('%15.9f %15.9f ' % (dat[0], dat[i+1]))
#if len(bas[1]) > 2:
# for i in range(len(bas[1])-1):
# res.append('%s %s 1.00' % (SPDF[bas[0]], len(bas[1:])))
# for dat in bas[1:]:
# res.append('%15.9f %15.9f ' % (dat[0], dat[i+1]))
#elif len(bas[1]) == 2:
# res.append('%s %s 1.00' % (SPDF[bas[0]], len(bas[1:])))
# for dat in bas[1:]:
# res.append('%15.9f %15.9f ' % (dat[0], dat[1]))
#
#else:
# raise RuntimeError(
# 'Warning! Please manually check basis set format!')
# closing
res.append('****')
return '\n'.join(res)
_Pyscf2Psi4BasisPermSph = {
0: [0],
1: [2, 0, 1],
# 0 1 2 3 4
# D0 D+1 D-1 D+2 D-2
2: [2, 3, 1, 4, 0],
# 0 1 2 3 4 5 6
# F0 F+1 F-1 F+2 F-2 F+3 F-3
3: [3, 4, 2, 5, 1, 6, 0],
# 0 1 2 3 4 5 6 7 8
# G0 G+1 G-1 G+2 G-2 G+3 G-3 G+4 G-4
4: [4, 5, 3, 6, 2, 7, 1, 8, 0],
# 0 1 2 3 4 5 6 7 8 9 10
# H0 H+1 H-1 H+2 H-2 H+3 H-3 H+4 H-4 H+5 H-5
5: [5, 6, 4, 7, 3, 8, 2, 9, 1, 10, 0],
# 0 1 2 3 4 5 6 7 8 9 10 11 12
# I0 I+1 I-1 I+2 I-2 I+3 I-3 I+4 I-4 I+5 I-5 I+6 I-6
6: [6, 7, 5, 8, 4, 9, 3, 10, 2, 11, 1, 12, 0],
}
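# Illustrative sanity check of the d-shell entry above (not part of the module
# logic; the labels assume PySCF's m = -l..+l ordering and Psi4's 0, +1, -1, ...
# ordering summarised in the notes near the end of this file):
#   pyscf_d = ['D-2', 'D-1', 'D0', 'D+1', 'D+2']
#   psi4_d = [pyscf_d[k] for k in _Pyscf2Psi4BasisPermSph[2]]
#   psi4_d == ['D0', 'D+1', 'D-1', 'D+2', 'D-2']  # True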
def ShPyscf2Psi4(mol):
###################################################################
    ### create an index list with the size of the AO basis  9/13/17 ###
###################################################################
I_Pyscf2Psi4 = []
iOff = 0
# Must be the total atoms, not the basis keys.
symbol_list = []
ghost = 1
for ia in range(mol.natm):
symb = mol.atom_pure_symbol(ia)
if symb == 'Ghost':
symb = symb + ':' + str(ghost)
ghost += 1
symbol_list.append(symb)
for basis_symb in symbol_list:
index = []
# pass 1: comment line
ls = [bs[0] for bs in mol._basis[basis_symb]]
nprims = [len(bs[1:]) for bs in mol._basis[basis_symb]]
nctrs = [len(bs[1])-1 for bs in mol._basis[basis_symb]]
prim_to_ctr = {}
for i, l in enumerate(ls):
if l in prim_to_ctr:
prim_to_ctr[l][0] += nprims[i]
prim_to_ctr[l][1] += nctrs[i]
else:
prim_to_ctr[l] = [nprims[i], nctrs[i]]
for l in set(ls):
for i in range(prim_to_ctr[l][1]):
index.append(l)
for l in index:
I_Pyscf2Psi4 += [(o + iOff) for o in _Pyscf2Psi4BasisPermSph[l]]
iOff += 2*l + 1
I_Pyscf2Psi4 = np.array(I_Pyscf2Psi4)
return I_Pyscf2Psi4
psi4_cc_template = Template('''#! Psi4 input generated by QSoME
memory $MEMORY MB # total memory not per thread memory for parallel jobs
molecule acrolein {
$CHARGE $SPIN
# acrolein geometry from MD snapshots
$GEOM
units angstrom # default in psi4
symmetry c1 # no symmetry with embedding potential
noreorient # prevent reorienting molecules
nocom # prevent recentering molecules
}
set {
freeze_core false
#df_basis_scf aug-cc-pvdz-jkfit
#df_basis_cc aug-cc-pvdz-ri
#scf_type df
#cc_type df
scf_type pk
CC_NUM_THREADS $NCORE
}
basis {
# generated by PySCF
$BASIS_TYPE
$BASIS
}
energy('$METHOD')
''')
psi4_eomcc_template = Template('''#! Psi4 input generated by QSoME
memory $MEMORY MB # total memory not per thread memory for parallel jobs
molecule acrolein {
$CHARGE $SPIN
# acrolein geometry from MD snapshots
$GEOM
units angstrom # default in psi4
symmetry c1 # no symmetry with embedding potential
noreorient # prevent reorienting molecules
nocom # prevent recentering molecules
}
set {
roots_per_irrep [$NROOTS]
freeze_core false
df_basis_scf aug-cc-pvdz-jkfit
df_basis_cc aug-cc-pvdz-ri
scf_type df
cc_type df
#scf_type pk
CC_NUM_THREADS $NCORE
}
set cclambda {
r_convergence 4
}
# EOM-CC3 can only calculate one root at a time
# The highest root is calculated by default
# use PROP_ROOT to assign the desired root
set cceom {
r_convergence 3
e_convergence 5
PROP_ROOT $CC3_ROOT
}
basis {
# generated by PySCF
$BASIS_TYPE
$BASIS
}
energy('$METHOD')
''')
## Spherical basis function angular momentum ordering
## https://github.com/psi4/psi4/blob/master/psi4/src/psi4/libmints/writer.cc
## https://psicode.org/psi4manual/master/prog_blas.html
# // Molpro:
# // '1s',
# // '2px','2py','2pz'
# // '3d0','3d2-','3d1+','3d2+','3d1-'
# // '4f1+','4f1-','4f0','4f3+','4f2-'
# // '4f3-','4f2+'
# // '5g0','5g2-','5g1+','5g4+','5g1-','5g2+'
# // '5g4-','5g3+','5g3-'
# // '6h1+','6h1-','6h2+','6h3+','6h4-','6h3-','6h4+','6h5-','6h0','6h5+','6h2-'
# // '7i6+','7i2-','7i5+','7i4+','7i5-','7i2+','7i6-','7i3+','7i4-','7i0','7i3-','7i1-','7i1+'
#
## PySCF ordering
## https://github.com/sunqm/libcint/blob/master/doc/program_ref.pdf
## https://github.com/pyscf/pyscf/issues/1023
## https://en.wikipedia.org/wiki/Table_of_spherical_harmonics#Real_spherical_harmonics
## follows the CCA standard except for p orbitals (l=1)
# // PySCF:
# // '1s'
# // '2px','2py','2pz' or '2p1-','2p0','2p1+'
# // '3d2-','3d1-','3d0','3d1+','3d2+'
# // '4f3-','4f2-','4f1-','4f0','4f1+','4f2+','4f3+'
# // '-l, -l+1, -l+2,..,0,..,l-2,l-1,l'
#
## PSI4 ordering
## https://github.com/MolSSI/QCSchema/issues/45
## https://github.com/psi4/psi4/blob/master/psi4/src/psi4/libmints/writer.cc#L421-L519
#
#
## Gaussian/Molden/PSI4 ordering
## https://gau2grid.readthedocs.io/_/downloads/en/stable/pdf/
# // Gaussian/Molden/PSI4
# // '1s'
# // '2pz','2px','2py' or '2p0','2p1+','2p1-'
# // '3d0','3d1+','3d1-','3d2+','3d2-'
# // '4f0','4f1+','4f1-','4f2+','4f2-','4f3+','4f3-'
# // '0, 1+, 1-, 2+, 2-, ..., l+, l-'
# Psi4 python script to test the ordering
#import psi4
#import numpy as np
#np.set_printoptions(suppress=True)
#
#
#psi4.set_options({'scf_type': 'pk'})
#
#h2o = psi4.geometry("""
# 0 1
# H
# O 1 0.957
# H 2 0.957 1 104.5
#""")
#
#psi4.basis_helper("""
## CC-pvdz
#spherical
#****
#H 0
#S 3 1.00
# 1.301000D+01 1.968500D-02
# 1.962000D+00 1.379770D-01
# 4.446000D-01 4.781480D-01
#S 1 1.00
# 1.220000D-01 1.000000D+00
#P 1 1.00
# 7.270000D-01 1.0000000
#****
#O 0
#S 8 1.00
# 1.172000D+04 7.100000D-04
# 1.759000D+03 5.470000D-03
# 4.008000D+02 2.783700D-02
# 1.137000D+02 1.048000D-01
# 3.703000D+01 2.830620D-01
# 1.327000D+01 4.487190D-01
# 5.025000D+00 2.709520D-01
# 1.013000D+00 1.545800D-02
#S 8 1.00
# 1.172000D+04 -1.600000D-04
# 1.759000D+03 -1.263000D-03
# 4.008000D+02 -6.267000D-03
# 1.137000D+02 -2.571600D-02
# 3.703000D+01 -7.092400D-02
# 1.327000D+01 -1.654110D-01
# 5.025000D+00 -1.169550D-01
# 1.013000D+00 5.573680D-01
#S 1 1.00
# 3.023000D-01 1.000000D+00
#P 3 1.00
# 1.770000D+01 4.301800D-02
# 3.854000D+00 2.289130D-01
# 1.046000D+00 5.087280D-01
#P 1 1.00
# 2.753000D-01 1.000000D+00
#D 1 1.00
# 1.185000D+00 1.0000000
#****
#""")
#scf_e, wfn = psi4.energy('scf',return_wfn=True)
#F_ao = wfn.Fa_subset("AO").to_array()
#array([[ -0.48136266, -0.61493728, -0.36962934, 0. ,
# 0.5090822 , -1.09424183, -0.44549065, -0.84186585,
# 0.37987649, 0. , -0.46417694, 0.24712484,
# 0. , -0.28138229, -0.0223638 , 0. ,
# 0.06550476, 0.04399988, 0. , -0.29953392,
# -0.42998243, -0.10502384, 0. , -0.39735089],...]
#PySCF script to confirm the ordering
#import pyscf
#from pyscf import gto,scf
#import numpy as np
#np.set_printoptions(suppress=True)
#mol = pyscf.M(
# atom ='H 0.0 0.756689 -0.520321;O 0.0 0.0 0.065570;H 0 -0.756689 -0.520321',
# symmetry = False,
#)
#
#
#
#mol.basis = {'H': gto.basis.parse("""
##BASIS SET: (4s,1p) -> [2s,1p]
#H S
# 1.301000E+01 1.968500E-02 0.000000E+00
# 1.962000E+00 1.379770E-01 0.000000E+00
# 4.446000E-01 4.781480E-01 0.000000E+00
# 1.220000E-01 0.0000000E+00 1.000000E+00
#H P
# 7.270000E-01 1.0000000
#"""),
#'O': gto.basis.parse("""
##BASIS SET: (9s,4p,1d) -> [3s,2p,1d]
#O S
# 1.172000E+04 7.100000E-04 -1.600000E-04 0.000000E+00
# 1.759000E+03 5.470000E-03 -1.263000E-03 0.000000E+00
# 4.008000E+02 2.783700E-02 -6.267000E-03 0.000000E+00
# 1.137000E+02 1.048000E-01 -2.571600E-02 0.000000E+00
# 3.703000E+01 2.830620E-01 -7.092400E-02 0.000000E+00
# 1.327000E+01 4.487190E-01 -1.654110E-01 0.000000E+00
# 5.025000E+00 2.709520E-01 -1.169550E-01 0.000000E+00
# 1.013000E+00 1.545800E-02 5.573680E-01 0.000000E+00
# 3.023000E-01 0.0000000E+00 0.0000000E+00 1.000000E+00
#O P
# 1.770000E+01 4.301800E-02 0.000000E+00
# 3.854000E+00 2.289130E-01 0.000000E+00
# 1.046000E+00 5.087280E-01 0.000000E+00
# 2.753000E-01 0.0000000E+00 1.000000E+00
#O D
# 1.185000E+00 1.0000000
#""")}
#
#mol.build()
#myhf = scf.RHF(mol)
#myhf.kernel()
#myhf.get_fock()
#array([[ -0.48136265, -0.61493739, 0. , 0.5090822 ,
# -0.3696292 , -1.09424184, -0.44549045, -0.84186578,
# 0. , -0.4641769 , 0.37987652, 0. ,
# -0.28138211, 0.24712505, 0. , 0.06550478,
# -0.02236383, -0. , 0.04399993, -0.29953415,
# -0.42998266, 0. , -0.3973508 , -0.10502371],...]
# use the same contraction from basissetexchange.org
# Psi4 and PySCF Fock matrices agree to within 1e-6
if __name__ == '__main__':
print(dir(Psi4Ext))
|
"""A module consisting of various meshing functions."""
# ***********************************************************************
#
# FILE mesh.py
#
# AUTHOR Dr. Vishal Sharma
#
# VERSION 1.0.0-alpha4
#
# WEBSITE https://github.com/vxsharma-14/project-NAnPack
#
# NAnPack Learner's Edition is distributed under the MIT License.
#
# Copyright (c) 2020 Vishal Sharma
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with
# NAnPack Learner's Edition.
#
# ***********************************************************************
from .backend.exceptions import InvalidValueError
def ComputeGridPoints(Dimension, Length, delX, Height=None, delY=None):
"""Return the grid points along X and Y direction in the mesh.
Call signature:
ComputeGridPoints(Dimension, Length, delX, Height=None, delY=None)
Parameters
----------
Dimension: str
Dimension of the domain. Allowed inputs are "1D" or "2D".
Length: float
Length of the domain.
delX: float
Grid step size along X-axis.
Height: float
Height of the domain. Value required for 2D applications.
delY: float
Grid step size along Y-axis. Value required for 2D applications.
Returns
-------
iMax : int
Number of grid points along X-axis within the domain.
jMax : int
Number of grid points along Y-axis within the domain. Returns 0 for
1D applications.
"""
iMax = int(Length/delX) + 1
if Dimension.upper() == "2D":
jMax = int(Height/delY) + 1
else:
jMax = 0
print("Calculating grid size: Completed.")
return iMax, jMax
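# Illustrative usage (values chosen arbitrarily, not from the library docs): a
# 10.0 x 5.0 domain with a uniform step of 0.5 in both directions gives
#   iMax, jMax = ComputeGridPoints("2D", 10.0, 0.5, Height=5.0, delY=0.5)
#   # iMax = 21, jMax = 11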
def ComputeGridSteps(Dimension, Length, iMax, Height=None, jMax=None):
"""Return the uniform grid steps size along X and Y axis.
Call signature:
ComputeGridSteps(Dimension, Length, iMax, Height=None, jMax=None)
Parameters
----------
Dimension: str
Dimension of the domain. Allowed inputs are "1D" or "2D".
Length: float
Length of the domain.
iMax : int
Number of grid points along X-axis within the domain.
Height: float
Height of the domain. Value required for 2D applications.
jMax : int
Number of grid points along Y-axis within the domain. Value
required for 2D applications.
Returns
-------
delX: float
Grid step size along X-axis.
delY: float
Grid step size along Y-axis. Returns 0.0 for 1D applications.
"""
delX = Length/(iMax - 1)
if Dimension.upper() == "2D":
delY = Height/(jMax - 1)
else:
delY = 0.0
print("Calculating grid step size: Completed.")
return delX, delY
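# Illustrative usage (mirror of the example above, same hypothetical domain):
#   delX, delY = ComputeGridSteps("2D", 10.0, 21, Height=5.0, jMax=11)
#   # delX = 0.5, delY = 0.5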
def RectangularGrid(dX, iMax, dY=None, jMax=None):
"""Return a rectangular uniform rectangular mesh.
X and/or Y grid point locations are computed in a cartesian coordinate
system using the grid step size and grid points.
Call Signature:
RectangularGrid(dX, iMax, dY=None, jMax=None)
Parameters
----------
dX: float
Grid step size along X-axis.
iMax : int
Number of grid points along X-axis within the domain.
dY: float
Grid step size along Y-axis. Value required for 2D applications.
jMax : int
Number of grid points along Y-axis within the domain. Value
required for 2D applications.
Returns
-------
X: 1D or 2D array, float
Returns X coordinates at each grid points locations.
Y: 2D array, float
Returns Y coordinates at each grid points locations. Returns 0 for
1D applications.
"""
import numpy as np
if isinstance(dY, float) and isinstance(jMax, int):
X = np.zeros((iMax, jMax), dtype="float")
Y = np.zeros((iMax, jMax), dtype="float")
for i in range(0, iMax):
for j in range(0, jMax):
X[i][j] = i*dX
Y[i][j] = j*dY
else:
X = np.zeros((iMax), dtype="float")
for i in range(0, iMax):
X[i] = i*dX
Y = 0.0
print("Uniform rectangular grid generation in cartesian\
coordinate system: Completed.")
return X, Y
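# Illustrative usage (hypothetical values): a 5 x 3 point grid with 0.5 spacing
#   X, Y = RectangularGrid(0.5, 5, dY=0.5, jMax=3)
#   # X[i][j] = i*dX and Y[i][j] = j*dY, e.g. X[4][2] = 2.0, Y[4][2] = 1.0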
def CurvilinearGrid(dX, iMax, dY=None, jMax=None):
"""Return a rectangular uniform/non-uniform rectangular mesh.
Documentation incomplete. This routine is under construction.
"""
print("Calculating X and Y locations of all grid points within\
the mesh.")
from .backend import gridmetrics
from .backend import plotmetrics
dXi = 1.0
dEta = 1.0
X, Y = RectangularGrid(dX, iMax, dY, jMax)
dim = X.shape
if len(dim) == 2: # Two dimensional
Xi = [[i*dXi for j in range(0, jMax)] for i in range(0, iMax)]
Eta = [[j*dEta for j in range(0, jMax)] for i in range(0, iMax)]
XiX, XiY, EtaX, EtaY, JJ = gridmetrics.Metrics2D(X, Y)
print("Grid metrics and Jacobian evaluation: Completed.")
plotmetrics.PlotMetrics2D(X, Y, XiX, XiY, EtaX, EtaY)
elif len(dim) == 1:
Xi = [i*dX for i in range(0, iMax)]
Eta = 0.0
Xi, Eta, JJ = gridmetrics.Metrics1D(X)
print("Grid metrics and Jacobian evaluation: Completed.")
print("Grid transformation to curvilinear coordinate system:\
Completed.")
return X, Y
def CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model):
"""Return the time step size in the numerical approximation.
Call Signature:
CalcTimeStep(CFL, diff, conv, dX, dY, Dimension, Model)
Parameters
----------
CFL: float
In this program, CFL is treated as the
diffusion number for diffusion equations, and
Courant number for the convection equations.
Caution: This is not a true numerical definition of CFL though.
diff : float
Physics specific coefficient in the diffusion model.
For example, kinematic viscosity or thermal diffusivity.
conv: float
Physics specific coefficient in the convection model.
For example, speed of sound in the first-order linear wave eq.
dX: float
Grid step size along X-axis.
dY: float
Grid step size along Y-axis. Value required for 2D applications.
Dimension: str
Dimension of the domain. Allowed inputs are "1D" or "2D".
Model: str
Model of the governing equation. To see available options for this
parameter, type the following command on your terminal
python fetchoption.py "model"
Returns
-------
TimeStep: float
Time step in the model equation.
"""
# ************** DIFFUSION EQN. ******************
if Model.upper() == "DIFFUSION":
dX2 = dX*dX
if Dimension.upper() == "1D":
TimeStep = CFL*dX2/diff
elif Dimension.upper() == "2D":
dY2 = dY*dY
TimeStep = CFL*(1.0/((1/dX2) + (1/dY2)))/diff
# ************** FIRST-ORDER WAVE EQN. *****************
elif Model.upper() == "FO_WAVE":
if Dimension.upper() == "1D":
TimeStep = CFL*dX/conv
# ************** BURGERS EQN. *****************
elif Model.upper() in ["INV_BURGERS", "VISC_BURGERS"]:
if Dimension.upper() == "1D":
TimeStep = CFL*dX
print("Calculating time step size for the simulation: Completed.")
return TimeStep
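# Worked example (illustrative, values chosen arbitrarily): for the 1D
# diffusion model with diffusion number CFL = 0.5, diffusivity diff = 0.1 and
# dX = 0.01, the returned step is CFL*dX*dX/diff = 0.5*0.0001/0.1 = 5.0e-4:
#   CalcTimeStep(0.5, 0.1, None, 0.01, None, "1D", "DIFFUSION")  # -> 5.0e-4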
def CalcMaxSteps(State, nMax, dT, simTime):
"""Return the max iteration/time steps for the program to run.
Call Signature:
CalcMaxSteps(State, nMax, dT, simTime)
Parameters
----------
State: str
State at which the final solution is desired. It can be
steady-state or transient.
To obtain solution at several intermediate time steps before
convergence, use transient option and provide the time in
configuration file at which the solution is desired. The
program will calculate when to stop the solution.
Available inputs are "STEADY" or "TRANSIENT"
nMax: int
Maximum number of iterations until which the program must seek
        convergence. If convergence is not achieved after going through
nMax steps, the program will stop solving any further.
dT: float
Time step in the discretized equation. The value is auto calculated
by the program from the CFL value during the configuration step.
simTime: float
Intermediate time before convergence at which numerical solution
is required.
Returns
-------
MaxSteps: int
Maximum iteration/time steps for the program to run.
"""
if State.upper() == "TRANSIENT":
if not simTime > 0.0: # simulation time can't be negative
raise InvalidValueError("SIM_TIME", simTime)
        try:
            MaxSteps = int(simTime/dT)
        except (TypeError, ZeroDivisionError):
            raise Exception("No time step provided.")
elif State.upper() == "STEADY":
MaxSteps = nMax
print("Calculating maximum iterations/steps for the simulation:\
Completed.")
return MaxSteps
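# Worked example (illustrative): a transient run up to simTime = 10.0 with
# dT = 0.25 gives MaxSteps = int(10.0/0.25) = 40, while a steady run simply
# returns nMax:
#   CalcMaxSteps("TRANSIENT", 5000, 0.25, 10.0)  # -> 40
#   CalcMaxSteps("STEADY", 5000, 0.25, 10.0)     # -> 5000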
|
# coding: utf-8
from DataFormats import FWLite
def getGeantTrackIds(obj):
geantTracks = obj.g4Tracks()
return [t.trackId() for t in geantTracks]
def makeTPtoSCMap(trackingParticles, simClusters):
    tp_map = {}
tp_sc_map = {}
for tp in trackingParticles:
trackIds = getGeantTrackIds(tp)
for trackId in trackIds:
if trackId in tp_map:
print(trackId, tp_map)
raise RuntimeError("Found track mapped to multiple tracking particles")
tp_map[trackId] = tp
tp_sc_map[tp] = []
for sc in simClusters:
trackIds = getGeantTrackIds(sc)
for trackId in trackIds:
if trackId in tp_map:
tp = tp_map[trackId]
tp_sc_map[tp].append(sc)
return tp_sc_map
events = FWLite.Events("test_RECO.root")
for event in events:
tp_handle = FWLite.Handle("std::vector<TrackingParticle>")
event.getByLabel("mix:MergedTrackTruth", tp_handle)
trackingParticles = tp_handle.product()
sc_handle = FWLite.Handle("std::vector<SimCluster>")
#event.getByLabel("mix:MergedCaloTruth", sc_handle)
event.getByLabel("hgcSimTruth", sc_handle)
simClusters = sc_handle.product()
tp_sc_map = makeTPtoSCMap(trackingParticles, simClusters)
print("Length of tracking particles is", len(trackingParticles))
print("Length of simClusters is", len(simClusters))
print("Length of tp-> sc is", len(tp_sc_map))
    associated_scs = set()
    for scs in tp_sc_map.values():
        associated_scs.update(scs)
    unassociated_tps = [k for (k, v) in tp_sc_map.items() if not v]
    multassociated_tps = [k for (k, v) in tp_sc_map.items() if len(v) > 1]
print("Number of SCs associated to TPs", len(associated_scs))
print("Number of unassociated TPs", len(unassociated_tps))
print("Number of TPs associated to multiple SCs", len(multassociated_tps))
|
""" NOTES
- based on the awesome work by the fmriprep people
To do
- add cosine basis set
- add WM and global signal (global signal cf. power 2016, GSSCOR)
"""
import nipype.pipeline as pe
from nipype.interfaces.io import DataSink
from nipype.interfaces.utility import IdentityInterface, Merge, Rename
from nipype.algorithms.confounds import TCompCor, ACompCor
from nipype.interfaces import fsl
from .nodes import Erode_mask, Combine_component_files
from ...utils import Extract_task
def pick_wm(files):
return files[2]
def pick_csf(files):
return files[0]
def extract_basename(files):
return [f.split('/')[-1] for f in files]
def create_compcor_workflow(name='compcor'):
""" Creates A/T compcor workflow. """
input_node = pe.Node(interface=IdentityInterface(fields=[
'in_file',
'fast_files',
'highres2epi_mat',
'n_comp_tcompcor',
'n_comp_acompcor',
'output_directory',
'sub_id'
]), name='inputspec')
output_node = pe.Node(interface=IdentityInterface(fields=[
'tcompcor_file',
'acompcor_file',
'epi_mask'
]), name='outputspec')
extract_task = pe.MapNode(interface=Extract_task,
iterfield=['in_file'], name='extract_task')
rename_acompcor = pe.MapNode(interface=Rename(format_string='task-%(task)s_acompcor.tsv',
keepext=True),
iterfield=['task', 'in_file'], name='rename_acompcor')
datasink = pe.Node(DataSink(), name='sinker')
datasink.inputs.parameterization = False
average_func = pe.MapNode(interface=fsl.maths.MeanImage(dimension='T'),
name='average_func', iterfield=['in_file'])
epi_mask = pe.MapNode(interface=fsl.BET(frac=.3, mask=True, no_output=True,
robust=True),
iterfield=['in_file'], name='epi_mask')
wm2epi = pe.MapNode(fsl.ApplyXFM(interp='nearestneighbour'),
iterfield=['reference'],
name='wm2epi')
csf2epi = pe.MapNode(fsl.ApplyXFM(interp='nearestneighbour'),
iterfield=['reference'],
name='csf2epi')
erode_csf = pe.MapNode(interface=Erode_mask, name='erode_csf',
iterfield=['epi_mask', 'in_file'])
erode_csf.inputs.erosion_mm = 0
erode_csf.inputs.epi_mask_erosion_mm = 30
erode_wm = pe.MapNode(interface=Erode_mask, name='erode_wm',
iterfield=['epi_mask', 'in_file'])
erode_wm.inputs.erosion_mm = 6
erode_wm.inputs.epi_mask_erosion_mm = 10
merge_wm_and_csf_masks = pe.MapNode(Merge(2), name='merge_wm_and_csf_masks',
iterfield=['in1', 'in2'])
# This should be fit on the 30mm eroded mask from CSF
tcompcor = pe.MapNode(TCompCor(components_file='tcomcor_comps.txt'),
iterfield=['realigned_file', 'mask_files'],
name='tcompcor')
# WM + CSF mask
acompcor = pe.MapNode(ACompCor(components_file='acompcor_comps.txt',
merge_method='union'),
iterfield=['realigned_file', 'mask_files'],
name='acompcor')
compcor_wf = pe.Workflow(name=name)
compcor_wf.connect(input_node, 'in_file', extract_task, 'in_file')
compcor_wf.connect(extract_task, 'task_name', rename_acompcor, 'task')
compcor_wf.connect(acompcor, 'components_file', rename_acompcor, 'in_file')
compcor_wf.connect(input_node, 'sub_id', datasink, 'container')
compcor_wf.connect(input_node, 'output_directory', datasink,
'base_directory')
compcor_wf.connect(input_node, ('fast_files', pick_wm), wm2epi, 'in_file')
compcor_wf.connect(epi_mask, 'mask_file', wm2epi, 'reference')
compcor_wf.connect(input_node, 'highres2epi_mat', wm2epi, 'in_matrix_file')
compcor_wf.connect(input_node, ('fast_files', pick_csf), csf2epi, 'in_file')
compcor_wf.connect(epi_mask, 'mask_file', csf2epi, 'reference')
compcor_wf.connect(input_node, 'highres2epi_mat', csf2epi, 'in_matrix_file')
compcor_wf.connect(input_node, 'n_comp_tcompcor', tcompcor, 'num_components')
compcor_wf.connect(input_node, 'n_comp_acompcor', acompcor, 'num_components')
compcor_wf.connect(input_node, 'in_file', average_func, 'in_file')
compcor_wf.connect(average_func, 'out_file', epi_mask, 'in_file')
compcor_wf.connect(epi_mask, 'mask_file', erode_csf, 'epi_mask')
compcor_wf.connect(epi_mask, 'mask_file', erode_wm, 'epi_mask')
compcor_wf.connect(wm2epi, 'out_file', erode_wm, 'in_file')
compcor_wf.connect(csf2epi, 'out_file', erode_csf, 'in_file')
compcor_wf.connect(erode_wm, 'roi_eroded', merge_wm_and_csf_masks, 'in1')
compcor_wf.connect(erode_csf, 'roi_eroded', merge_wm_and_csf_masks, 'in2')
compcor_wf.connect(merge_wm_and_csf_masks, 'out', acompcor, 'mask_files')
compcor_wf.connect(input_node, 'in_file', acompcor, 'realigned_file')
compcor_wf.connect(input_node, 'in_file', tcompcor, 'realigned_file')
compcor_wf.connect(erode_csf, 'epi_mask_eroded', tcompcor, 'mask_files')
#compcor_wf.connect(tcompcor, 'components_file', output_node, 'acompcor_file')
#compcor_wf.connect(acompcor, 'components_file', output_node, 'tcompcor_file')
compcor_wf.connect(epi_mask, 'mask_file', output_node, 'epi_mask')
compcor_wf.connect(rename_acompcor, 'out_file', datasink, 'acompcor_file')
#compcor_wf.connect(tcompcor, 'components_file', combine_files, 'tcomp')
#compcor_wf.connect(acompcor, 'components_file', combine_files, 'acomp')
#compcor_wf.connect(combine_files, 'out_file', datasink, 'confounds')
return compcor_wf
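# Minimal usage sketch (illustrative only; the input field names come from the
# inputspec node above, the file paths are placeholders):
#   wf = create_compcor_workflow()
#   wf.inputs.inputspec.in_file = ['sub-01_task-rest_bold.nii.gz']
#   wf.inputs.inputspec.fast_files = ['fast_pve_0.nii.gz', 'fast_pve_1.nii.gz', 'fast_pve_2.nii.gz']
#   wf.inputs.inputspec.highres2epi_mat = 'highres2epi.mat'
#   wf.inputs.inputspec.n_comp_tcompcor = 6
#   wf.inputs.inputspec.n_comp_acompcor = 6
#   wf.inputs.inputspec.output_directory = '/derivatives'
#   wf.inputs.inputspec.sub_id = 'sub-01'
#   wf.run()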
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 10:36:47 2018
Generate the summary file for one clade of below:
Fungi, Prokaryota, Unclassified_Eukaryota, Virus, Metazoa (animal), Viridiplantae (plant)
Two files will be generated:
ncbi_[clade]_gtdb_taxonomy.txt
ncbi_[clade]_genomes_download.sh
Coders who love to comment their code are unlikely to have bad luck.
@author: Zewei Song
@email: songzewei@genomics.cn
"""
#%%
from __future__ import print_function
from __future__ import division
import argparse
from pathlib import Path
import os
parser = argparse.ArgumentParser()
parser.add_argument('clade', choices=['Fungi', 'Prokaryota', 'Virus', 'Metazoa', 'Viridiplantae', 'Unclassified_Eukaryota'], \
help='Specify a clade to parse.')
args = parser.parse_args()
clade = args.clade
# Check Unclassified (and Candida) redundancy
def fix_unclassified(taxa):
    if not isinstance(taxa, list):
taxa = taxa.split(';')
assert len(taxa) == 7
name = taxa[0][3:]
taxa_nr = []
for i, item in enumerate(taxa): # Starting from Phylum
if i == 0:
taxa_nr.append(item)
else:
if item[3:] == 'Unclassified' and taxa[i-1][3:15] != 'Unclassified':
taxa_nr.append(item + taxa[i-1][3:])
name = taxa[i-1][3:]
elif item[3:] == 'Unclassified' and taxa[i-1][3:] == 'Unclassified':
taxa_nr.append(item + name)
elif item[3:] == 'Candida':
taxa_nr.append(item + taxa[i-1][3:])
else:
taxa_nr.append(item)
if taxa == taxa_nr:
value = 0
else:
value = 1
return (taxa_nr, value)
# An example taxonomy string
#t = 'd__Protozoa;p__Unclassified;c__Filasterea;o__Unclassified;f__Unclassified;g__Capsaspora;s__Capsaspora owczarzaki'
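# Illustrative result for the example string above (worked out by hand):
#   fix_unclassified(t) -> (['d__Protozoa', 'p__UnclassifiedProtozoa',
#                            'c__Filasterea', 'o__UnclassifiedFilasterea',
#                            'f__UnclassifiedFilasterea', 'g__Capsaspora',
#                            's__Capsaspora owczarzaki'], 1)
# i.e. every 'Unclassified' rank is suffixed with the nearest named parent and
# the flag 1 reports that the string was changed.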
term = {'Fungi':((1, 'Fungi'),), 'Prokaryota':((0, 'Bacteria'), (0, 'Archaea')), \
'Virus':((0, 'Viruses'),), 'Metazoa':((1, 'Metazoa'),), 'Viridiplantae':((1, 'Viridiplantae'),),\
'Unclassified_Eukaryota':((0, 'Eukaryota'), (1, 'Unclassified'))}
genome_type = {'reference genome':'RS_', 'representative genome':'RS_', 'na':'GB_'}
#clade = 'Fungi'
if clade in term:
print('{0} is the chosen clade.'.format(clade))
else:
print('{0} is not a legal clade.'.format(clade))
print('Now parsing the genomes in NCBI ...')
content = []
with open('ncbi_genbank_genomes.txt', 'r') as f:
for line in f:
line = line.strip('\n').split('\t')
if clade != 'Unclassified_Eukaryota':
for search in term[clade]:
if line[search[0]+1] == search[1] and line[-1] != 'na': # need to check if the FTP is 'na'
content.append([line[0], line[search[0]+1]] + line[2:])
else:
pass
else:
if line[term[clade][0][0]+1] == term[clade][0][1] and line[term[clade][1][0]+1] == term[clade][1][1] and line[-1] != 'na': # need to check if the FTP is 'na'
content.append([line[0], clade] + line[2:])
count = 0
with open('ncbi_' + clade + '_gtdb_taxonomy.txt', 'wt') as f:
for line in content:
line[8] = line[8].replace(' ', '_')
taxa = 'd__' + line[1] + ';p__' + line[3] + ';c__' + line[4] + ';o__' + line[5] + ';f__' + line[6] + ';g__' + line[7] + ';s__' + line[8]
taxa = fix_unclassified(taxa)
count += taxa[1]
accid = genome_type[line[10]] + line[9]
f.write('{0}\t{1}\n'.format(accid, ';'.join(taxa[0])))
print('Finished analysis.')
print('\tFound {0} genomes with FTP link.'.format(len(content)))
print('\tFixed {0} taxa with ambiguous names.'.format(count))
folder = Path('genomes_' + clade)
finished_list = {}
if folder.is_dir():
print('The folder genomes_{0}/ is there, I will check for genomes already downloaded.'.format(clade))
filenames = os.listdir(folder)
print('Seems there are {0} files already there.'.format(len(filenames)))
for item in filenames:
accid = item[:15]
finished_list[accid] = item
else:
print('I will create a new folder named "genomes_{0}"'.format(clade))
os.makedirs(folder)
downloaded_count = 0
filenames = os.listdir('genomes_Prokaryota_1')
for item in filenames:
downloaded_count += 1
accid = item[:15]
finished_list[accid] = item
    print('{0} genomes already downloaded into another folder\n'.format(downloaded_count))
count = 0
count_to_fetch = 0
with open('ncbi_' + clade + '_genomes_download.txt', 'wt',newline='') as f:
#f.write('mkdir genomes_{0}\n'.format(clade))
for line in content:
count += 1
ftp = line[-1]
link = ftp + '/' + line[-1].split('/')[-1] + '_genomic.fna.gz'
accid = line[9]
#f.write('wget -c {0} --directory-prefix=genomes_{1} --wait=5 --random-wait\n'.format(link, clade))
try:
finished_list[accid]
except KeyError:
count_to_fetch += 1
f.write('{0}\n'.format(link))
print('Found {0} genomes available in NCBI genomes FTP.'.format(count))
if count_to_fetch != 0:
print('Need to download {0} genomes.'.format(count_to_fetch))
print('The FTP list for download is in {0}.'.format('ncbi_' + clade + '_genomes_download.txt'))
print('You can download them in parallel using:\n')
print('cat {0} | parallel -j 4 wget -q -c {1} --directory-prefix=genomes_{2}'.format('ncbi_' + clade + '_genomes_download.txt', '{}', clade))
print('check parallel -h for how to set the parameters.')
else:
print('You have all the genomes in this clade.')
|
#!/usr/bin/python3
"""
Script to parse Intersight HAR files
Returns:
HTTP Method
X-Starship-Token
URL
"""
import json
filename = input("Please enter the HAR Filename: ")
with open(filename, "r") as f:
har_data = f.read()
json_data = json.loads(har_data)
for entry in json_data["log"]["entries"]:
http_method = entry["request"]["method"]
http_url = entry["request"]["url"]
    x_starship_token = ""  # default in case this entry has no X-Starship-Token cookie
    for cookie in entry["request"]["cookies"]:
        if cookie["name"] == "X-Starship-Token":
            x_starship_token = cookie["value"]
# http_response_data = entry["response"]["content"]["text"]
print("")
print("HTTP METHOD : " + http_method)
print("X-Startship-Token : " + x_startship_token)
print("URL : " + http_url)
# Using F-string for Python 3.6+
# print(f"HTTP METHOD : {http_method}")
# print(f"X-Startship-Token : {x_startship_token}")
# print(f"URL : {http_url}")
# Write Output to a text file
# with open("harFiltered.log", 'a') as f:
# f.write("\n")
# f.write("HTTP METHOD : " + http_method + "\n")
# f.write("X-Startship-Token : " + x_startship_token + "\n")
# f.write("URL : " + http_url + "\n")
|
from ..Model.Player.Player import Player
from ..Model.Board.Board import Board
class GameController:
def __init__(self, debug=False):
self.board = Board()
self.red = Player(True, self.board, debug)
self.black = Player(False, self.board, debug)
self.redNext = True
self.debug = debug
def GetMoveOption(self, x, y, player):
if player.pieces[(x, y)] is None:
return False, []
else:
return True, player.MoveDirection(x, y)
def Red_Move(self, x, y, new_x, new_y):
if self.redNext:
valid, directions = self.GetMoveOption(x, y, self.red)
if valid and (new_x - x, new_y - y) in directions:
if self.debug:
self.Shout(x, y, new_x, new_y, self.red)
x, y = self.red.Move(x, y, new_x, new_y)
if not x == -1:
self.black.Terminate(x, y)
self.redNext = False
return True
else:
return False
else:
return False
def Black_Move(self, x, y, new_x, new_y):
if not self.redNext:
valid, directions = self.GetMoveOption(x, y, self.black)
if valid and (new_x - x, new_y - y) in directions:
if self.debug:
self.Shout(x, y, new_x, new_y, self.black)
x, y = self.black.Move(x, y, new_x, new_y)
if not x == -1:
self.red.Terminate(x, y)
self.redNext = True
return True
else:
return False
else:
return False
def Shout(self, x, y, new_x, new_y, player):
piece = player.pieces[(x, y)]
        print(f'{player.color}: {piece.name} moved from ({x}, {y}) to ({new_x}, {new_y})')
def GetNextPlayer(self):
if self.redNext:
self.redNext = False
return self.red
else:
self.redNext = True
return self.black
def PrintBoard(self):
print(self.board)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import warnings
from tonnikala.languages.base import LanguageNode, ComplexNode, BaseGenerator
from slimit.parser import Parser
from slimit import ast
from slimit.ast import *
try:
    from collections.abc import Iterable  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Iterable
from slimit.scope import SymbolTable
from slimit.visitors.scopevisitor import (
Visitor,
ScopeTreeVisitor,
fill_scope_references,
mangle_scope_tree,
NameManglerVisitor,
)
from ...compat import string_types
class FreeVariableAnalyzerVisitor(Visitor):
"""Mangles names.
Walks over a parsed tree and changes ID values to corresponding
mangled names.
"""
def __init__(self):
self.free_variables = set()
@staticmethod
def _is_mangle_candidate(id_node):
"""Return True if Identifier node is a candidate for mangling.
There are 5 cases when Identifier is a mangling candidate:
1. Function declaration identifier
2. Function expression identifier
3. Function declaration/expression parameter
4. Variable declaration identifier
5. Identifier is a part of an expression (primary_expr_no_brace rule)
"""
return getattr(id_node, '_mangle_candidate', False)
def visit_Identifier(self, node):
"""Mangle names."""
if not self._is_mangle_candidate(node):
return
name = node.value
symbol = node.scope.resolve(node.value)
if symbol is None:
self.free_variables.add(name)
HAS_ASSERT = False
try:
import sysconfig
HAS_ASSERT = bool(sysconfig.get_config_var('Py_DEBUG'))
except Exception:
pass
name_counter = 0
ALWAYS_BUILTINS = '''
undefined
'''.split()
def Str(s):
return String(json.dumps(s, ensure_ascii=False))
def Name(id, ctx=None):
return Identifier(id)
def Load():
pass
Store = Load
Expr = ExprStatement
def Attribute(value, attr, ctx=None):
return DotAccessor(value, Name(attr))
def SimpleCall(func, args=None):
# bad naming?
return FunctionCall(identifier=func, args=args)
JSAssign = Assign
def Assign(targets, value):
if len(targets) != 1:
raise TypeError("Only single assignments supported")
return JSAssign(op='=', left=targets[0], right=value)
def AssignNewVariable(targets, value):
return VarStatement([VarDecl(targets[0], value)])
JSReturn = Return
def Return(value=None):
return JSReturn(expr=value)
def SimpleFunctionDef(name, arguments=()):
arguments = list(arguments)
return FuncDecl(
identifier=Name(name),
parameters=arguments,
elements=[]
)
def assign_func_body(funcdecl, new_body=None):
funcdecl.elements = [] if new_body is None else new_body
return new_body
def get_body(funcdecl):
return funcdecl.elements
def NameX(id, store=False):
return Name(id=id, ctx=Load() if not store else Store())
class FreeVarFinder(object):
def __init__(self, tree):
self.tree = tree
@classmethod
def for_ast(cls, tree):
return cls(tree)
def get_free_variables(self):
sym_table = SymbolTable()
visitor = ScopeTreeVisitor(sym_table)
visitor.visit(self.tree)
fill_scope_references(self.tree)
free_var_analysis = FreeVariableAnalyzerVisitor()
free_var_analysis.visit(self.tree)
return free_var_analysis.free_variables
def parse(expression, mode='eval'):
if mode == 'eval':
return Parser().parse(expression).children()[0].expr
elif mode == 'exec':
return Parser().parse(expression).children()
raise TypeError("Only eval, exec modes allowed")
def get_expression_ast(expression, mode='eval'):
if not isinstance(expression, string_types):
return expression
tree = parse(expression, mode=mode)
return tree
def get_func_name(func):
return func.identifier.value
def gen_name():
global name_counter
name_counter += 1
return "__TK__%d__" % name_counter
def static_eval(expr):
    # minimal constant folding over slimit nodes; anything else is treated as
    # non-static by the caller
    if isinstance(expr, UnaryOp) and expr.op == '!':
        return not static_eval(expr.value)
    if isinstance(expr, Boolean):
        return expr.value == 'true'
    raise ValueError("not a statically evaluable expression")
def static_expr_to_bool(expr):
try:
return bool(static_eval(expr))
    except Exception:
return None
class JavascriptNode(LanguageNode):
is_top_level = False
def generate_output_ast(self, code, generator, parent, escape=False):
func = Name(id='__TK__output', ctx=Load())
if not isinstance(code, list):
code = [ code ]
rv = []
for i in code:
e = Expr(SimpleCall(func, [i]))
e.output_args = [i]
rv.append(e)
return rv
def make_buffer_frame(self, body):
new_body = []
new_body.append(AssignNewVariable(
targets=[
NameX('__TK__output', store=True),
],
value=SimpleCall(
NameX('__TK__mkbuffer')
)
))
new_body.extend(body)
new_body.append(Return(value=NameX('__TK__output')))
return new_body
def make_function(self, name, body, add_buffer=False, arguments=()):
func = SimpleFunctionDef(name, arguments=arguments)
new_body = assign_func_body(func, [])
if add_buffer:
new_body.extend(self.make_buffer_frame(body))
else:
new_body.extend(body)
if not new_body:
new_body.append(Pass())
return func
def generate_varscope(self, body):
name = gen_name()
rv = [
self.make_function(name, body,
arguments=['__TK__output', '__TK__escape']),
Expr(SimpleCall(NameX(name), [ NameX('__TK__output'), NameX('__TK__escape') ]))
]
return rv
class JsOutputNode(JavascriptNode):
def __init__(self, text):
super(JsOutputNode, self).__init__()
self.text = text
def get_expressions(self):
return [ self.get_expression() ]
def get_expression(self):
return Str(s=(self.text))
def generate_ast(self, generator, parent):
return self.generate_output_ast(self.get_expression(), generator, parent)
class JsTranslatableOutputNode(JsOutputNode):
def __init__(self, text, needs_escape=False):
super(JsTranslatableOutputNode, self).__init__(text)
self.needs_escape = needs_escape
def get_expressions(self):
return [ self.get_expression() ]
def get_expression(self):
name = 'gettext'
if self.needs_escape:
name = 'egettext'
expr = SimpleCall(
NameX(name),
[Str(s=self.text)],
)
return expr
class JsExpressionNode(JavascriptNode):
def __init__(self, expression):
super(JsExpressionNode, self).__init__()
self.expr = expression
def get_expressions(self):
return [ self.get_expression() ]
def get_expression(self):
return SimpleCall(
NameX('__TK__escape'),
[ self.get_unescaped_expression() ]
)
def get_unescaped_expression(self):
return get_expression_ast(self.expr)
def generate_ast(self, generator, parent):
return self.generate_output_ast(self.get_expression(), generator, parent)
class JsCodeNode(JavascriptNode):
def __init__(self, source):
super(JsCodeNode, self).__init__()
self.source = source
def generate_ast(self, generator, parent):
return get_expression_ast(self.source, mode='exec')
def coalesce_strings(args):
rv = []
str_on = None
for i in args:
if isinstance(i, Str):
if str_on:
str_on.s += i.s
continue
str_on = i
else:
str_on = None
rv.append(i)
return rv
class JsComplexNode(ComplexNode, JavascriptNode):
def generate_child_ast(self, generator, parent_for_children):
rv = []
for i in self.children:
rv.extend(i.generate_ast(generator, parent_for_children))
return rv
class JsIfNode(JsComplexNode):
def __init__(self, expression):
super(JsIfNode, self).__init__()
self.expression = expression
def generate_ast(self, generator, parent):
test = get_expression_ast(self.expression)
boolean = static_expr_to_bool(test)
if boolean == False:
return []
if boolean == True:
return self.generate_child_ast(generator, parent)
node = If(
test,
Block(self.generate_child_ast(generator, self)),
)
return [ node ]
def JsUnlessNode(self, expression):
expression = get_expression_ast(expression)
expression = UnaryOp(op='!', value=expression)
return JsIfNode(expression)
class JsImportNode(JavascriptNode):
def __init__(self, href, alias):
super(JsImportNode, self).__init__()
self.alias = alias
self.href = href
def generate_ast(self, generator, parent):
node = Assign(
targets = [NameX(str(self.alias), store=True)],
value =
SimpleCall(
func=
Attribute(value=NameX('__TK__', store=False),
attr='importDefs', ctx=Load()),
args=[
NameX('__TK__context'),
Str(s=self.href)
]
)
)
generator.add_import_source(self.href)
if parent.is_top_level:
generator.add_top_level_import(str(self.alias), node)
return []
return [ node ]
class JsAttributeNode(JsComplexNode):
def __init__(self, name, value):
super(JsAttributeNode, self).__init__()
self.name = name
def get_expressions(self):
rv = []
for i in self.children:
rv.extend(i.get_expressions())
return rv
def generate_ast(self, generator, parent):
if len(self.children) == 1 and \
isinstance(self.children[0], JsExpressionNode):
# special case, the attribute contains a single
# expression, these are handled by
# _TK_output.output_boolean_attr,
# given the name, and unescaped expression!
return [ Expr(SimpleCall(
func=Attribute(
value=NameX('__TK__output'),
attr='attr',
ctx=Load()
),
args=[
Str(s=self.name),
self.children[0].get_unescaped_expression()
]
)) ]
# otherwise just return the output for the attribute code
# like before
return self.generate_output_ast(
[ Str(s=' %s="' % self.name) ] +
self.get_expressions() +
[ Str(s='"') ],
generator, parent
)
class JsAttrsNode(JavascriptNode):
def __init__(self, expression):
super(JsAttrsNode, self).__init__()
self.expression = expression
def generate_ast(self, generator, parent):
expression = get_expression_ast(self.expression)
output = SimpleCall(
NameX('__TK__output_attrs'),
args=[expression]
)
return self.generate_output_ast(output, generator, parent)
class JsForNode(JsComplexNode):
def __init__(self, expression, parts):
super(JsForNode, self).__init__()
self.vars = parts[0]
self.expression = parts[1]
def generate_contents(self, generator, parent):
body = get_expression_ast(
"__TK__foreach(%s, function (%s) { });" %
(self.expression, self.vars),
'exec'
)
for_node = body[0]
func_frame = for_node.expr.args[1]
func_frame.elements = self.generate_child_ast(generator, self)
return [ for_node ]
def generate_ast(self, generator, parent):
# return self.generate_varscope(self.generate_contents())
return self.generate_contents(generator, parent)
class JsDefineNode(JsComplexNode):
def __init__(self, funcspec):
super(JsDefineNode, self).__init__()
if '(' not in funcspec:
funcspec += '()'
self.funcspec = funcspec
def generate_ast(self, generator, parent):
body = get_expression_ast(
"function %s{}" % self.funcspec,
"exec"
)
def_node = body[0]
assign_func_body(def_node, self.make_buffer_frame(
self.generate_child_ast(generator, self),
))
# move the function out of the closure
if parent.is_top_level:
generator.add_top_def(get_func_name(def_node), def_node)
return []
return [ def_node ]
class JsComplexExprNode(JsComplexNode):
def get_expressions(self):
rv = []
for i in self.children:
if hasattr(i, 'get_expression'):
rv.append(i.get_expression())
else:
rv.extend(i.get_expressions())
return rv
def generate_ast(self, generator, parent=None):
return self.generate_output_ast(self.get_expressions(), generator, parent)
class JsBlockNode(JsComplexNode):
def __init__(self, name):
super(JsBlockNode, self).__init__()
self.name = name
def generate_ast(self, generator, parent):
is_extended = isinstance(parent, JsExtendsNode)
body = get_expression_ast(
"function %s () {}" % self.name,
"exec"
)
def_node = body[0]
assign_func_body(def_node, self.make_buffer_frame(
self.generate_child_ast(generator, self),
))
generator.add_block(self.name, def_node)
if not is_extended:
# call the block in place
return self.generate_output_ast(
[ SimpleCall(NameX(self.name), []) ],
self, parent
)
else:
return [ ]
class JsExtendsNode(JsComplexNode):
is_top_level = True
def __init__(self, href):
super(JsExtendsNode, self).__init__()
self.href = href
def generate_ast(self, generator, parent=None):
generator.make_extended_template(self.href)
return self.generate_child_ast(generator, self)
def ast_equals(tree1, tree2):
    # slimit has no ast.dump(); compare the generated ECMAScript source instead
    return tree1.to_ecma() == tree2.to_ecma()
def coalesce_outputs(tree):
"""
Coalesce the constant output expressions
__output__('foo')
__output__('bar')
__output__(baz)
__output__('xyzzy')
into
__output__('foobar', baz, 'xyzzy')
"""
return tree
coalesce_all_outputs = True
if coalesce_all_outputs:
should_coalesce = lambda n: True
else:
should_coalesce = lambda n: n.output_args[0].__class__ is Str
class OutputCoalescer(NodeVisitor):
def visit(self, node):
# if - else expression also has a body! it is not we want, though.
if hasattr(node, 'body') and isinstance(node.body, Iterable):
# coalesce continuous string output nodes
new_body = []
output_node = None
def coalesce_strs():
if output_node:
output_node.value.args[:] = \
coalesce_strings(output_node.value.args)
for i in node.body:
if hasattr(i, 'output_args') and should_coalesce(i):
if output_node:
output_node.value.args.extend(i.output_args)
continue
output_node = i
else:
coalesce_strs()
output_node = None
new_body.append(i)
coalesce_strs()
node.body[:] = new_body
NodeVisitor.visit(self, node)
def check(self, node):
"""
Coalesce _TK_output(_TK_escape(literal(x))) into
_TK_output(x).
"""
if not ast_equals(node.func, NameX('__TK__output')):
return
for i in range(len(node.args)):
arg1 = node.args[i]
if not arg1.__class__.__name__ == 'Call':
continue
if not ast_equals(arg1.func, NameX('__TK__escape')):
continue
if len(arg1.args) != 1:
continue
arg2 = arg1.args[0]
if not arg2.__class__.__name__ == 'Call':
continue
if not ast_equals(arg2.func, NameX('literal')):
continue
if len(arg2.args) != 1:
continue
node.args[i] = arg2.args[0]
def visit_Call(self, node):
self.check(node)
self.generic_visit(node)
OutputCoalescer().visit(tree)
class JsRootNode(JsComplexNode):
def __init__(self):
super(JsRootNode, self).__init__()
is_top_level = True
def generate_ast(self, generator, parent=None):
main_body = self.generate_child_ast(generator, self)
extended = generator.extended_href
# do not generate __main__ for extended templates
if not extended:
main_func = self.make_function('__main__', main_body, add_buffer=True)
generator.add_top_def('__main__', main_func)
toplevel_funcs = generator.blocks + generator.top_defs
# analyze the set of free variables
free_variables = set()
for i in toplevel_funcs:
fv_info = FreeVarFinder.for_ast(i)
free_variables.update(fv_info.get_free_variables())
free_variables |= generator.top_level_names
# discard _TK_ variables, always builtin names undefined
# from free variables.
for i in list(free_variables):
if i.startswith('__TK__') or i in ALWAYS_BUILTINS:
free_variables.discard(i)
# discard the names of toplevel funcs from free variables
# free_variables.difference_update(generator.top_level_names)
modules = ['tonnikala/runtime'] + list(generator.import_sources)
if extended:
modules.append(extended)
# var_statement_vars = set(free_variables)|set(
code = 'define(%s, function(__TK__) {\n' % json.dumps(modules)
code += ' "use strict";\n'
code += ' var __TK__mkbuffer = __TK__.Buffer,\n'
code += ' __TK__escape = __TK__.escape,\n'
code += ' __TK__foreach = __TK__.foreach,\n'
code += ' literal = __TK__.literal,\n'
if extended:
code += ' __TK__parent_template = __TK__.load(%s),\n' % json.dumps(extended)
code += ' __TK__output_attrs = __TK__.outputAttrs,\n'
code += ' __TK__ctxadd = __TK__.addToContext,\n'
code += ' __TK__ctxbind = __TK__.bindFromContext;\n'
code += ' return function __TK__binder (__TK__context) {\n'
code += ' var %s;\n' % ',\n '.join(free_variables)
if extended:
# an extended template does not have a __main__ (it is inherited)
code += ' __TK__parent_template(__TK__context)\n'
for i in free_variables:
code += ' %s = __TK__ctxbind(__TK__context, "%s");\n' % (i, i)
code += ' return new __TK__.BoundTemplate(__TK__context);\n'
code += ' };\n'
code += '});\n'
tree = parse(code)
class LocatorAndTransformer(Visitor):
binder = None
def visit_FuncExpr(self, node):
if not node.identifier:
self.generic_visit(node)
return
name = node.identifier.value
if name == '__TK__binder' and not self.binder:
self.binder = node
return
self.generic_visit(node)
return node
locator = LocatorAndTransformer()
locator.visit(tree)
# inject the other top level funcs in the binder
binder = locator.binder
get_body(binder)[1:1] = toplevel_funcs
get_body(binder)[1:1] = generator.imports
coalesce_outputs(tree)
return tree.to_ecma()
class Generator(BaseGenerator):
OutputNode = JsOutputNode
TranslatableOutputNode = JsTranslatableOutputNode
IfNode = JsIfNode
ForNode = JsForNode
DefineNode = JsDefineNode
ComplexExprNode = JsComplexExprNode
ExpressionNode = JsExpressionNode
ImportNode = JsImportNode
RootNode = JsRootNode
AttributeNode = JsAttributeNode
AttrsNode = JsAttrsNode
UnlessNode = JsUnlessNode
ExtendsNode = JsExtendsNode
BlockNode = JsBlockNode
CodeNode = JsCodeNode
def __init__(self, ir_tree):
super(Generator, self).__init__(ir_tree)
self.blocks = []
self.top_defs = []
self.top_level_names = set()
self.extended_href = None
self.imports = []
self.import_sources = []
def add_bind_decorator(self, block):
name = block.identifier.value
return Expr(SimpleCall(Name('__TK__ctxadd'), [ Name('__TK__context'), Str(name), block ]))
def add_block(self, name, block):
self.top_level_names.add(name)
block = self.add_bind_decorator(block)
self.blocks.append(block)
def add_top_def(self, name, defblock):
self.top_level_names.add(name)
defblock = self.add_bind_decorator(defblock)
self.top_defs.append(defblock)
def add_top_level_import(self, name, node):
self.top_level_names.add(name)
self.imports.append(node)
def make_extended_template(self, href):
self.extended_href = href
def add_import_source(self, href):
self.import_sources.append(href)
|
# -*- coding: utf-8 -*-
'''
Manage the master configuration file
'''
from __future__ import absolute_import
# Import python libs
import logging
import os
# Import third party libs
import yaml
# Import salt libs
import salt.config
import salt.utils
log = logging.getLogger(__name__)
def values():
'''
Return the raw values of the config file
'''
data = salt.config.master_config(__opts__['conf_file'])
return data
def apply(key, value):
'''
Set a single key
.. note::
This will strip comments from your config file
'''
path = __opts__['conf_file']
if os.path.isdir(path):
path = os.path.join(path, 'master')
data = values()
data[key] = value
with salt.utils.fopen(path, 'w+') as fp_:
fp_.write(yaml.dump(data, default_flow_style=False))
def update_config(file_name, yaml_contents):
'''
Update master config with
``yaml_contents``.
Writes ``yaml_contents`` to a file named
``file_name.conf`` under the folder
specified by ``default_include``.
This folder is named ``master.d`` by
default. Please look at
http://docs.saltstack.com/en/latest/ref/configuration/master.html#include-configuration
for more information.
Example low data::
data = {
'username': 'salt',
'password': 'salt',
'fun': 'config.update_config',
'file_name': 'gui',
'yaml_contents': {'id': 1},
'client': 'wheel',
'eauth': 'pam',
}
'''
file_name = '{0}{1}'.format(file_name, '.conf')
dir_path = os.path.join(__opts__['config_dir'],
os.path.dirname(__opts__['default_include']))
try:
yaml_out = yaml.safe_dump(yaml_contents, default_flow_style=False)
if not os.path.exists(dir_path):
log.debug('Creating directory {0}'.format(dir_path))
            os.makedirs(dir_path, 0o755)
file_path = os.path.join(dir_path, file_name)
with salt.utils.fopen(file_path, 'w') as fp_:
fp_.write(yaml_out)
return 'Wrote {0}'.format(file_name)
except (IOError, OSError, yaml.YAMLError, ValueError) as err:
return str(err)
|
# -*- coding: utf-8 -*-
# Reference VOC scrips
# Author : Andy Liu
# last modify : 2020-08-15
# input : python count_classes_by_xml.py "/home/andy/data/xml"
import xml.dom.minidom
import random
import pickle
import os
import sys
import cv2
import argparse
from os import listdir, getcwd
from os.path import join
from tqdm import tqdm
WITH_IMAGE = True
REWRITE = False
classes = ["car", "person"]
count_dict = {}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('xml_dir', help='xml directory', type=str)
args = parser.parse_args()
return args
def count_num(xml_dir):
xml_dir = os.path.abspath(xml_dir)
if xml_dir[-1] == "/":
xml_dir = xml_dir[:-1]
filelist = os.listdir(xml_dir)
for f in tqdm(filelist):
fileInfor = f.split(".")
xml_path = os.path.join(xml_dir, f)
dom = xml.dom.minidom.parse(xml_path)
annotation = dom.documentElement
xml_img = annotation.getElementsByTagName('path')[0].childNodes[0].data
img_size = annotation.getElementsByTagName('size')[0]
w = int(img_size.getElementsByTagName("width")[0].childNodes[0].data)
h = int(img_size.getElementsByTagName("height")[0].childNodes[0].data)
for obj in annotation.getElementsByTagName('object'):
label = obj.getElementsByTagName('name')[0].childNodes[0].data
if label not in classes:
continue
if label in count_dict:
count_dict[label] += 1
else:
count_dict[label] = 1
print(count_dict)
if __name__ == '__main__':
args = parse_args()
xml_dir = args.xml_dir
if not os.path.exists(xml_dir):
print("Error !!! %s is not exists, please check the parameter"%xml_dir)
sys.exit(0)
count_num(xml_dir)
print("Done!")
|
from distutils.core import setup, Extension
import numpy
setup(name='pafprocess_ext', version='1.0',
ext_modules=[Extension('_pafprocess', ['pafprocess.cpp', 'pafprocess.i'], swig_opts=['-c++'],
depends=["pafprocess.h"], include_dirs=[numpy.get_include(), '.'])],
py_modules=["pafprocess"])
|
import time
import random
# global variable to store list of all available algorithms
algorithm_list = ["dfs_backtrack", "bin_tree"]
def depth_first_recursive_backtracker(maze, start_coor):
k_curr, l_curr = start_coor # Where to start generating
path = [(k_curr, l_curr)] # To track path of solution
maze.grid[k_curr][l_curr].visited = True # Set initial cell to visited
visit_counter = 1 # To count number of visited cells
visited_cells = list() # Stack of visited cells for backtracking
print("\nGenerating the maze with depth-first search...")
time_start = time.time()
while visit_counter < maze.grid_size: # While there are unvisited cells
neighbour_indices = maze.find_neighbours(
k_curr, l_curr
        )  # Find neighbour indices
neighbour_indices = maze._validate_neighbours_generate(neighbour_indices)
if neighbour_indices is not None: # If there are unvisited neighbour cells
visited_cells.append((k_curr, l_curr)) # Add current cell to stack
k_next, l_next = random.choice(neighbour_indices) # Choose random neighbour
maze.grid[k_curr][l_curr].remove_walls(
k_next, l_next
) # Remove walls between neighbours
maze.grid[k_next][l_next].remove_walls(
k_curr, l_curr
) # Remove walls between neighbours
maze.grid[k_next][l_next].visited = True # Move to that neighbour
k_curr = k_next
l_curr = l_next
path.append((k_curr, l_curr)) # Add coordinates to part of generation path
visit_counter += 1
elif len(visited_cells) > 0: # If there are no unvisited neighbour cells
(
k_curr,
l_curr,
) = visited_cells.pop() # Pop previous visited cell (backtracking)
path.append((k_curr, l_curr)) # Add coordinates to part of generation path
print("Number of moves performed: {}".format(len(path)))
print("Execution time for algorithm: {:.4f}".format(time.time() - time_start))
maze.grid[maze.entry_coor[0]][maze.entry_coor[1]].set_as_entry_exit(
"entry", maze.num_rows - 1, maze.num_cols - 1
)
maze.grid[maze.exit_coor[0]][maze.exit_coor[1]].set_as_entry_exit(
"exit", maze.num_rows - 1, maze.num_cols - 1
)
for i in range(maze.num_rows):
for j in range(maze.num_cols):
maze.grid[i][
j
].visited = False # Set all cells to unvisited before returning grid
maze.generation_path = path
def binary_tree(maze, start_coor):
# store the current time
time_start = time.time()
# repeat the following for all rows
for i in range(0, maze.num_rows):
# check if we are in top row
if i == maze.num_rows - 1:
# remove the right wall for this row, because we can't remove the top wall
for j in range(0, maze.num_cols - 1):
maze.grid[i][j].remove_walls(i, j + 1)
maze.grid[i][j + 1].remove_walls(i, j)
# this is the last row, so stop iterating
break
# repeat the following for all cells in rows
for j in range(0, maze.num_cols):
# check if we are in the last column
if j == maze.num_cols - 1:
# remove only the top wall for this cell
maze.grid[i][j].remove_walls(i + 1, j)
maze.grid[i + 1][j].remove_walls(i, j)
continue
# for all other cells
# randomly decide whether to remove the top wall or the right wall
remove_top = random.choice([True, False])
# if we chose to remove top wall
if remove_top:
maze.grid[i][j].remove_walls(i + 1, j)
maze.grid[i + 1][j].remove_walls(i, j)
# otherwise, remove the right wall
else:
maze.grid[i][j].remove_walls(i, j + 1)
maze.grid[i][j + 1].remove_walls(i, j)
print("Number of moves performed: {}".format(maze.num_cols * maze.num_rows))
print("Execution time for algorithm: {:.4f}".format(time.time() - time_start))
# choose the entry and exit coordinates
maze.grid[maze.entry_coor[0]][maze.entry_coor[1]].set_as_entry_exit(
"entry", maze.num_rows - 1, maze.num_cols - 1
)
maze.grid[maze.exit_coor[0]][maze.exit_coor[1]].set_as_entry_exit(
"exit", maze.num_rows - 1, maze.num_cols - 1
)
# create a path for animating the maze creation using a binary tree
path = list()
# variable for holding number of cells visited until now
visit_counter = 0
# list of cells visited so far, used for backtracking
visited = list()
# create variables to hold the coords of the current cell;
# regardless of the start coords given, we begin from cell (num_rows-1, num_cols-1)
k_curr, l_curr = (maze.num_rows - 1, maze.num_cols - 1)
# add first cell to the path
path.append((k_curr, l_curr))
# record the time at which path generation starts
begin_time = time.time()
# repeat until all the cells have been visited
while visit_counter < maze.grid_size: # While there are unvisited cells
# for each cell, we only visit top and right cells.
possible_neighbours = list()
try:
# take only those cells that are unvisited and accessible
if not maze.grid[k_curr - 1][l_curr].visited and k_curr != 0:
if not maze.grid[k_curr][l_curr].is_walls_between(
maze.grid[k_curr - 1][l_curr]
):
possible_neighbours.append((k_curr - 1, l_curr))
except Exception:
pass  # defensive guard around the neighbour lookup
try:
# take only those cells that are unvisited and accessible
if not maze.grid[k_curr][l_curr - 1].visited and l_curr != 0:
if not maze.grid[k_curr][l_curr].is_walls_between(
maze.grid[k_curr][l_curr - 1]
):
possible_neighbours.append((k_curr, l_curr - 1))
except Exception:
pass  # defensive guard around the neighbour lookup
# if there are still traversable cells reachable from the current cell
if len(possible_neighbours) != 0:
# select the first neighbour to traverse
k_next, l_next = possible_neighbours[0]
# add this cell to the path
path.append(possible_neighbours[0])
# add this cell to the visited
visited.append((k_curr, l_curr))
# mark this cell as visited
maze.grid[k_next][l_next].visited = True
visit_counter += 1
# update the current cell coords
k_curr, l_curr = k_next, l_next
else:
# check if no more cells can be visited
if len(visited) != 0:
k_curr, l_curr = visited.pop()
path.append((k_curr, l_curr))
else:
break
for row in maze.grid:
for cell in row:
cell.visited = False
print(f"Generating path for maze took {time.time() - begin_time}s.")
maze.generation_path = path
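# Minimal usage sketch (hypothetical: assumes a Maze object exposing the
# attributes and methods used above -- grid, grid_size, num_rows, num_cols,
# entry_coor, exit_coor, find_neighbours(), _validate_neighbours_generate();
# the constructor shown is illustrative only):
#
#   maze = Maze(num_rows=10, num_cols=10)
#   depth_first_recursive_backtracker(maze, maze.entry_coor)
#   print(len(maze.generation_path))   # number of moves in the generation path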
|
""" solution to the josephus problem """
def safe_position(n):
"""
function to get the safe position
formula:
n = 2^a + l
W(n) = 2l + 1
where n = the total number of people in the circle
a = the largest power of two with 2^a <= n
l = the remainder, n - 2^a
"""
pow_two = 0
i = 0
while (n - pow_two) >= pow_two:
pow_two = 2**i
i = i+1
l = n - pow_two
safe_p = (2 * l) + 1
return safe_p
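# Worked example: for n = 41 the largest power of two not exceeding n is
# 2**5 = 32, so l = 41 - 32 = 9 and the safe position is 2*9 + 1 = 19,
# i.e. safe_position(41) == 19.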
def main():
""" main function """
print("Input the number of in circle: ")
n = int(input())
print("Safe Position: ",safe_position(n))
main()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 20:55:19 2016
@author: ajaver
"""
import json
import os
from collections import OrderedDict
import zipfile
import numpy as np
import pandas as pd
import tables
from tierpsy.helper.misc import print_flush
from tierpsy.analysis.feat_create.obtainFeaturesHelper import WormStats
from tierpsy.helper.params import read_unit_conversions, read_ventral_side, read_fps
wcon_metadata_fields = ['id', 'lab', 'who', 'timestamp', 'temperature', 'humidity', 'arena',
'food', 'media', 'sex', 'stage', 'age', 'strain', 'protocol', 'interpolate', 'software']
def wcon_reformat_metadata(metadata_dict):
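# Keep only the standard WCON metadata fields at the top level; every other
# field (plus any pre-existing '@OMG' entries) is nested under the custom
# '@OMG' key.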
wcon_metadata = OrderedDict()
for field in wcon_metadata_fields:
if field in metadata_dict:
wcon_metadata[field] = metadata_dict[field]
wcon_metadata['@OMG'] = OrderedDict()
for field in metadata_dict:
if not field in wcon_metadata_fields:
wcon_metadata['@OMG'][field] = metadata_dict[field]
if '@OMG' in metadata_dict:
for field in metadata_dict['@OMG']:
wcon_metadata['@OMG'][field] = metadata_dict['@OMG'][field]
return wcon_metadata
def readMetaData(fname, provenance_step='FEAT_CREATE'):
def _order_metadata(metadata_dict):
ordered_fields = ['strain', 'timestamp', 'gene', 'chromosome', 'allele',
'strain_description', 'sex', 'stage', 'ventral_side', 'media', 'arena', 'food',
'habituation', 'who', 'protocol', 'lab', 'software']
extra_fields = metadata_dict.keys() - set(ordered_fields)
ordered_fields += sorted(extra_fields)
ordered_metadata = OrderedDict()
for field in ordered_fields:
if field in metadata_dict:
ordered_metadata[field] = metadata_dict[field]
return ordered_metadata
with tables.File(fname, 'r') as fid:
if not '/experiment_info' in fid:
experiment_info = {}
else:
experiment_info = fid.get_node('/experiment_info').read()
experiment_info = json.loads(experiment_info.decode('utf-8'))
provenance_tracking = fid.get_node('/provenance_tracking/' + provenance_step).read()
provenance_tracking = json.loads(provenance_tracking.decode('utf-8'))
if 'commit_hash' in provenance_tracking:
#old name
pkgs_versions = provenance_tracking['commit_hash']
else:
pkgs_versions = provenance_tracking['pkgs_versions']
if 'tierpsy' in pkgs_versions:
tierpsy_version = pkgs_versions['tierpsy']
else:
tierpsy_version = pkgs_versions['MWTracker']
MWTracker_ver = {"name":"tierpsy (https://github.com/ver228/tierpsy-tracker)",
"version": tierpsy_version,
"featureID":"@OMG"}
experiment_info["software"] = MWTracker_ver
return _order_metadata(experiment_info)
def __reformatForJson(A):
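# Round values to a data-dependent decimal precision (never fewer than two
# decimal places) to keep the serialized WCON compact, and replace NaN with
# None so that json.dumps(..., allow_nan=False) emits null instead of raising.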
if isinstance(A, (int, float)):
return A
good = ~np.isnan(A) & (A != 0)
dd = A[good]
if dd.size > 0:
dd = np.abs(np.floor(np.log10(np.abs(dd)))-2)
precision = max(2, int(np.min(dd)))
A = np.round(A.astype(np.float64), precision)
A = np.where(np.isnan(A), None, A)
#the WCON specification requires a single number rather than a one-element list
if A.size == 1:
return A[0]
else:
return A.tolist()
def __addOMGFeat(fid, worm_feat_time, worm_id):
worm_features = OrderedDict()
#add time series features
for col_name, col_dat in worm_feat_time.items():
if not col_name in ['worm_index', 'timestamp']:
worm_features[col_name] = col_dat.values
worm_path = '/features_events/worm_%i' % worm_id
worm_node = fid.get_node(worm_path)
#add event features
for feature_name in worm_node._v_children:
feature_path = worm_path + '/' + feature_name
worm_features[feature_name] = fid.get_node(feature_path)[:]
return worm_features
def _get_ventral_side(features_file):
ventral_side = read_ventral_side(features_file)
if not ventral_side or ventral_side == 'unknown':
ventral_type = '?'
else:
#we will merge the ventral and dorsal contours so the ventral contour is clockwise
ventral_type='CW'
return ventral_type
def _getData(features_file, READ_FEATURES=False, IS_FOR_WCON=True):
if IS_FOR_WCON:
lab_prefix = '@OMG '
else:
lab_prefix = ''
with pd.HDFStore(features_file, 'r') as fid:
if not '/features_timeseries' in fid:
return {} #empty file nothing to do here
features_timeseries = fid['/features_timeseries']
feat_time_group_by_worm = features_timeseries.groupby('worm_index');
ventral_side = _get_ventral_side(features_file)
with tables.File(features_file, 'r') as fid:
#fps used to adjust timestamp to real time
fps = read_fps(features_file)
#get pointers to some useful data
skeletons = fid.get_node('/coordinates/skeletons')
dorsal_contours = fid.get_node('/coordinates/dorsal_contours')
ventral_contours = fid.get_node('/coordinates/ventral_contours')
#let's append the data of each individual worm as a element in a list
all_worms_feats = []
#group by iterator will return sorted worm indexes
for worm_id, worm_feat_time in feat_time_group_by_worm:
worm_id = int(worm_id)
#read worm skeletons data
worm_skel = skeletons[worm_feat_time.index]
worm_dor_cnt = dorsal_contours[worm_feat_time.index]
worm_ven_cnt = ventral_contours[worm_feat_time.index]
#start ordered dictionary with the basic features
worm_basic = OrderedDict()
worm_basic['id'] = str(worm_id)
worm_basic['head'] = 'L'
worm_basic['ventral'] = ventral_side
worm_basic['ptail'] = worm_ven_cnt.shape[1]-1 #index starting with 0
worm_basic['t'] = worm_feat_time['timestamp'].values/fps #convert from frames to seconds
worm_basic['x'] = worm_skel[:, :, 0]
worm_basic['y'] = worm_skel[:, :, 1]
contour = np.hstack((worm_ven_cnt, worm_dor_cnt[:, ::-1, :]))
worm_basic['px'] = contour[:, :, 0]
worm_basic['py'] = contour[:, :, 1]
if READ_FEATURES:
worm_features = __addOMGFeat(fid, worm_feat_time, worm_id)
for feat in worm_features:
worm_basic[lab_prefix + feat] = worm_features[feat]
if IS_FOR_WCON:
for x in worm_basic:
if not x in ['id', 'head', 'ventral', 'ptail']:
worm_basic[x] = __reformatForJson(worm_basic[x])
#append features
all_worms_feats.append(worm_basic)
return all_worms_feats
def _getUnits(features_file, READ_FEATURES=False):
fps_out, microns_per_pixel_out, _ = read_unit_conversions(features_file)
xy_units = microns_per_pixel_out[1]
time_units = fps_out[2]
units = OrderedDict()
units["size"] = "mm" #size of the plate
units['t'] = time_units #frames or seconds
for field in ['x', 'y', 'px', 'py']:
units[field] = xy_units #(pixels or micrometers)
if READ_FEATURES:
#TODO how to change microns to pixels when required
ws = WormStats()
for field, unit in ws.features_info['units'].items():
units['@OMG ' + field] = unit
return units
def exportWCONdict(features_file, READ_FEATURES=False):
metadata = readMetaData(features_file)
metadata = wcon_reformat_metadata(metadata)
data = _getData(features_file, READ_FEATURES)
units = _getUnits(features_file, READ_FEATURES)
#units = {x:units[x].replace('degrees', '1') for x in units}
#units = {x:units[x].replace('radians', '1') for x in units}
wcon_dict = OrderedDict()
wcon_dict['metadata'] = metadata
wcon_dict['units'] = units
wcon_dict['data'] = data
return wcon_dict
def getWCOName(features_file):
return features_file.replace('_features.hdf5', '.wcon.zip')
def exportWCON(features_file, READ_FEATURES=False):
base_name = os.path.basename(features_file).replace('_features.hdf5', '')
print_flush("{} Exporting data to WCON...".format(base_name))
wcon_dict = exportWCONdict(features_file, READ_FEATURES)
wcon_file = getWCOName(features_file)
#with gzip.open(wcon_file, 'wt') as fid:
# json.dump(wcon_dict, fid, allow_nan=False)
with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zip_name = os.path.basename(wcon_file).replace('.zip', '')
wcon_txt = json.dumps(wcon_dict, allow_nan=False, separators=(',', ':'))
zf.writestr(zip_name, wcon_txt)
print_flush("{} Finised to export to WCON.".format(base_name))
if __name__ == '__main__':
features_file = '/Users/ajaver/OneDrive - Imperial College London/Local_Videos/single_worm/global_sample_v3/883 RC301 on food R_2011_03_07__11_10_27___8___1_features.hdf5'
#exportWCON(features_file)
wcon_file = getWCOName(features_file)
wcon_dict = exportWCONdict(features_file)
wcon_txt = json.dumps(wcon_dict, allow_nan=False, indent=4)
#%%
with zipfile.ZipFile(wcon_file, mode='w', compression=zipfile.ZIP_DEFLATED) as zf:
zip_name = os.path.basename(wcon_file).replace('.zip', '')
zf.writestr(zip_name, wcon_txt)
#%%
# import wcon
# wc = wcon.WCONWorms()
# wc = wc.load_from_file(JSON_path, validate_against_schema = False)
|
from django import forms
from django.contrib.auth.models import User
from .models import Profile, Posts, Business, Area
class EditProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ('user', 'location')
class AreaForm(forms.ModelForm):
class Meta:
model = Area
exclude = ('admin',)
class BusinessForm(forms.ModelForm):
class Meta:
model = Business
exclude = ('owner', 'hood', 'bs_logo')
class PostForm(forms.ModelForm):
class Meta:
model = Posts
exclude = ('user', 'hood')
|
# Excel Column Number
# https://www.interviewbit.com/problems/excel-column-number/
#
# Given a column title as appears in an Excel sheet, return its corresponding
# column number.
#
# Example:
#
# A -> 1
#
# B -> 2
#
# C -> 3
#
# ...
#
# Z -> 26
#
# AA -> 27
#
# AB -> 28
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
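# Approach: interpret the title as a base-26 number with digits A=1 .. Z=26,
# accumulating res = res * 26 + digit for each character.
# Worked example: "AB" -> 1*26 + 2 = 28, matching the table above.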
class Solution:
# @param A : string
# @return an integer
def titleToNumber(self, A):
res = 0
for char in A:
diff = ord(char) - ord('A') + 1
res = res * 26 + diff
return res
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
if __name__ == "__main__":
s = Solution()
print(s.titleToNumber('AA'))
print(s.titleToNumber('A'))
|
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
--- Nokia 1830 PSS SWDM Performance Collection Tool
--- Created by Naseredin aramnejad
--- Tested on Python 3.7.x
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#___________________________________IMPORTS_____________________________________
import tkinter as tk
from tkinter import ttk
# importing these submodules makes the tk.messagebox / tk.filedialog /
# tk.simpledialog attribute access used below work reliably
from tkinter import messagebox, filedialog, simpledialog
from tkintertable import TableModel
from tkintertable import TableCanvas
from multiprocessing import freeze_support
from multiprocessing import Pool
from multiprocessing import Process
from threading import Thread
from webbrowser import open_new
from telnetlib import Telnet
from sys import exit
from time import sleep
from hashlib import md5
from ast import literal_eval
from datetime import datetime
from os import mkdir,chdir
#___________________________________GLOBALS_____________________________________
_version_ = 2.20
NodeCounter = 0
stepSize = 0
isTrial = True
Templates = []
Network_Data = []
Collected_Data = []
verifyResult = False
processQTY = 1
tableData = {}
results = []
standard_WDM_Events = ('All','Admin State',
'Operation State',
'LED Status',
'RX Power',
'TX Power',
'Facility Loopback',
'Terminal Loopback',
'Laser Temperature',
'TX Frequency',
'RX Frequency',
'Pre-FEC BER',
'Post-FEC BER')
standard_ETH_Events = ('All','Rx Broadcast Packets',
'Rx Collisions',
'Rx CRC Alignment Errors',
'Rx Drop Events',
'Rx Fragments',
'Rx Multicast Packets',
'Rx Octets',
'Rx Oversized Packets',
'Rx Packet Error Ratio',
'Rx Packets',
'Rx Packets 1024 to 1518 Bytes',
'Rx Packets 128 to 255 Bytes',
'Rx Packets 256 to 511 Bytes',
'Rx Packets 512 to 1023 Bytes',
'Rx Packets 64 Bytes',
'Rx Packets 65 to 127 Bytes',
'Rx Undersized Packets',
'Tx Broadcast Packets',
'Tx Collisions',
'Tx CRC Alignment Errors',
'Tx DropEvents',
'Tx Fragments',
'Tx Multicast Packets',
'Tx Octets',
'Tx Oversized Packets',
'Tx Packets',
'Tx Packet Error Ratio',
'Tx Packets 1024 to 1518 Bytes',
'Tx Packets 128 to 255 Bytes',
'Tx Packets 256 to 511 Bytes',
'Tx Packets 512 to 1023 Bytes',
'Tx Packets 64 Bytes',
'Tx Packets 65 to 127 Bytes',
'Tx Undersized Packets')
standard_OSC_OPT_Events = ('Transmit Power Average',)
standard_OSC_OPR_Events = ('Receive Power Average',)
#_______________________________Rotating Bar Class (Indeterminate)______________
class Rotbar:
def __init__(self,parent,speed=0.05,
aspect=1,row=0,column=0,
padx=0,pady=0,file='',sticky1='nw',columnspan=1,rowspan=1):
self.speed = speed
self.aspect = aspect
self.parent = parent
self.filepath = file
self.row = row
self.column = column
self.padx = padx
self.pady = pady
self.sticky1 = sticky1
self.columnspan = columnspan
self.rowspan = rowspan
self.label = ttk.Label(self.parent)
def init(self):
self.Status = True
self.label.grid(row=self.row,column=self.column,padx=self.padx,
pady=self.pady,sticky=self.sticky1,
columnspan=self.columnspan,rowspan=self.rowspan)
def gif_to_list(self,gif):
l1,i = [],1
while True:
try:
l1.append(tk.PhotoImage(file=gif,format = f'gif -index {i}'))
i += 1
except tk.TclError:
return l1
def execute(self):
piclist = self.gif_to_list(self.filepath)
while True:
if self.Status == True:
for i in piclist:
i = i.subsample(self.aspect,self.aspect)
self.label.configure(image=i)
self.parent.update()
sleep(self.speed)
else:
break
def stop(self):
self.label.grid_forget()
self.Status = False
#_______________________________NE Adapter Class________________________________
class NE_Adapter:
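# Thin telnet wrapper around a single NE: on construction it logs in, disables
# CLI paging, runs every command in `Commands`, stores the raw output in
# self.CommandsResult and flags success through self.CollectionStatus.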
def __init__(self , NodeIP , NodeLogin , Commands,port=23):
self.NodeIP = NodeIP
self.port = port
self.NodeLogin = NodeLogin
self.Commands = Commands
self.CommandsResult = {}
self.CollectionStatus = False
self.tn = Telnet()
if self.Conn_Init() == True:
for i in self.Commands:
self.CommandsResult[i] = self.CMD_Exec(i)
self.Conn_Terminator()
self.CollectionStatus = True
else:
print(self.NodeIP + ' is not Reachable')
def Conn_Init(self):
try:
self.tn.open(self.NodeIP,self.port,timeout=3)
self.tn.read_until(b'login:')
self.tn.write((self.NodeLogin[0] + "\n").encode('ascii'))
self.tn.read_until(b'Username: ')
self.tn.write((self.NodeLogin[1] + "\n").encode('ascii'))
self.tn.read_until(b'Password: ')
self.tn.write((self.NodeLogin[2] + "\n").encode('ascii'))
self.tn.read_until(b'(Y/N)?')
self.tn.write(('y' + "\n").encode('ascii'))
self.tn.read_until(b'#')
self.tn.write(('paging status disabled' + "\n").encode('ascii'))
self.tn.read_until(b'#')
return True
except:
return False
def CMD_Exec(self,cmd):
self.tn.write((cmd + "\n").encode('ascii'))
data = self.tn.read_until(b"#")
return data.decode('utf-8')
def Conn_Terminator(self):
self.tn.write(('logout\n').encode('ascii'))
self.tn.close()
#_______________________________Custom TextBox Class____________________________
class CustomText(tk.Text):
def __init__(self, *args, **kwargs):
tk.Text.__init__(self, *args, **kwargs)
self._orig = self._w + "_orig"
self.tk.call("rename", self._w, self._orig)
self.tk.createcommand(self._w, self._proxy)
def _proxy(self, *args):
cmd = (self._orig,) + args
result = self.tk.call(cmd)
if (args[0] in ("insert", "delete") or
args[0:3] == ("mark", "set", "insert")):
self.event_generate("<<CursorChange>>", when="tail")
return result
#_______________________________New Template Class______________________________
class Template_Builder:
def __init__(self,name=None,lstSelectedEvents=None,lstTempl=None):
self.name = name
if name != None:
self.events = self.EventCollector(lstSelectedEvents)
self.ports = self.PortCollector(lstTempl)
self.Commands = self.CMDGen()
elif name == None:
self.events = None
self.ports = None
self.Commands = None
def EventCollector(self,lstSelectedEvents):
event_list = {}
for i in lstSelectedEvents:
tmp = i.split('•••')
if tmp[0] not in event_list:
event_list[tmp[0]] = []
event_list[tmp[0]].append(tmp[1])
return event_list
def PortCollector(self,lstTempl):
tmpl = {}
for i in lstTempl:
tmp = i.split('•••')
if tmp[1] not in tmpl:
tmpl[tmp[1]] = []
tmpl[tmp[1]].append(tmp[2]+'•••'+tmp[3])
return tmpl
def CMDGen(self):
Commands = {}
for i in self.ports:
for j in self.ports[i]:
tmp = j.split('•••')
if i not in Commands:
Commands[i] = []
Commands[i].append('show interface ' + tmp[0] + ' '+tmp[1] +
' detail')
if 'Ethernet' in self.events:
for i in self.ports:
for j in self.ports[i]:
tmp = j.split('•••')
if i not in Commands:
Commands[i] = []
Commands[i].append('show interface '+tmp[0]+' '+tmp[1]+
' PM ethernet 0 0')
if 'OSC_OPT' in self.events:
for i in self.ports:
for j in self.ports[i]:
tmp = j.split('•••')
if i not in Commands:
Commands[i] = []
Commands[i].append('show interface '+tmp[0]+' '+tmp[1]+
' PM opt 0 0')
if 'OSC_OPR' in self.events:
for i in self.ports:
for j in self.ports[i]:
tmp = j.split('•••')
if i not in Commands:
Commands[i] = []
Commands[i].append('show interface '+tmp[0]+' '+tmp[1]+
' PM opr 0 0')
return(Commands)
#_______________________________________________________________________________
def LoadCards():
btnLoadCard.grid_forget()
if cmbNetData.get() != '':
valid_cards = ['20MX80','20AX200','1UX100','2UX200','11DPM12','11DPM8',
'12P120','30AN300','4UC400','4AN400','2UC400','AHPHG',
'IROADMV','AHPLG','A2325A']
login = []
Cardlist = []
nodeip = cmbNetData.get().split('•••')[1]
for i in Network_Data:
if i[1] == nodeip:
login.append(i[2])
login.append(i[3])
login.append(i[4])
break
Command = ['show slot *']
obj = NE_Adapter(nodeip,login,Command)
if obj.CollectionStatus == False:
pbar1.stop()
btnLoadCard.grid(column=2,row=3,padx=(0,5),sticky='nw')
return
Cards = obj.CommandsResult[Command[0]].splitlines()
del(obj)
for i in Cards:
if '/' in i:
l0 = i.find(' ',i.find('/'))
tmp = i[l0+1:i.find(' ',l0)]
if tmp in valid_cards:
Cardlist.append(tmp)
Cardlist = sorted(Cardlist)
cmbCards.config(state='readonly')
cmbCards['values'] = tuple(set(Cardlist))
cmbCards.set(cmbCards['values'][0])
pbar1.stop()
btnLoadCard.grid(column=2,row=3,padx=(0,5),sticky='nw')
else:
tk.messagebox.showerror('Error','Select the node first!')
#_______________________________________________________________________________
def Init_Interface(event):
if OnlineVar.get() == 2:
cmbInterfaces['values'] = ()
cmbInterfaces.set('')
cmbInterfaces.config(state='disabled')
#_______________________________________________________________________________
def Init_Shelfs(event):
if OnlineVar.get() == 2:
cmbCards['values'] = ()
cmbCards.set('')
cmbCards.config(state='disabled')
Init_Interface(event)
#_______________________________________________________________________________
def LoadInts():
btnLoadInt.grid_forget()
if cmbCards.get() != '':
login = []
Intlist = []
nodeip = cmbNetData.get().split('•••')[1]
for i in Network_Data:
if i[1] == nodeip:
login.append(i[2])
login.append(i[3])
login.append(i[4])
break
Command = ['show interface ' + cmbCards.get() + ' *']
obj = NE_Adapter(nodeip,login,Command)
if obj.CollectionStatus == False:
pbar2.stop()
btnLoadInt.grid(column=2,row=4,padx=(0,5),sticky='nw')
return
Cards = obj.CommandsResult[Command[0]].splitlines()
del(obj)
for i in Cards:
if '/' in i:
l0 = i.find('/')
tmp = i[l0-1:i.find(' ',l0)]
if cmbCards.get() not in ['ahplg','AHPLG','iroadmv',
'IROADMV','ahphg','AHPHG','a2325a',
'A2325A']:
Intlist.append(tmp)
else:
if 'OSC' in tmp:
Intlist.append(tmp)
cmbInterfaces.config(state='readonly')
cmbInterfaces['values'] = tuple(Intlist)
cmbInterfaces.set(cmbInterfaces['values'][0])
pbar2.stop()
btnLoadInt.grid(column=2,row=4,padx=(0,5),sticky='nw')
else:
tk.messagebox.showerror('Error','Select the card first!')
#_______________________________________________________________________________
def GoOffline():
btnLoadInt.config(state='disable')
btnLoadCard.config(state='disable')
cmbShelfs.config(state='readonly')
cmbCards.config(state='readonly')
cmbInterfaces.grid_forget()
txtPortAdd.grid(column=1,row=4,padx=(0,10),sticky='nw')
#_______________________________________________________________________________
def GoOnline():
cmbShelfs.set('')
cmbCards.set('')
cmbInterfaces.set('')
btnLoadInt.config(state='normal')
btnLoadCard.config(state='normal')
cmbShelfs.config(state='disable')
cmbCards.config(state='disable')
cmbInterfaces.config(state='disable')
txtPortAdd.grid_forget()
cmbInterfaces.grid(column=1,row=4,padx=(0,10),sticky='nw')
#_______________________________________________________________________________
def TableExport(meth='Manual'):
folderflag = False
global tableData
titleflag = False
if meth=='Manual':
filename = tk.filedialog.asksaveasfile(mode='w',defaultextension=".csv",
filetypes = (("CSV files","*.csv"),("all files","*.*")))
if filename == None:
return
elif meth=='Auto':
try:
chdir(log_Folder)
folderflag = True
except:
print('Couldn\'t switch to the log folder...')
pass
datetimeobj = datetime.now()
currentdatetime = str(datetimeobj.year)+str(datetimeobj.month)+\
str(datetimeobj.day)+str(datetimeobj.hour)+str(datetimeobj.minute)+\
str(datetimeobj.second)
filename_name = 'Performance_Export_'+ currentdatetime+'.csv'
filename = open(filename_name,'w')
for i in tableData:
if titleflag == False:
for j in tableData[i]:
filename.write(str(j)+',')
filename.write('\n')
titleflag = True
for j in tableData[i]:
filename.write(str(tableData[i][j])+',')
filename.write('\n')
filename.close()
if folderflag == True:
chdir('..')
folderflag = False
#______________________________ Process QTY Update _____________________________
def updateValue(e):
global processQTY
tmp = scaleVal.get()
lblCurrentPrQtyValue.config(text=str(int((tmp))))
processQTY = int(tmp)
#______________________________Event Extractor Function_________________________
def event_Selector(cardinfo,standard_event):
global standard_WDM_Events , standard_ETH_Events, standard_OSC_OPT_Events
global standard_OSC_OPR_Events
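# Translate a UI-level event name (e.g. 'RX Power') into the card-specific
# field label that appears in the CLI output of the card named in `cardinfo`,
# so that port_perf() can locate the value in the raw "show interface" text.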
ethernet_events = {'Rx Broadcast Packets':'Rx Broadcast Packets',
'Rx Collisions':'Rx Collisions',
'Rx CRC Alignment Errors':'Rx CRC Alignment Errors',
'Rx Drop Events':'Rx Drop Events',
'Rx Fragments':'Rx Fragments',
'Rx Multicast Packets':'Rx Multicast Packets',
'Rx Octets':'Rx Octets',
'Rx Oversized Packets':'Rx Oversized Packets',
'Rx Packet Error Ratio':'Rx Packet Error Ratio',
'Rx Packets':'Rx Packets',
'Rx Packets 1024 to 1518 Bytes':'Rx Packets 1024 to 1518 Bytes',
'Rx Packets 128 to 255 Bytes':'Rx Packets 128 to 255 Bytes',
'Rx Packets 256 to 511 Bytes':'Rx Packets 256 to 511 Bytes',
'Rx Packets 512 to 1023 Bytes':'Rx Packets 512 to 1023 Bytes',
'Rx Packets 64 Bytes':'Rx Packets 64 Bytes',
'Rx Packets 65 to 127 Bytes':'Rx Packets 65 to 127 Bytes',
'Rx Undersized Packets':'Rx Undersized Packets',
'Tx Broadcast Packets':'Tx Broadcast Packets',
'Tx Collisions':'Tx Collisions',
'Tx CRC Alignment Errors':'Tx CRC Alignment Errors',
'Tx DropEvents':'Tx DropEvents',
'Tx Fragments':'Tx Fragments',
'Tx Multicast Packets':'Tx Multicast Packets',
'Tx Octets':'Tx Octets',
'Tx Oversized Packets':'Tx Oversized Packets',
'Tx Packet Error Ratio':'Tx Packet Error Ratio',
'Tx Packets':'Tx Packets',
'Tx Packets 1024 to 1518 Bytes':'Tx Packets 1024 to 1518 Bytes',
'Tx Packets 128 to 255 Bytes':'Tx Packets 128 to 255 Bytes',
'Tx Packets 256 to 511 Bytes':'Tx Packets 256 to 511 Bytes',
'Tx Packets 512 to 1023 Bytes':'Tx Packets 512 to 1023 Bytes',
'Tx Packets 64 Bytes':'Tx Packets 64 Bytes',
'Tx Packets 65 to 127 Bytes':'Tx Packets 65 to 127 Bytes',
'Tx Undersized Packets':'Tx Undersized Packets'}
osc_events = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_4uc400 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_30an300 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_4an400 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_2uc400 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_20ax200 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_20mx80 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_1ux100 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_2ux200 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Laser Temperature':'Laser Case Temperature',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_12p120 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'Laser Temperature':'Laser Temperature',
'TX Power':'Transmitted Power',
'RX Power':'Received Power',
'TX Frequency':'Channel Tx',
'RX Frequency':'Channel Rx',
'Pre-FEC BER':'PreFecBer',
'Post-FEC BER':'PostFecBer',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_11dpm12 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'ChannelTx',
'RX Frequency':'ChannelRx',
'Pre-FEC BER':'Pre-Fec BER',
'Post-FEC BER':'Post-Fec BER',
'Laser Temperature':'Laser Temperature'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Event_11dpm8 = {'wdm':{'Admin State':'Admin State',
'Operation State':'Oper State',
'LED Status':'Status LED',
'RX Power':'Received Power',
'TX Power':'Transmitted Power',
'Facility Loopback':'Facility Loopback',
'Terminal Loopback':'Terminal Loopback',
'TX Frequency':'ChannelTx',
'RX Frequency':'ChannelRx',
'Pre-FEC BER':'Pre-Fec BER',
'Post-FEC BER':'Post-Fec BER',
'Laser Temperature':'Laser Temperature'},
'ethernet':ethernet_events,
'OSC_OPT':{'Transmit Power Average':'OPT Average (dBm)'},
'OSC_OPR':{'Receive Power Average':'OPR Average (dBm)'}}
Card_List = {'1ux100':Event_1ux100,'1UX100':Event_1ux100,
'20mx80':Event_20mx80,'20MX80':Event_20mx80,
'20AX200':Event_20ax200,'20ax200':Event_20ax200,
'4uc400':Event_4uc400,'4UC400':Event_4uc400,
'2uc400':Event_2uc400,'2UC400':Event_2uc400,
'2ux200':Event_2ux200,'2UX200':Event_2ux200,
'30AN300':Event_30an300,'30an300':Event_30an300,
'4AN400':Event_4an400,'4an400':Event_4an400,
'12p120':Event_12p120,'12P120':Event_12p120,
'11dpm12':Event_11dpm12,'11DPM12':Event_11dpm12,
'11DPM8':Event_11dpm8,'11dpm8':Event_11dpm8,
'ahphg':osc_events,'ahplg':osc_events,'iroadmv':osc_events,
'AHPHG':osc_events,'AHPLG':osc_events,'IROADMV':osc_events,
'a2325a':osc_events,'A2325A':osc_events}
Eventtype = ''
cardType = ''
if standard_event in standard_WDM_Events:
Eventtype = 'wdm'
elif standard_event in standard_OSC_OPT_Events:
Eventtype = 'OSC_OPT'
elif standard_event in standard_OSC_OPR_Events:
Eventtype = 'OSC_OPR'
elif standard_event in standard_ETH_Events:
Eventtype = 'ethernet'
for i in Card_List:
if i in cardinfo:
cardType = i
break
if cardType != '':
specific_event = Card_List[cardType][Eventtype][standard_event]
return (specific_event)
else:
return ''
#________________________________ Event Selector _______________________________
def port_perf(portinfo,event):
tmp = portinfo.splitlines()
target = event_Selector(tmp[0] , event)
l0 = l1 = l2 = 0
lineofs = ''
foundit = False
for i in tmp:
if target in i:
foundit = True
lineofs = i
l0 = i.find(target)
break
if foundit == True:
l1 = lineofs.find(":",l0) + 1
l2=lineofs.find(" ",l1)
if (l2 == -1 or 'ethernet' in tmp[0] or 'PM op' in tmp[0]) and\
('OSCSFP detail' not in tmp[0]):
result = lineofs[l1:].strip()
elif 'OSCSFP detail' in tmp[0]:
l1 = lineofs.find(target)
l2 = lineofs.find(":",l1)+1
l3 = lineofs.find(" ",l2+1)
if l3 == -1:
result = lineofs[l2:].strip()
else:
result = lineofs[l2:l3].strip()
else:
result = lineofs[l1:l2+1].strip()
return result
else:
return ''
# _________________________Tab3 Table loader__________________________________
def TableUpdate(TemplateOBJ):
print('entered!')
global Collected_Data , tableData
tableData = {}
extraction_dic = {}
current_Collected_OBJ = None
for L in TemplateOBJ.ports:
for f in Collected_Data:
if f.NodeIP == L:
current_Collected_OBJ = f
break
if current_Collected_OBJ.CollectionStatus == True:
for j in TemplateOBJ.ports[L]:
current_command = []
list1=j.split('•••')
for k in TemplateOBJ.Commands[L]:
if (list1[0] + ' ' + list1[1]) in k:
current_command.append(k)
for s in current_command:
if (L+'•••'+j) not in extraction_dic:
extraction_dic[L+'•••'+j] = {}
extraction_dic[L+'•••'+j][s] = current_Collected_OBJ.CommandsResult[s]
else:
print(current_Collected_OBJ.NodeIP, 'Is not Collected, Skipping...')
continue
Port_QTY = len(extraction_dic)
try:
if Port_QTY > 0:
for i in range(1,Port_QTY+1):
tableData[i] = {}
counter = 0
for h in extraction_dic:
counter += 1
tableData[counter]['Port'] = h
if 'WDM' in TemplateOBJ.events:
for m in TemplateOBJ.events['WDM']:
tmp = [x for x in extraction_dic[h] if 'detail' in x][0]
current_port_info1 = extraction_dic[h][tmp]
tableData[counter][m] = port_perf(current_port_info1,m)
if 'Ethernet' in TemplateOBJ.events:
for m in TemplateOBJ.events['Ethernet']:
tmp = [x for x in extraction_dic[h] if 'PM ethernet 0' in x][0]
current_port_info2 = extraction_dic[h][tmp]
tableData[counter][m] = port_perf(current_port_info2,m)
if 'OSC_OPT' in TemplateOBJ.events:
for m in TemplateOBJ.events['OSC_OPT']:
tmp = [x for x in extraction_dic[h] if 'PM opt' in x][0]
current_port_info3 = extraction_dic[h][tmp]
tableData[counter][m] = port_perf(current_port_info3,m)
if 'OSC_OPR' in TemplateOBJ.events:
for m in TemplateOBJ.events['OSC_OPR']:
tmp = [x for x in extraction_dic[h] if 'PM opr' in x][0]
current_port_info4 = extraction_dic[h][tmp]
tableData[counter][m] = port_perf(current_port_info4,m)
table = TableCanvas(lblFrame6, data=tableData,width=670,height=273)
table.grid_configure(padx=(1,6))
table.show()
btnExport.config(state='normal')
TableExport('Auto')
else:
print('All Nodes are unreachable!')
btnExport.config(state='disabled')
pass
except:
pass
# ________________________NE Communicator_____________________________________
def P_Executer(Node_Info):
global NodeCounter
NodeCounter += 1
print(str(NodeCounter) + ") "+ Node_Info[0] ,' Started...')
NodeObject = NE_Adapter(Node_Info[0],[Node_Info[2],Node_Info[3],
Node_Info[4]],Node_Info[1])
print(str(NodeCounter) + ") "+ Node_Info[0] ,' Finished!')
return NodeObject
# ______________________NE Communication Multiprocessing________________________
def collect_result(res):
global Collected_Data,stepSize
progress.step(stepSize)
Collected_Data.append(res)
# ______________________NE Communication Multiprocessing________________________
def NE_Collection_Initiator(All_Nodes,TemplateOBJ):
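# Fan the collection out over a multiprocessing Pool of `processQTY` workers:
# each P_Executer call handles one node over telnet, and the collect_result
# callback appends the finished NE_Adapter to Collected_Data while stepping
# the progress bar; the results table is rebuilt once every node is done.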
btnExecute.grid_forget()
global stepSize
global Collected_Data,NodeCounter,processQTY
stepSize = int(1000/len(All_Nodes))
NodeCounter = 0
lblCollectionStatusResult.configure(text='In Progress ',foreground='orange')
btnExecute.configure(state='disable')
p = Pool(int(processQTY))
for i in All_Nodes:
p.apply_async(P_Executer,args=(i,), callback=collect_result)
p.close()
p.join()
pbar3.stop()
btnExecute.grid(row=0,column=2,sticky='wn',padx=5,pady=5,columnspan=2)
btnExecute.configure(state='normal')
lblCollectionStatusResult.configure(text='Done ',foreground='green')
TableUpdate(TemplateOBJ)
# _________________________Perf Collector_____________________________________
def Collector():
global Templates
global Collected_Data,Network_Data
progressVar.set(0)
Collected_Data = []
all_nodes = []
if cmbTemplateList.get() != '':
Templatename = cmbTemplateList.get()
for i in Templates:
if i.name == Templatename:
TemplateOBJ = i
for j in TemplateOBJ.Commands:
node_info = []
node_info.append(j)
node_info.append(TemplateOBJ.Commands[j])
for i in Network_Data:
if i[1] == j:
node_info.append(i[2])
node_info.append(i[3])
node_info.append(i[4])
all_nodes.append(node_info)
NE_Collection_Initiator(all_nodes,TemplateOBJ)
else:
tk.messagebox.showerror('Error','Select the desired template first')
#_______________________________________________________________________________
def ActivatePbar(pbar):
pbar.init()
pbar.execute()
#_______________________________________________________________________________
def Proc_Pbar(pbar,proc):
Thread(target=ActivatePbar,args=[pbar]).start()
Thread(target=proc).start()
# ________________________Event add to Listbox________________________________
def Add2Event():
if cmbEventType.get() != '':
if cmbSelectedEvents.get() != '':
if cmbSelectedEvents.get() != 'All':
event = cmbEventType.get()+'•••'+cmbSelectedEvents.get()
if event not in lstSelectedEvents.get(0 , 'end'):
lstSelectedEvents.insert('end',
cmbEventType.get() + '•••' + cmbSelectedEvents.get())
else:
tk.messagebox.showinfo('Error','The event is already selected!')
else:
for i in cmbSelectedEvents['values']:
if i != 'All':
event = cmbEventType.get() + '•••' + i
if event not in lstSelectedEvents.get(0 , 'end'):
lstSelectedEvents.insert('end',event)
else:
tk.messagebox.showinfo('Error','Select the event first!')
else:
tk.messagebox.showinfo('Error','Select the event type first!')
# ________________________Event ComboBox Content______________________________
def cmbEventTypeOnSelect(event):
global standard_ETH_Events,standard_WDM_Events,standard_OSC_OPT_Events
global standard_OSC_OPR_Events
standard_ETH_Events = sorted(standard_ETH_Events)
standard_WDM_Events = sorted(standard_WDM_Events)
if cmbEventType.get() == 'Ethernet':
cmbSelectedEvents.set('')
cmbSelectedEvents['values'] = standard_ETH_Events
elif cmbEventType.get() == 'WDM':
cmbSelectedEvents.set('')
cmbSelectedEvents['values'] = standard_WDM_Events
elif cmbEventType.get() == 'OSC_OPT':
cmbSelectedEvents.set('')
cmbSelectedEvents['values'] = standard_OSC_OPT_Events
elif cmbEventType.get() == 'OSC_OPR':
cmbSelectedEvents.set('')
cmbSelectedEvents['values'] = standard_OSC_OPR_Events
# _________________________Port Input Validator_______________________________
def PortFormatVerify(PortInput):
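# Accepts ports written as "shelf/slot/port" (e.g. 1/4/28). On the
# '1830 PSS-8/16II/32' shelf the port field carries a prefix: C<n>/L<n> for
# client/line ports of DPM-style cards, X<n> for 11QCE12X / 11OPE8, and the
# literal OSCSFP for IROADMV; anything else is reported as 'invalid'.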
LinePortQTY = {'2UC400':2,'4UC400':4,'20UC200':20,'30AN300':30,'4AN400':4,
'20AN80':20,'30SE300':30,'6SE300':6,'20MX80':20,'1UX100':1,
'2UX200':2,'4AX200':4,'20AX200':20,'11DPM12':2,'12P120':6,
'11QCE12X':4,'11OPE8':6,'11DPM8':2}
ClientPortQTY = {'11DPM12':12,'12P120':6,'11QCE12X':23,'11OPE8':2,
'11DPM8':8}
try:
temp = PortInput.split('/')
if len(temp) != 3:
return 'invalid'
if cmbShelfs.get() == '1830 PSS-8/16II/32':
if int(temp[0]) not in range(1,25) or\
int(temp[1]) not in range(1,33):
return 'invalid'
if cmbCards.get() == '11QCE12X' or cmbCards.get() == '11OPE8':
if (temp[2][0]) in 'xX':
if int(temp[2][1:]) in range(1,LinePortQTY[cmbCards.get()]+1):
return 'valid'
return 'invalid'
return 'invalid'
elif cmbCards.get() == 'IROADMV':
if temp[2].strip().upper() != 'OSCSFP':
return 'invalid'
else:
if (temp[2][0]) in 'cC':
if int(temp[2][1:]) not in range(1, ClientPortQTY[cmbCards.get()]+1):
return 'invalid'
elif (temp[2][0]) in 'lL':
if int(temp[2][1:]) not in range(1,LinePortQTY[cmbCards.get()]+1):
return 'invalid'
else:
return 'invalid'
return 'valid'
else:
if int(temp[0]) not in range(1,25) or\
int(temp[1]) not in range(1,25) or\
int(temp[2]) not in range(1,LinePortQTY[cmbCards.get()]+1):
return 'invalid'
return 'valid'
except:
return 'invalid'
# __________________________New Template On Click Function ___________________
def NewTemplate():
global Templates
if len(lstTempl.get(0,'end')) > 0:
if len(lstSelectedEvents.get(0,'end')) > 0:
answer = tk.simpledialog.askstring("Information","Template Name? ",
parent=root)
if answer == '' or answer == None:
tk.messagebox.showinfo('Wrong Name','The template name is not valid')
return
tmp = Template_Builder(answer,
lstSelectedEvents.get(0,'end'),lstTempl.get(0,'end'))
Templates.append(tmp)
tabs.tab(2,state='normal')
lstTempl.delete(0,'end')
lstSelectedEvents.delete(0,'end')
cmbShelfs.set('')
cmbCards.set('')
txtPortAdd.delete('1.0', "end")
txtPortAdd.insert("end", r'ex: 1/4/28')
txtPortAdd.configure(foreground='gray',background='white')
cmbEventType.set('')
cmbSelectedEvents.set('')
else:
tk.messagebox.showinfo('Error','Event list is Empty!')
else:
tk.messagebox.showinfo('Error','Port list is empty!')
tmplist = []
if len(Templates) > 0:
cmbTemplateList['values'] = ()
for i in Templates:
tmplist.append(i.name)
cmbTemplateList['values'] = tuple(tmplist)
# ____________________________Delete Selected Port____________________________
def DeletePort():
selection = lstTempl.curselection()
if selection:
lstTempl.delete(selection)
else:
tk.messagebox.showinfo("Nothing to delete!","Select the port first!")
# ___________________________Delete Selected Event____________________________
def DeleteEvent():
selection = lstSelectedEvents.curselection()
if selection:
lstSelectedEvents.delete(selection)
else:
tk.messagebox.showinfo("Nothing to delete!","Select the event first!")
# ____________________________Port add to ListBox_____________________________
def Add2Port():
if cmbNetData.get() != '':
if OnlineVar.get() == 1:
if cmbShelfs.get() != '':
if PortFormatVerify(txtPortAdd.get('1.0','end-1c')) == 'valid':
PortInfo = (cmbNetData.get()+'•••'+cmbCards.get()+
'•••'+txtPortAdd.get('1.0','end-1c'))
if PortInfo not in lstTempl.get(0,'end'):
lstTempl.insert('end',PortInfo)
else:
tk.messagebox.showinfo("Item Exists!",
"The port combination already exists!")
else:
tk.messagebox.showinfo("Error",
"The entered port address is not valid!")
else:
tk.messagebox.showinfo("Error",
"Select the shelf type first!")
else:
if cmbCards.get() != '':
if cmbInterfaces.get() != '':
PortInfo = (cmbNetData.get()+'•••'+cmbCards.get()+
'•••'+cmbInterfaces.get())
if PortInfo not in lstTempl.get(0,'end'):
lstTempl.insert('end',PortInfo)
else:
tk.messagebox.showinfo("Item Exists!",
"This port combination already exists!")
else:
tk.messagebox.showinfo("Error","Select the interface first!")
else:
tk.messagebox.showinfo("Error","Select the card first!")
else:
tk.messagebox.showinfo("Error",
"Select the node first!")
# ____________________________Shelf Type Select_______________________________
def ShelfOnSelect(event):
if cmbShelfs.get() == '1830 PSS-24x':
cmbCards.set('')
cmbCards['values'] = ('2UC400','4UC400','4AN400','30AN300')
cmbCards.set('4UC400')
elif cmbShelfs.get() == '1830 PSS-8x/12x':
cmbCards.set('')
cmbCards['values'] = ('20AX200','1UX100','20MX80','2UX200')
cmbCards.set('20AX200')
elif cmbShelfs.get() == '1830 PSS-8/16II/32':
cmbCards.set('')
cmbCards['values'] = ('11DPM12','12P120','11DPM8','IROADMV')
cmbCards.set('11DPM12')
# ____________________________Browse On Click Function _______________________
def btnBrowseCall():
btnBrowse.focus()
txtPath.delete('1.0','end-1c')
txtPath.insert('1.0',tk.filedialog.askopenfilename(initialdir='C:\\',
title = "Select file",
filetypes = (("CSV files","*.csv"),("all files","*.*"))))
txtPath.configure(foreground='black')
# ____________________________Verify On Click Function _______________________
def btnVerifyCall():
global Network_Data , verifyResult
btnVerify.focus()
Network_Data = []
verifyResult = True
filepath = txtPath.get('1.0','end-1c')
try:
with open(filepath) as file:
content = file.read().splitlines()
for i in content[1:]:
try:
tmp = i.split(',')
tmp1 = tmp[1].strip().split('.')
if (len(tmp) != 5) or (tmp[0].strip() == '') or\
(tmp[1].strip() == '') or (tmp[2].strip() == '') or\
(tmp[3].strip() == '') or (tmp[4].strip() == '') or\
(len(tmp1) != 4) or\
((int(tmp1[0]) < 1) or int(tmp1[0]) > 254) or\
((int(tmp1[1]) < 0) or int(tmp1[1]) > 254) or\
((int(tmp1[2]) < 0) or int(tmp1[2]) > 254) or\
((int(tmp1[3]) < 0) or (int(tmp1[3]) > 254)):
verifyResult = False
break
except:
verifyResult = False
break
Network_Data.append(tmp)
if verifyResult == False:
lblResult.configure(text='Verification Result: Failed',
foreground='red')
tabs.tab(1,state='normal')
else:
lblResult.configure(text='Verification Result: Success',
foreground='green')
tabs.tab(1,state='normal')
btn_LoadTemplate.config(state='normal')
cmbNetData['values']=()
tmpcontainer=[]
Network_Data.sort()
for i in Network_Data:
tmpcontainer.append(i[0]+'•••'+i[1])
cmbNetData['values']=tuple(tmpcontainer)
except FileNotFoundError:
tk.messagebox.showerror('Error','File not found!')
except PermissionError:
tk.messagebox.showerror('Error','The address exists but is not accessible '
'due to the lack of permission!')
# ____________________________txtPath on Change Function _____________________
def txtPathOnChange(event):
if txtPath.get('1.0','end-1c') != r'Enter the file path(ex: c:/data.csv)':
if txtPath.get('1.0','end-1c') != '':
btnVerify.configure(state='normal')
else:
btnVerify.configure(state='disabled')
else:
btnVerify.configure(state='disabled')
# __________________________ txtPath On Click Function _______________________
def txtPathOnClick(event):
if txtPath.get('1.0','end-1c') == r'Enter the file path(ex: c:/data.csv)':
txtPath.delete('1.0', "end")
txtPath.configure(foreground='black')
# __________________________ txtPortAdd On Click Function ____________________
def txtPortAddOnClick(event):
if txtPortAdd.get("1.0","end-1c") == r'ex: 1/4/28':
txtPortAdd.delete('1.0', "end")
txtPortAdd.configure(foreground='black')
# __________________________txtPortAdd On Change Function ____________________
def txtPortAddOnChange(event):
if txtPortAdd.get("1.0","end-1c") != (r'ex: 1/4/28'):
Verify = PortFormatVerify(txtPortAdd.get("1.0","end-1c"))
if Verify == 'valid':
txtPortAdd.configure(background='white')
else:
txtPortAdd.configure(background='Red')
# ____________________________txtPortAdd on FocusOut Function ________________
def txtPortAddOnFocusout(event):
if txtPortAdd.get("1.0","end-1c") == '':
txtPortAdd.configure(background='white',foreground='gray')
txtPortAdd.insert('1.0',r'ex: 1/4/28')
# ___________________________txtPath On FocusOut Function ____________________
def txtPathOnFocusOut(event):
if txtPath.get("1.0","end-1c") == '':
txtPath.insert('1.0',r'Enter the file path(ex: c:/data.csv)')
txtPath.configure(background='white',foreground='gray')
btnVerify.configure(state='disabled')
#__________________________________ Template Load File__________________________
def loadTemplatefile():
global Templates
added_Templates = 0
failed_IPs_message = ''
failed_IPs = []
tempOBJ = {}
currntTemplateNames = []
OBJ = None
IP_List = [i[1] for i in Network_Data]
if len(Templates) > 0:
for i in Templates:
currntTemplateNames.append(i.name)
filepath = tk.filedialog.askopenfilename(initialdir='C:\\',
title = "Select file",filetypes = (("tmplfile files","*.TMPLFILE"),
("all files","*.*")))
if filepath not in [None,'']:
with open(filepath,'r') as templatefile:
content = templatefile.read().splitlines()
for i in content:
failed_IPs = []
tempOBJ = literal_eval(i)
OBJ = Template_Builder()
for j in tempOBJ['ports']:
if j not in IP_List:
failed_IPs_message += (str(j) + ' was not included in Network Data file.\n')
failed_IPs.append(j)
if len(failed_IPs) > 0:
failed_IPs_message += ('Mismatch exists between the imported \
template and network data file.\nThe file cannot be imported!')
tk.messagebox.showerror('Invalid nodes',failed_IPs_message)
return
OBJ.name = tempOBJ['name']
OBJ.ports = tempOBJ['ports']
OBJ.events = tempOBJ['events']
OBJ.Commands = tempOBJ['Commands']
if OBJ.name in currntTemplateNames:
continue
else:
Templates.append(OBJ)
added_Templates += 1
if failed_IPs_message != '':
tk.messagebox.showinfo('Node info mismatch',failed_IPs_message)
tk.messagebox.showinfo('Template loading result',str(added_Templates)+
' new templates have been added')
tabs.tab(1,state='normal')
tabs.tab(2,state='normal')
tmplist = []
if len(Templates) > 0:
cmbTemplateList['values'] = ()
for i in Templates:
tmplist.append(i.name)
cmbTemplateList['values'] = tuple(tmplist)
#__________________________________ Template Save Func _________________________
def Export_Temp():
template_item = {}
global Templates
if len(Templates) == 0:
tk.messagebox.showerror('Error','The template database is empty.')
return
filename = tk.filedialog.asksaveasfile(mode='w',defaultextension=".tmplfile",
filetypes = (("Template files","*.tmplfile"),("all files","*.*")))
if filename not in [None,'']:
for i in Templates:
template_item['name']=i.name
template_item['ports']=i.ports
template_item['Commands']=i.Commands
template_item['events']=i.events
filename.write(str(template_item))
filename.write('\n')
filename.close()
#__________________________________ TRIAL FUNCS ________________________________
def Trial(root):
sec = 0
while sec < 180:
sleep(1)
sec += 1
root.destroy()
#__________________________________ TRIAL FUNCS ________________________________
def GoOnTrial():
# run the trial countdown; GoOnTrial itself is started in a background thread
Trial(root)
#__________________________________ TRIAL FUNCS ________________________________
def PassCheck():
global isTrial
password_db = ('b09de2727f72ec243f846baafcfd380d',
'0f024f9bc6c8343526cea4b652baf73b',
'96aabcd6f2a75c05c73d16c1cfa99106',
'6632123f0f5a8dcf266f9c007f39ceb8',
'fd43104a480f535a06fbdf8e709f08be',
'49e5e34a2291174272af1e3791d3d2dc',
'deb84787a5075db49859ebd2a642ddb0',
'74d8afd340de7494870259fc4fe8ef39')
hexed = md5((txtPasswd.get()).encode('ascii'))
if hexed.hexdigest() in password_db:
isTrial = False
passwd_UI.destroy()
else:
tk.messagebox.showerror('Wrong password','Wrong password!')
#___________________________________ Time Trial Func ___________________________
def Trialer():
global isTrial
isTrial = True
passwd_UI.destroy()
#__________________________________Main Part____________________________________
if __name__ == '__main__':
freeze_support()
passwd_UI = tk.Tk()
passwd_UI.title('License Key Manager')
passwd_UI.geometry('400x50')
passwd_UI.resizable(0,0)
ttk.Label(passwd_UI,text='Key: ',foreground='Blue').grid(column=0,
row=0,padx=5,sticky='WN',pady=10)
txtPasswd = ttk.Entry(passwd_UI , width=20,foreground='red',show="*")
txtPasswd.grid(column=1,row=0,sticky='w',padx=5,pady=10)
btnFull = ttk.Button(passwd_UI,text='Check',command=PassCheck)
btnFull.grid(column=2,row=0,sticky='w',padx=5,pady=10)
ttk.Button(passwd_UI,text='3 Minutes Trial',command=Trialer).grid(column=3,
row=0,sticky='w',padx=5,pady=10)
passwd_UI.protocol("WM_DELETE_WINDOW", exit)
passwd_UI.bind('<Return>', lambda f2:btnFull.invoke())
passwd_UI.mainloop()
root = tk.Tk()
root.title('NOKIA 1830 PSS SWDM Performance Collection Tool - Release ' + str(_version_)+' - BETA')
#____________________TrialStart_________________________________________________
if isTrial == True:
Thread(target=GoOnTrial).start()
#____________________TrialEnd___________________________________________________
try:
root.call('wm','iconphoto', root._w, tk.PhotoImage(file='NLogo.png'))
except:
print('NLogo.png file not found! This message can be ignored...\n')
root.geometry('750x450')
root.resizable(0,0)
log_Folder = 'logs'
try:
mkdir(log_Folder)
except:
pass
# _____________________________________ Tabs Configurations __________________
tabs = ttk.Notebook(root)
tab1 = tk.Frame(tabs , height=422 , width=744)
tab2 = tk.Frame(tabs , height=422 , width=744)
tab3 = tk.Frame(tabs , height=422 , width=744)
tabs.add(tab1 , text='Network Data File')
tabs.add(tab2 , text='Performance Template' , state='normal')
tabs.add(tab3 , text='Performance Collector' , state='normal')
tabs.grid(column=0,row=0)
#___________________________ Tab1 Label Frames Configurations __________________
lblFrame1 = ttk.LabelFrame(tab1 , width=730, height=237 ,
text='Import network data file')
lblFrame1.grid(columnspan=4,sticky='WE',padx=5)
lblFrame2 = ttk.LabelFrame(tab1 , width=730, height=300 ,text='About')
lblFrame2.grid(columnspan=4,sticky='WE',padx=5)
#____________________________Tab1 Button Configurations_________________________
btnBrowse = ttk.Button(lblFrame1 , width=10 , text='Browse...' ,
command=btnBrowseCall)
btnBrowse.grid(column=0,row=0,padx=10,pady=5)
btnVerify = ttk.Button(lblFrame1 , width=10 , text='Verify' ,
command=btnVerifyCall , state='disabled')
btnVerify.grid(column=0,row=1,padx=10,pady=5)
btn_LoadTemplate = ttk.Button(lblFrame1 , width=10 , text='Load Templates',
state='disabled',command=loadTemplatefile)
btn_LoadTemplate.grid(column=0,row=2,padx=10,pady=5)
#____________________________Tab1 textbox Configurations________________________
txtPath = CustomText(lblFrame1 , width=70 ,height=1,foreground='gray')
txtPath.insert('1.0',r'Enter the file path(ex: c:/data.csv)')
txtPath.grid(column=1,row=0,sticky='w',columnspan=3,padx=(32,0))
txtPath.bind('<Button-1>', txtPathOnClick)
txtPath.bind('<<CursorChange>>', txtPathOnChange)
txtPath.bind('<FocusOut>', txtPathOnFocusOut)
#____________________________Tab1 Label Configurations__________________________
lblResult = ttk.Label(lblFrame1,text='Verification Result: TBD',width = 70)
lblResult.configure(foreground='blue')
lblResult.grid(column=1,row=1,padx=(30,10),pady=5,sticky='W')
#_______________________Explanations____________________________________________
ttk.Label(lblFrame2,text=' - Used 3rd-Party Libraries:',
foreground='Blue').grid(column=0,row=0,padx=(0,100),sticky='WN')
lblLicitem1 = ttk.Label(lblFrame2,text=' - tkintertable',foreground='Blue',
cursor="spider")
lblLicitem1.grid(column=0,row=1,padx=(10,100),sticky='WN')
url1 = r"https://github.com/dmnfarrell/tkintertable"
lblLicitem1.bind('<Button-1>', lambda f: open_new(url1))
ttk.Label(lblFrame2,text='-' * 126,
foreground='Gray').grid(column=0,row=2,sticky='WN')
ttk.Label(lblFrame2,text=' - Important Points:',
foreground='Blue').grid(column=0,row=3,padx=(0,100),sticky='WN')
ttk.Label(lblFrame2,text=' - Legacy OCS Shelves \
(PSS36/PSS64) are not supported.',foreground='Blue').grid(column=0,row=4,
padx=(10,100),sticky='WN')
ttk.Label(lblFrame2,text=' - Feel free to contact me in case of any \
suggestions or bug reports.',foreground='Blue').grid(column=0,row=5,
padx=(10,100),sticky='WN')
#_____________________________Author____________________________________________
lblAuthor = ttk.Label(lblFrame2,
text='Created by Naseredin Aramnejad (naseredin.aramnejad@nokia.com)',
foreground='Red',cursor='exchange')
lblAuthor.grid(column=0,row=10,padx=(0,367),pady=(150,3),sticky='WS')
email = r"MAILTO:naseredin.aramnejad@nokia.com"
lblAuthor.bind('<Button-1>', lambda f1:open_new(email))
#______________________________ Tab2 Label Frames Configurations _______________
lblFrame3 = ttk.LabelFrame(tab2 , width=730, height=120 ,text='Ports')
lblFrame3.grid(columnspan=4,sticky='WE',padx=7)
lblFrame4 = ttk.LabelFrame(tab2 , width=730, height=120 ,text='Events')
lblFrame4.grid(columnspan=10,sticky='wes',padx=5,column=0,row=1)
#_______________________________Tab2 Button Configurations______________________
pbar1 = Rotbar(parent=lblFrame3,column=2,row=3,padx=(0,5),sticky1='nw',
aspect=2,
file=r'D:\userdata\aramneja\Desktop\ezgif-5-cd5b298d317e.gif')
pbar2 = Rotbar(parent=lblFrame3,column=2,row=4,padx=(0,5),sticky1='nw',
aspect=2,
file=r'D:\userdata\aramneja\Desktop\ezgif-5-cd5b298d317e.gif')
ttk.Button(lblFrame3,text='Save Template',
command=NewTemplate).grid(column=1,row=6,pady=5,sticky='nw',
columnspan=4,ipadx=27,ipady=25)
btnLoadCard = ttk.Button(lblFrame3,text='...',
command=lambda :Proc_Pbar(pbar1,LoadCards),width=2,state='disabled')
btnLoadCard.grid(column=2,row=3,padx=(0,5),sticky='nw')
btnLoadInt = ttk.Button(lblFrame3,text='...',
command=lambda :Proc_Pbar(pbar2,LoadInts),width=2,state='disabled')
btnLoadInt.grid(column=2,row=4,padx=(0,5),sticky='nw')
ttk.Button(lblFrame3,text='Add to List',command=Add2Port).grid(column=3,
row=3,sticky='nw',rowspan=2)
ttk.Button(lblFrame3,text='Remove',command=DeletePort).grid(column=3,row=4,
sticky='nw',rowspan=3,pady=(0,3))
ttk.Button(lblFrame4,text='Add to List',command=Add2Event).grid(column=1,
row=2,padx=(0,107),sticky='nw',columnspan=2,ipadx=33,ipady=12)
ttk.Button(lblFrame4,text='Remove',command=DeleteEvent).grid(column=1,row=3,
padx=(0,107),sticky='nw',columnspan=2,ipadx=33,ipady=12)
ttk.Button(lblFrame3,text='Export Tmpl',command=Export_Temp).grid(column=2,
row=6,pady=5,sticky='nw',columnspan=4,ipadx=8,ipady=25)
#_______________________________Tab2 ListBox Configurations_____________________
lstTempl = tk.Listbox(lblFrame3,width=59,height=13)
lstTempl.grid(column=5,row=1,sticky='w',columnspan=2,rowspan=6,padx=(0,5),
pady=(0,5))
yscroll2 = tk.Scrollbar(lblFrame3,command=lstTempl.yview,orient='vertical')
yscroll2.grid(column=4,row=1,sticky='wsn',padx=(5,0),rowspan=6,pady=(0,5))
lstTempl.configure(yscrollcommand=yscroll2.set)
lstSelectedEvents = tk.Listbox(lblFrame4,width=61,height=9)
lstSelectedEvents.grid(column=5,row=0,sticky='wn',rowspan=5,pady=(5,10))
yscroll4 = tk.Scrollbar(lblFrame4,command=lstSelectedEvents.yview,
orient='vertical')
yscroll4.grid(column=4,row=0,sticky='wsn',padx=(12,0),pady=(5,10),rowspan=5)
# _______________________Tab2 Radio Buttons Configurations____________________
OnlineVar = tk.IntVar()
radio_frame = ttk.Frame(lblFrame3)
radio_frame.grid(column=2,columnspan=2,row=2)
ttk.Radiobutton(radio_frame, text='Offline',variable=OnlineVar,value=1,
command=GoOffline).grid(column=0,row=0)
ttk.Radiobutton(radio_frame, text='Online',variable=OnlineVar,value=2,
command=GoOnline).grid(column=1,row=0)
OnlineVar.set(1)
# ____________________________Tab2 ComboBos Configurations____________________
cmbNetData = ttk.Combobox(lblFrame3,state='readonly',width=40)
cmbNetData.grid(column=1,row=1,padx=(0,10),sticky='nw',columnspan=4)
cmbNetData.bind('<<ComboboxSelected>>', Init_Shelfs)
cmbShelfs= ttk.Combobox(lblFrame3,state='readonly')
cmbShelfs.grid(column=1,row=2,sticky='nw',pady=(1,0))
cmbShelfs['values']=('1830 PSS-8/16II/32','1830 PSS-24x','1830 PSS-8x/12x')
cmbShelfs.bind('<<ComboboxSelected>>', ShelfOnSelect)
cmbCards = ttk.Combobox(lblFrame3,state='readonly')
cmbCards.grid(column=1,row=3,sticky='nw',pady=(1,0))
cmbCards.bind('<<ComboboxSelected>>',Init_Interface)
cmbInterfaces= ttk.Combobox(lblFrame3,state='readonly')
cmbEventType= ttk.Combobox(lblFrame4,state='readonly')
cmbEventType.grid(column=1,row=0,padx=(0,10),sticky='nw')
cmbEventType['values']=('Ethernet','WDM','OSC_OPT','OSC_OPR')
cmbEventType.bind('<<ComboboxSelected>>', cmbEventTypeOnSelect)
cmbSelectedEvents= ttk.Combobox(lblFrame4,state='readonly')
cmbSelectedEvents.grid(column=1,row=1,padx=(0,10),sticky='nw')
# ____________________________Tab2 Text Box Configurations____________________
txtPortAdd = CustomText(lblFrame3,width=17,height=1)
txtPortAdd.grid(column=1,row=4,padx=(0,10),sticky='nw')
txtPortAdd.insert("end", r'ex: 1/4/28')
txtPortAdd.configure(foreground='gray')
txtPortAdd.bind('<Button-1>', txtPortAddOnClick)
txtPortAdd.bind('<<CursorChange>>', txtPortAddOnChange)
txtPortAdd.bind('<FocusOut>', txtPortAddOnFocusout)
# __________________________________ Tab2 Label Configurations _______________
ttk.Label(lblFrame3,text='Node: ').grid(column=0,row=1,padx=(5,0),
sticky='wn')
ttk.Label(lblFrame3,text='Shelf Type: ').grid(column=0,row=2,padx=(5,0),
sticky='wn')
ttk.Label(lblFrame3,text='Card Type: ').grid(column=0,row=3,padx=(5,0),
sticky='wn')
ttk.Label(lblFrame3,text='Port Address: ').grid(column=0,row=4,padx=(5,0),
sticky='wn')
ttk.Label(lblFrame4,text='Event Type: ').grid(column=0,row=0,padx=(5,0),
sticky='wn')
ttk.Label(lblFrame4,text='Event Name: ').grid(column=0,row=1,padx=(5,0),
sticky='wn')
# _____________________________ Tab3 Label Frame Configuration _______________
lblFrame5 = ttk.LabelFrame(tab3 , width=735, height=120 ,text='Collector')
lblFrame5.grid(columnspan=10,sticky='wes',padx=5,column=0,row=0)
lblFrame6 = ttk.LabelFrame(tab3 , width=735, height=334 ,text='Result')
lblFrame6.grid(columnspan=10,sticky='wes',padx=5,column=0,row=1)
Frame1 = ttk.Frame(lblFrame5 , width=300, height=50)
Frame1.grid(columnspan=2,sticky='wes',padx=5,column=2,row=1)
# _____________________________ Tab3 Label Configuration _____________________
ttk.Label(lblFrame5,text='Templates List: ').grid(row=0,column=0,padx=5,
pady=5,sticky='nw')
ttk.Label(lblFrame5,text='Collection Process Quantity: ').grid(row=1,
column=0,padx=5,pady=5,sticky='ne',columnspan=2)
ttk.Label(lblFrame5,text='Collection Status: ').grid(row=1,column=4,padx=5,
pady=5,sticky='nw')
lblCollectionStatusResult = ttk.Label(lblFrame5,text='TBD')
lblCollectionStatusResult.grid(row=1,column=5,padx=5,pady=5,sticky='nw',
columnspan=3)
ttk.Label(Frame1,text='1').grid(row=0,column=0,padx=3)
ttk.Label(Frame1,text='100').grid(row=0,column=2,padx=3)
ttk.Label(lblFrame5,text='Process Qty: ').grid(row=0,column=4)
lblCurrentPrQtyValue = ttk.Label(lblFrame5,text='1')
lblCurrentPrQtyValue.grid(row=0,column=5)
# _____________________________ Tab3 Button & Pbar Configuration _____________
progressVar = tk.DoubleVar()
progress = ttk.Progressbar(lblFrame5,orient='horizontal',length=250,
variable=progressVar,mode='determinate',maximum=1001,value=0)
progress.grid(row=0,column=3,padx=5,pady=5,columnspan=2,sticky='wn')
pbar3 = Rotbar(parent=lblFrame5,row=0,column=2,sticky1='wn',
padx=5,pady=5,columnspan=2,aspect=2,
file=r'D:\userdata\aramneja\Desktop\ezgif-5-cd5b298d317e.gif')
btnExecute = ttk.Button(lblFrame5,text='Collect',
command=lambda :Proc_Pbar(pbar3,Collector))
btnExecute.grid(row=0,column=2,sticky='wn',padx=5,pady=5,columnspan=2)
btnExport = ttk.Button(lblFrame5,text='Exp',command=TableExport,
state='disabled')
btnExport.configure(width=5)
btnExport.grid(row=0,column=6,sticky='wn',pady=5)
scaleVal = tk.DoubleVar()
scaleVal.set(1.0)
scale = ttk.Scale(Frame1,orient='horizontal',length=200,variable=scaleVal,
from_=1,to=100,command=updateValue)
scale.grid(row=0,column=1,padx=5,pady=5,sticky='wn')
# _____________________________ Tab3 Combobox Configuration __________________
cmbTemplateListVar = tk.StringVar()
cmbTemplateList = ttk.Combobox(lblFrame5,textvariable=cmbTemplateListVar,
state='readonly')
cmbTemplateList.grid(row=0,column=1,padx=5,pady=5,sticky='wn')
fdata = {1:{'Port':0,'Event 1':0}}
table = TableCanvas(lblFrame6, data=fdata,width=670,height=273).show()
# _____________________________________ Main Loop Execution __________________
root.mainloop()
|
import torch
import dgl.function as fn
import torch.nn as nn
from graphgallery.nn.layers.pytorch import activations
def drop_node(feats, drop_rate, training):
    """Randomly zero out whole node feature rows (DropNode).

    During training each node is kept with probability ``1 - drop_rate``;
    at inference the features are simply scaled by ``1 - drop_rate`` so the
    expected output matches the training-time behaviour.
    """
    n = feats.shape[0]
    drop_rates = torch.ones(n) * drop_rate
    if training:
        masks = torch.bernoulli(1. - drop_rates).unsqueeze(1)
        feats = masks.to(feats.device) * feats
    else:
        feats = feats * (1. - drop_rate)
    return feats
def GRANDConv(graph, feats, order):
    '''
    Mixed-order propagation over the symmetrically normalized adjacency matrix.

    Parameters
    -----------
    graph: dgl.Graph
        The input graph
    feats: Tensor (n_nodes * feat_dim)
        Node features
    order: int
        Number of propagation steps

    Returns
    -------
    Tensor (n_nodes * feat_dim)
        Node features averaged over propagation steps 0..order
    '''
with graph.local_scope():
        # Calculate the symmetric normalized adjacency matrix A_hat = D^{-1/2} A D^{-1/2}
degs = graph.in_degrees().float().clamp(min=1)
norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)
graph.ndata['norm'] = norm
graph.apply_edges(fn.u_mul_v('norm', 'norm', 'weight'))
        # Graph convolution: propagate `order` times and accumulate the results.
        x = feats
        y = 0 + feats          # running sum, starts with the 0-step (identity) term
        for i in range(order):
            graph.ndata['h'] = x
            # One step: x <- A_hat @ x, expressed as DGL message passing.
            graph.update_all(fn.u_mul_e('h', 'weight', 'm'), fn.sum('m', 'h'))
            x = graph.ndata.pop('h')
            y.add_(x)
    return y / (order + 1)     # average over the order + 1 propagation terms
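# In matrix form, GRANDConv above performs mixed-order propagation: with the
# symmetrically normalized adjacency A_hat = D^{-1/2} A D^{-1/2} (degrees clamped
# to at least 1), it returns
#     Y = (X + A_hat X + A_hat^2 X + ... + A_hat^order X) / (order + 1),
# i.e. the node features averaged over propagation steps 0..order.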
class GRAND(nn.Module):
def __init__(self,
in_features,
out_features,
hids=[16],
acts=['relu'],
dropout=0.5,
S=1,
K=4,
temp=0.5,
lam=1.,
bias=False,
bn=False):
super().__init__()
mlp = []
for hid, act in zip(hids, acts):
if bn:
mlp.append(nn.BatchNorm1d(in_features))
mlp.append(nn.Linear(in_features,
hid,
bias=bias))
mlp.append(activations.get(act))
mlp.append(nn.Dropout(dropout))
in_features = hid
if bn:
mlp.append(nn.BatchNorm1d(in_features))
mlp.append(nn.Linear(in_features, out_features, bias=bias))
self.mlp = mlp = nn.Sequential(*mlp)
self.K = K
self.temp = temp
self.lam = lam
self.dropout = dropout
self.S = S
def forward(self, feats, graph):
X = feats
S = self.S
if self.training: # Training Mode
output_list = []
for _ in range(S):
drop_feat = drop_node(X, self.dropout, True) # Drop node
feat = GRANDConv(graph, drop_feat, self.K) # Graph Convolution
output_list.append(self.mlp(feat)) # Prediction
return output_list
else: # Inference Mode
drop_feat = drop_node(X, self.dropout, False)
X = GRANDConv(graph, drop_feat, self.K)
return self.mlp(X)
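# Minimal usage sketch for the GRAND module above (the graph, feature sizes and
# hyper-parameters below are illustrative assumptions, not values taken from this
# file):
#
#     import dgl
#     import torch
#
#     g = dgl.add_self_loop(dgl.rand_graph(100, 400))   # toy graph, 100 nodes
#     feats = torch.randn(100, 16)
#     model = GRAND(in_features=16, out_features=7, hids=[32], S=4, K=2)
#
#     model.train()
#     outputs = model(feats, g)   # list of S logit tensors, one per random propagation
#
#     model.eval()
#     logits = model(feats, g)    # single tensor for inference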
|
# WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
import json
import requests
import responses
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_none,
assert_is_not_none,
assert_not_equal,
assert_raises
)
from gocardless_pro.errors import MalformedResponseError
from gocardless_pro import resources
from gocardless_pro import list_response
from .. import helpers
@responses.activate
def test_payer_authorisations_get():
fixture = helpers.load_fixture('payer_authorisations')['get']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.get(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
@responses.activate
def test_timeout_payer_authorisations_get_retries():
fixture = helpers.load_fixture('payer_authorisations')['get']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.get(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
def test_502_payer_authorisations_get_retries():
fixture = helpers.load_fixture('payer_authorisations')['get']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.get(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_payer_authorisations_create():
fixture = helpers.load_fixture('payer_authorisations')['create']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.create(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
@responses.activate
def test_payer_authorisations_create_new_idempotency_key_for_each_call():
fixture = helpers.load_fixture('payer_authorisations')['create']
helpers.stub_response(fixture)
helpers.client.payer_authorisations.create(*fixture['url_params'])
helpers.client.payer_authorisations.create(*fixture['url_params'])
assert_not_equal(responses.calls[0].request.headers.get('Idempotency-Key'),
responses.calls[1].request.headers.get('Idempotency-Key'))
def test_timeout_payer_authorisations_create_idempotency_conflict():
create_fixture = helpers.load_fixture('payer_authorisations')['create']
get_fixture = helpers.load_fixture('payer_authorisations')['get']
with helpers.stub_timeout_then_idempotency_conflict(create_fixture, get_fixture) as rsps:
response = helpers.client.payer_authorisations.create(*create_fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_timeout_payer_authorisations_create_retries():
fixture = helpers.load_fixture('payer_authorisations')['create']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.create(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
def test_502_payer_authorisations_create_retries():
fixture = helpers.load_fixture('payer_authorisations')['create']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.create(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_payer_authorisations_update():
fixture = helpers.load_fixture('payer_authorisations')['update']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.update(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
@responses.activate
def test_timeout_payer_authorisations_update_retries():
fixture = helpers.load_fixture('payer_authorisations')['update']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.update(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
def test_502_payer_authorisations_update_retries():
fixture = helpers.load_fixture('payer_authorisations')['update']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.update(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_payer_authorisations_submit():
fixture = helpers.load_fixture('payer_authorisations')['submit']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.submit(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
def test_timeout_payer_authorisations_submit_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['submit']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.payer_authorisations.submit(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_payer_authorisations_submit_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['submit']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.payer_authorisations.submit(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
@responses.activate
def test_payer_authorisations_confirm():
fixture = helpers.load_fixture('payer_authorisations')['confirm']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.confirm(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
def test_timeout_payer_authorisations_confirm_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['confirm']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.payer_authorisations.confirm(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_payer_authorisations_confirm_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['confirm']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.payer_authorisations.confirm(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
|
import numpy as np
import Python.density_matrix as DM
import scipy.sparse as sp
import matplotlib.pyplot as plt
from measurements import temps
for i in range(100):
print(i)
pops = [.1, .1, .4]
therm2 = DM.n_thermal_qbits(pops)
therm2.change_to_energy_basis()
temps(therm2)
# print(therm2.ptrace([0, 1]))
# print(therm2.ptrace_to_a_single_qbit(2).data.toarray())
# assert therm1 == therm2
|
import os
from app.app import create_app
from app import jwt
flask_config = os.getenv("FLASK_CONFIG")
app_host = os.getenv("HOST")
app_port = os.getenv("PORT")
user = os.getenv("DB_USER")
passwd = os.getenv("DB_PASSWORD")
host = os.getenv("DB_HOST")
database = os.getenv("DB_DATABASE")
db_uri = "mysql+pymysql://{}:{}@{}/{}".format(
user,
passwd,
host,
database)
app = create_app(flask_config, db_uri)
jwt.init_jwt(app)
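# The loader below appears to follow the flask-jwt-extended "token in blacklist"
# callback pattern: it runs on every protected request, and returning True marks
# the presented token as revoked so the request is rejected.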
@jwt.jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jwt.jwt.check_black_listed_jti(jti)
if __name__ == "__main__":
app.run(host=app_host, port=app_port)
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.auctions.core.tests.question import AuctionQuestionResourceTestMixin
from openprocurement.auctions.tessel.tests.base import BaseInsiderAuctionWebTest
class InsiderAuctionQuestionResourceTest(BaseInsiderAuctionWebTest, AuctionQuestionResourceTestMixin):
pass
def suite():
tests = unittest.TestSuite()
tests.addTest(unittest.makeSuite(InsiderAuctionQuestionResourceTest))
return tests
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|