content stringlengths 5 1.05M |
|---|
from math import sqrt
# Guess the closest-matching cubic unit cell type for an element, given its
# atomic weight, atomic radius, and a reference mass/density value to compare against.
# Avogadro's constant and user input
Na = 6.022 * pow(10,23)
A = float(input("Provide the atomic weight: "))
R = float(input("Provide the atomic radius: ")) / (10 ** 7) # input expected in nanometers; dividing by 1e7 converts to centimeters
# Atoms-per-volume for each cubic unit cell, expressed in the atomic radius R:
#   simple cubic:        1 atom,  edge a = 2R         -> 1 / (8 R^3)
#   face-centered cubic: 4 atoms, edge a = 2R*sqrt(2) -> 4 / (16 sqrt(2) R^3)
#   body-centered cubic: 2 atoms, edge a = 4R/sqrt(3) -> 2*3*sqrt(3) / (64 R^3)
SC = 1 / (8 * R ** 3)
FCC = 4 / (16 * pow(R, 3) * sqrt(2))
BCC = (2 * 3 * sqrt(3)) / (64 * pow(R, 3))
utypes = {'Simple Cube': SC, 'Face Centered Cube': FCC, 'Body Centered Cube': BCC}
# mass of a single atom (grams per atom)
RO = A / Na
# reference value to compare against, and the results dictionary
# NOTE(review): value * RO below is atoms/volume * grams/atom = a density
# (g/cm^3), so the prompt's "atomic mass" presumably means measured density —
# confirm the intended input.
COMP = float(input("Provide atomic mass for comparation: "))
results = {}
# absolute difference between the reference value and each cell type's prediction
for utype, value in utypes.items():
    results[f'{utype}'] = abs(COMP - value * RO)
    print(f"Type: {utype}, difference: {COMP - value * RO}")
# report the cell type with the smallest absolute difference
print(f"\nProbably a {min(results.keys(),key=( lambda k: results[k]))}")
# ... . .... .. . .. .
# .zf"` `"tu @88> .xH888888Hx. . uW8" @88> < .z@8"` @88>
# x88 '8N. x. . %8P .. .H8888888888888: .. . : `t888 %8P !@88E %8P
# 888k d88& .@88k z88u . .@88i 888*"""?""*88888X .u .888: x888 x888. 8888 . . . '888E u .
# 8888N. @888F ~"8888 ^8888 .@88u ""%888> 'f d8x. ^%88k ud8888. ~`8888~'888X`?888f` 9888.z88N .@88u .udR88N 888E u@8NL .@88u
# `88888 9888% 8888 888R ''888E` '88% '> <88888X '?8 :888'8888. X888 888X '888> 9888 888E ''888E` <888'888k 888E`"88*" ''888E`
# %888 "88F 8888 888R 888E ..dILr~` `:..:`888888> 8> d888 '88%" X888 888X '888> 9888 888E 888E 9888 'Y" 888E .dN. 888E
# 8" "*h=~ 8888 888R 888E '".-%88b `"*88 X 8888.+" X888 888X '888> 9888 888E 888E 9888 888E~8888 888E
# z8Weu 8888 ,888B . 888E @ '888k .xHHhx.." ! 8888L X888 888X '888> 9888 888E 888E 9888 888E '888& 888E
# ""88888i. Z "8888Y 8888" 888& 8F 8888 X88888888hx. ..! '8888c. .+ "*88%""*88" '888!` .8888 888" 888& ?8888u../ 888E 9888. 888&
# " "8888888* `Y" 'YP R888" '8 8888 ! "*888888888" "88888% `~ " `"` `%888*%" R888" "8888P' '"888*" 4888" R888"
# ^"**"" "" '8 888F ^"***"` "YP' "` "" "P' "" "" ""
# %k <88F
# "+:*%`
# .--~*teu. .n~~%x. .--~*teu. .n~~%x.
# dF 988Nx x88X 888. dF 988Nx x88X 888.
# d888b `8888> X888X 8888L d888b `8888> X888X 8888L
# ?8888> 98888F X8888X 88888 ?8888> 98888F X8888X 88888
# "**" x88888~ 88888X 88888X "**" x88888~ 88888X 88888X
# d8888*` 88888X 88888X d8888*` 88888X 88888X
# z8**"` : 88888X 88888f z8**"` : 88888X 88888f
# :?..... ..F 48888X 88888 :?..... ..F 48888X 88888
# <""888888888~ ?888X 8888" <""888888888~ ?888X 8888"
# 8: "888888* "88X 88*` 8: "888888* "88X 88*`
# "" "**"` ^"==="` "" "**"` ^"==="`
|
from flask import Flask, render_template

application = Flask(__name__)


@application.route('/')
def index():
    """Render the home page, passing the navigation links to the template."""
    # (path, label) pairs consumed by index.html to build the nav bar.
    navigasi = [
        ('/', 'Home'),
        ('/profile', 'Profile'),
        ('/product', 'Product'),
        ('/contact', 'Contact')]
    return render_template('index.html', navigasi=navigasi)


@application.route('/profile')
def profile():
    """Static placeholder page for /profile."""
    # Trailing semicolons removed: they are legal but un-Pythonic noise.
    return '<h2>Profile</h2>'


@application.route('/product')
def product():
    """Static placeholder page for /product."""
    return '<h2>product</h2>'


@application.route('/contact')
def contact():
    """Static placeholder page for /contact."""
    return '<h2>contact</h2>'


if __name__ == '__main__':
    # debug=True enables the reloader/debugger -- development only.
    application.run(debug=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# setup.py -- packaging script for micropython-am7020 (upip-style sdist).
# @Author : Zack Huang ()
# @Link : zack@atticedu.com
# @Date : 2021/03/16 2:03:50 PM
# NOTE: sdist_upip must be imported from the current directory BEFORE the
# sys.path manipulation below removes that directory from the path.
import sdist_upip
from setuptools import setup
import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
sys.path.append("..")
setup(
    name='micropython-am7020',
    version='1.0.4',
    author='Zack Huang',
    author_email='zack@atticedu.com',
    description='AT Command library dedicated to am7020 http mqtt',
    long_description='',
    url='https://github.com/JiekangHuang/MicroPython-AM7020',
    license='MIT',
    # upip-flavoured sdist so the package installs on MicroPython targets
    cmdclass={'sdist': sdist_upip.sdist},
    package_dir={'micropython-am7020': 'am7020'},
    packages=['micropython-am7020']
)
|
# coding: utf-8
from .base import BaseAPI, APISET
@APISET.register('asset_host')
class AssetHostAPI(BaseAPI):
    """API wrapper for listing host-type assets."""

    def list(self):
        """Fetch every host asset via the ``get_assets`` command.

        Returns a dict with ``results`` (the recovered asset list) and
        ``count`` (its length). Raises ``ValueError`` when the response
        status is not ``'200'``.
        """
        query = {
            "cmd": "get_assets",
            "asset_type": "host",
            # rows=-1 requests the full, unpaginated listing
            "filter": "sort=name rows=-1",
        }
        response = self.client.get(params=query)
        payload = response['get_assets']['get_assets_response']
        # temp: plain status check until proper error handling lands
        if payload['@status'] != '200':
            raise ValueError("status not 200")
        # make result
        assets = self.list_recovery(payload.get('asset'))
        return {
            "results": assets,
            "count": len(assets),
        }
|
from __future__ import print_function, unicode_literals
import os
import re
import socket
import ssl
import ttfw_idf
def _path(f):
return os.path.join(os.path.dirname(os.path.realpath(__file__)),f)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_example_protocol_openssl_server(env, extra_data):
    """
    steps:
    1. join AP
    2. connect to uri "xxxx.xxxx.xxxx.xxxx:port"
    3. send data
    """
    dut1 = env.get_dut('openssl_server', 'examples/protocols/openssl_server', dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'openssl_server.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('openssl_server_bin_size', '{}KB'.format(bin_size // 1024))
    # start test
    dut1.start_app()
    # scrape the server's IP address and listening port from the device log
    ip = dut1.expect(re.compile(r' IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)'), timeout=30)[0]
    port = dut1.expect(re.compile(r' SSL server socket listen on ([0-9]+)'), timeout=30)[0]
    addr = (ip, int(port))
    # BUG FIX: the socket used to be created twice -- the first one (with the
    # 10 s timeout set) was discarded and replaced by a fresh socket with no
    # timeout. Create it once so the timeout actually applies.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(10)
    # wrap socket with TLS, verifying the server against the bundled CA cert
    wrappedSocket = ssl.wrap_socket(sock, ca_certs=_path('server_certs/ca.crt'), cert_reqs=ssl.CERT_REQUIRED)
    # connect and send data
    wrappedSocket.connect(addr)
    wrappedSocket.send('Some Data'.encode())
    # close socket connection
    wrappedSocket.close()


if __name__ == '__main__':
    test_example_protocol_openssl_server()
|
import unittest
import solution
class TestQ(unittest.TestCase):
    """Checks solution.winner against two known game outcomes."""

    def test_case_0(self):
        # (towers, expected winner) pairs
        cases = (
            ([10, 10], 'Sandy'),
            ([2, 2, 3], 'Manasa'),
        )
        for towers, expected in cases:
            self.assertEqual(solution.winner(towers), expected)


if __name__ == '__main__':
    unittest.main()
|
# Packaging script for mcq_hammertime.
# distutils was deprecated and removed in Python 3.12; prefer setuptools and
# keep the distutils fallback so very old environments still work.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name = 'mcq_hammertime',
    packages = ['mcq_hammertime'],
    version = '0.13',
    description = 'A GFM to HTML parser that adds interactive Multiple Choice question functionality.',
    author = 'Johnny Chang',
    author_email = 'johnny@johnnychang.com',
    url = 'https://github.com/goldcase/hammertime',
    keywords = ['multiple', 'choice', 'question', 'markdown', 'html', 'converter', 'gfm', 'github', 'flavored'],
    classifiers = [],
)
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.EGL import _types as _cs
# End users want this...
from OpenGL.raw.EGL._types import *
from OpenGL.raw.EGL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'EGL_EXT_image_dma_buf_import'
def _f( function ):
    # Bind *function* as an EGL entry point of this extension, with the
    # standard EGL error checker attached.
    return _p.createFunction( function,_p.PLATFORM.EGL,'EGL_EXT_image_dma_buf_import',error_checker=_errors._error_checker)
# Extension tokens for EGL_EXT_image_dma_buf_import (autogenerated values).
EGL_DMA_BUF_PLANE0_FD_EXT=_C('EGL_DMA_BUF_PLANE0_FD_EXT',0x3272)
EGL_DMA_BUF_PLANE0_OFFSET_EXT=_C('EGL_DMA_BUF_PLANE0_OFFSET_EXT',0x3273)
EGL_DMA_BUF_PLANE0_PITCH_EXT=_C('EGL_DMA_BUF_PLANE0_PITCH_EXT',0x3274)
EGL_DMA_BUF_PLANE1_FD_EXT=_C('EGL_DMA_BUF_PLANE1_FD_EXT',0x3275)
EGL_DMA_BUF_PLANE1_OFFSET_EXT=_C('EGL_DMA_BUF_PLANE1_OFFSET_EXT',0x3276)
EGL_DMA_BUF_PLANE1_PITCH_EXT=_C('EGL_DMA_BUF_PLANE1_PITCH_EXT',0x3277)
EGL_DMA_BUF_PLANE2_FD_EXT=_C('EGL_DMA_BUF_PLANE2_FD_EXT',0x3278)
EGL_DMA_BUF_PLANE2_OFFSET_EXT=_C('EGL_DMA_BUF_PLANE2_OFFSET_EXT',0x3279)
EGL_DMA_BUF_PLANE2_PITCH_EXT=_C('EGL_DMA_BUF_PLANE2_PITCH_EXT',0x327A)
EGL_ITU_REC2020_EXT=_C('EGL_ITU_REC2020_EXT',0x3281)
EGL_ITU_REC601_EXT=_C('EGL_ITU_REC601_EXT',0x327F)
EGL_ITU_REC709_EXT=_C('EGL_ITU_REC709_EXT',0x3280)
EGL_LINUX_DMA_BUF_EXT=_C('EGL_LINUX_DMA_BUF_EXT',0x3270)
EGL_LINUX_DRM_FOURCC_EXT=_C('EGL_LINUX_DRM_FOURCC_EXT',0x3271)
EGL_SAMPLE_RANGE_HINT_EXT=_C('EGL_SAMPLE_RANGE_HINT_EXT',0x327C)
EGL_YUV_CHROMA_HORIZONTAL_SITING_HINT_EXT=_C('EGL_YUV_CHROMA_HORIZONTAL_SITING_HINT_EXT',0x327D)
EGL_YUV_CHROMA_SITING_0_5_EXT=_C('EGL_YUV_CHROMA_SITING_0_5_EXT',0x3285)
EGL_YUV_CHROMA_SITING_0_EXT=_C('EGL_YUV_CHROMA_SITING_0_EXT',0x3284)
EGL_YUV_CHROMA_VERTICAL_SITING_HINT_EXT=_C('EGL_YUV_CHROMA_VERTICAL_SITING_HINT_EXT',0x327E)
EGL_YUV_COLOR_SPACE_HINT_EXT=_C('EGL_YUV_COLOR_SPACE_HINT_EXT',0x327B)
EGL_YUV_FULL_RANGE_EXT=_C('EGL_YUV_FULL_RANGE_EXT',0x3282)
EGL_YUV_NARROW_RANGE_EXT=_C('EGL_YUV_NARROW_RANGE_EXT',0x3283)
|
import requests
import json
class PyPantry:
    """Minimal client for the getpantry.cloud JSON storage API."""

    def __init__(self, pantryID: str):
        self.pantryID = pantryID
        self.url = f"https://getpantry.cloud/apiv1/pantry/{self.pantryID}"

    def get(self):
        """GET the pantry's details and return the parsed JSON body."""
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.request("GET", self.url, headers=headers, data="")
        return json.loads(response.text)

    def post(self, basket: str, data: dict):
        """Create or replace *basket* with *data*.

        FIX: the server's response used to be printed and then discarded;
        it is now also returned so callers can inspect the result.
        """
        payload = json.dumps(data)
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", self.url + f"/basket/{basket}", headers=headers, data=payload)
        print(response.text)
        return response.text
# Copyright (c) 2019-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""\
Import resnet18 model from file and reshape its input to train on cifar10.
"""
import argparse
import sys
import pyeddl.eddl as eddl
from pyeddl.tensor import Tensor, DEV_CPU
MEM_CHOICES = ("low_mem", "mid_mem", "full_mem")  # valid values for --mem
def main(args):
    """Import resnet18 from ONNX, replace its head for cifar10, and fine-tune:
    first train only the new dense layer with the backbone frozen, then
    unfreeze everything and train the whole network, then evaluate."""
    freeze_epochs = 2
    unfreeze_epochs = 5
    num_classes = 10  # 10 labels in cifar10
    eddl.download_cifar10()
    eddl.download_model("resnet18.onnx", "re7jodd12srksd7")
    # Import always on CPU; the compute service is chosen later in build().
    net = eddl.import_net_from_onnx_file(
        "resnet18.onnx", [3, 32, 32], DEV_CPU
    )
    # Record pretrained layer names BEFORE surgery so they can be
    # frozen/unfrozen below.
    # NOTE(review): this list still contains "resnetv15_dense0_fwd", which is
    # removed next -- confirm setTrainable tolerates a removed layer name.
    names = [_.name for _ in net.layers]
    # Remove dense output layer
    eddl.removeLayer(net, "resnetv15_dense0_fwd")
    # Get last layer to connect the new dense
    layer = eddl.getLayer(net, "flatten_170")
    out = eddl.Softmax(eddl.Dense(layer, num_classes, True, "new_dense"))
    # Get input layer
    in_ = eddl.getLayer(net, "data")
    # Create a new model
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.adam(0.0001),
        ["softmax_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem),
        False  # do not initialize weights to random values
    )
    eddl.summary(net)
    # Force initialization of new layers
    eddl.initializeLayer(net, "new_dense")
    x_train = Tensor.load("cifar_trX.bin")
    y_train = Tensor.load("cifar_trY.bin")
    x_test = Tensor.load("cifar_tsX.bin")
    y_test = Tensor.load("cifar_tsY.bin")
    if args.small:
        # Keep only the first two batches of each split for a quick smoke run.
        sel = [f":{2 * args.batch_size}"]
        x_train = x_train.select(sel)
        y_train = y_train.select(sel)
        x_test = x_test.select(sel)
        y_test = y_test.select(sel)
    # Scale pixel values from [0, 255] to [0, 1].
    x_train.div_(255.0)
    x_test.div_(255.0)
    # Freeze pretrained weights
    for n in names:
        eddl.setTrainable(net, n, False)
    # Train new layers
    eddl.fit(net, [x_train], [y_train], args.batch_size, freeze_epochs)
    # Unfreeze weights
    for n in names:
        eddl.setTrainable(net, n, True)
    # Train all layers
    eddl.fit(net, [x_train], [y_train], args.batch_size, unfreeze_epochs)
    # Evaluate
    eddl.evaluate(net, [x_test], [y_test], args.batch_size)
    print("All done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--batch-size", type=int, metavar="INT", default=100)
    parser.add_argument("--gpu", action="store_true")
    # NOTE(review): --input is parsed but never read by main() -- confirm
    # whether it is vestigial or should replace the hard-coded ONNX path.
    parser.add_argument("--input", metavar="STRING",
                        default="trained_model.onnx",
                        help="input path of the serialized model")
    parser.add_argument("--mem", metavar="|".join(MEM_CHOICES),
                        choices=MEM_CHOICES, default="low_mem")
    parser.add_argument("--small", action="store_true")
    main(parser.parse_args(sys.argv[1:]))
|
import os
from setuptools import setup, find_packages
# Read the long description, closing the README promptly instead of leaking
# the open file handle (the original used a bare open(...).read()).
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as _readme:
    README = _readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
    name='django-bootstrap-admin',
    version='0.3.2',
    packages=find_packages(),
    include_package_data=True,
    license='MIT License',  # example license
    description='Adds twitter bootstrap and other nicities to the Django admin',
    long_description=README,
    url='http://github.com/maxfolley/django-bootstrap-admin',
    author='Maxwell Folley',
    author_email='maxwell@weareflavor.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # replace these appropriately if you are using Python 3
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
|
""" Table to store the classifiers defined by every group

In this initial version:
- Every entry in the table is the set of classifiers associated to one group
- The definitions of all classifiers are stored as a json in the 'bundle' column
- NOTE(review): the original note trailed off ("the definition of classifier
  follows a ..."); presumably the bundle follows a fixed json schema --
  confirm and document it here.
"""
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.sql import func
from .base import metadata
group_classifiers = sa.Table(
    "group_classifiers",
    metadata,
    # Surrogate primary key (declared via PrimaryKeyConstraint below).
    sa.Column("id", sa.BigInteger, nullable=False),
    # TODO: add jsonschema validation
    sa.Column("bundle", JSONB, nullable=False),
    # Creation timestamp, set by the database server.
    sa.Column("created", sa.DateTime(), nullable=False, server_default=func.now()),
    sa.Column(
        "modified",
        sa.DateTime(),
        nullable=False,
        server_default=func.now(),
        onupdate=func.now(),  # this will auto-update on modification
    ),
    sa.Column(
        "gid",
        sa.BigInteger,
        sa.ForeignKey(
            "groups.gid",
            name="fk_group_classifiers_gid_to_groups_gid",
            onupdate="CASCADE",
            ondelete="CASCADE",
        ),
        unique=True,  # Every Group can ONLY have one set of classifiers
    ),
    sa.PrimaryKeyConstraint("id", name="group_classifiers_pkey"),
)
|
"""
Test cases for the regi0.geographic.utils.create_id_grid function.
"""
import numpy as np
import pytest
from regi0.geographic.utils import create_id_grid
@pytest.fixture(scope="module")
def matching_grid():
    # Extent (-1, -1) -> (2, 4) is an exact multiple of the 0.25 resolution,
    # so the grid bounds should match the request exactly.
    return create_id_grid(-1.0, -1.0, 2.0, 4.0, 0.25)
@pytest.fixture(scope="module")
def unmatching_grid():
    # Extent 3.0 x 3.0 is NOT a multiple of 0.33, so the grid must extend
    # beyond the requested bounds.
    return create_id_grid(1.0, 1.0, 4.0, 4.0, 0.33)
def test_width(matching_grid):
    # (2.0 - -1.0) / 0.25 = 12 columns
    assert matching_grid.width == 12
def test_height(matching_grid):
    # (4.0 - -1.0) / 0.25 = 20 rows
    assert matching_grid.height == 20
def test_origin(matching_grid):
    # transform.c / transform.f are the x / y of the upper-left corner.
    assert matching_grid.transform.c == -1.0 and matching_grid.transform.f == 4.0
def test_resolution(matching_grid):
    # transform.a is pixel width; transform.e is the (negative) pixel height.
    assert matching_grid.transform.a == 0.25 and matching_grid.transform.e == -0.25
def test_unique(matching_grid):
    # Every cell of the ID grid must hold a distinct identifier.
    arr = matching_grid.read(1)
    assert arr.size == np.unique(arr).size
def test_unmatching_bounds(unmatching_grid):
    # Expansion must push the bottom/right edges past the requested bounds.
    bounds = unmatching_grid.bounds
    assert bounds.bottom < 1.0 and bounds.right > 4.0
def test_force_origin(unmatching_grid):
    # Even when expanded, the upper-left origin stays where requested.
    assert unmatching_grid.transform.c == 1.0 and unmatching_grid.transform.f == 4.0
def test_other_crs():
    # A caller-supplied CRS must be propagated to the created grid.
    grid = create_id_grid(0, 0, 1000, 1000, 100, crs="epsg:3857")
    assert grid.crs.to_string() == "EPSG:3857"
|
from pact_testgen.generator import generate_tests
from pact_testgen.dialects.django import Dialect
def test_django_test_generator_output_is_parsable(testfile):
    # The generated test module must at least be syntactically valid Python.
    test_file, _ = generate_tests(testfile, Dialect())
    compile(test_file, "<string>", "exec")
def test_output_includes_expected_test_cases(testfile):
    test_file, _ = generate_tests(testfile, Dialect())
    # Names of test cases we expect to see. This is driven directly
    # by test_app/client_tests.py
    print(f"\nTEST FILE\n------\n\n{test_file}\n")
    assert "TestAnAuthorId1" in test_file
    assert "TestAnAuthorId1ABookExistsWithAuthorId1" in test_file
    assert "TestNoInitialState" in test_file
    assert "test_an_author_creation_request" in test_file
    assert "test_a_book_search_request_for_a_non_existent_author" in test_file
    assert "test_a_request_for_author_id_1" in test_file
    assert "test_an_author_update_request" in test_file
    assert "test_an_author_deletion_request" in test_file
    assert "test_a_book_search_request_for_author_id_1" in test_file
def test_provider_state_file_has_expected_methods(testfile):
    # Only non-trivial provider states should get setup methods.
    _, provider_state_file = generate_tests(testfile, Dialect())
    print(f"\nPROVIDER STATE FILE\n-------------------\n\n{provider_state_file}\n")
    assert "setup_nothing" not in provider_state_file
    assert "setup_an_author_id_1" in provider_state_file
    assert "setup_an_author_id_1_a_book_exists_with_author_id_1" in provider_state_file
|
import logging
from pathlib import Path
import hydra
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from src.backend.base import BackendBase
from src.models.sst_model import SSTModel
from src.utils import utils
logging.basicConfig()
log = utils.get_logger(__name__)
log.setLevel(logging.INFO)
class TorchBackend(BackendBase):
    """PyTorch/Lightning inference backend for the SST model."""

    def __init__(self, config: DictConfig) -> None:
        self.config = config

    def prepare(self) -> None:
        """Load the checkpointed model, instantiate the datamodule, and move
        the model to the configured device in eval / no-grad mode."""
        # Init lightning model
        log.info("Loading model")
        checkpoint_path: Path = (
            Path(self.config.work_dir) / self.config.checkpoint_path
        ).resolve()
        self.model: pl.LightningModule = SSTModel.load_from_checkpoint(checkpoint_path)
        # Pass model_id from model to datamodule config to get correct tokenizer
        self.config.datamodule["model_id"] = self.model.model_id
        # Init lightning datamodule
        log.info(f"Instantiating datamodule <{self.config.datamodule._target_}>")
        self.datamodule: pl.LightningDataModule = hydra.utils.instantiate(self.config.datamodule)
        # Prepare device, falling back to CPU when CUDA is requested but absent
        self.device = torch.device(self.config.device)
        if self.device.type == "cuda" and not torch.cuda.is_available():
            log.warning("Cuda is not available. Using CPU")
            self.device = torch.device("cpu")
        self.config.device = (
            self.device.type
        )  # make sure logger receives correct device type in case of change
        self.metric_device = self.device
        log.info(f"Running benchmark on {self.device.type}")
        # Prepare model
        log.info("Preparing model for prediction")
        torch.set_grad_enabled(False)
        self.model.eval()
        self.model.to(self.device)

    def predict(self, batch: dict) -> torch.Tensor:
        """Run a forward pass on *batch* after moving its tensors to the device."""
        self.move_tensors_to_device(batch)
        return self.model(batch)

    def move_tensors_to_device(self, data: dict) -> None:
        """
        Moves all tensors in a dict to a device, recursing into nested dicts.
        """
        for key, value in data.items():
            if isinstance(value, torch.Tensor):
                data[key] = value.to(self.device)
            elif isinstance(value, dict):
                # BUG FIX: the recursive call used to pass self.device as a
                # spurious second positional argument, which raised TypeError
                # on any nested dict (the method only takes the data dict).
                self.move_tensors_to_device(value)
|
#!/usr/bin/python3
# Static blog generator: renders posts/*.md into blog/<slug>.html via
# template.html, then writes a reverse-chronological index.html listing.
from glob import glob # file enumeration
from datetime import datetime # date handling
import mistune # markdown parsing/rendering
template = open("template.html", "r").read()
posts = [] # will hold all the information about blog posts
markdown = mistune.Markdown(escape=False) # escape=False means we can embed HTML in Markdown source
for post_file in glob("posts/*.md"):
    post = {}
    # horribly hacky parsing of 'posts/YYYY-MM-DD-slug.md' filenames
    # NOTE(review): split("/") assumes POSIX path separators -- confirm this
    # never runs on Windows.
    name_parts = post_file.split("/")[-1].split(".")[0].split("-")
    year, month, day = [int(n) for n in name_parts[:3]]
    post["date"] = datetime(year, month, day)
    post["slug"] = "-".join(name_parts[3:])
    raw_content = open(post_file, "r").read()
    post["title"] = raw_content.split("\n")[0].lstrip("# ") # We assume the title is on the first line
    # render markdown, apply template, and save to an output file
    with open("blog/{}.html".format(post["slug"]), "w") as html:
        html.write(template.format(post["title"], markdown(raw_content)))
    posts.append(post) # keep track of all the post metadata
posts.sort(key=lambda p: p["date"], reverse=True) # chronological sort for index page listing
with open("index.html", "w") as index:
    content = ["<h1>Blog Posts</h1>"]
    entry = """<p style="margin: 0;"><a href="blog/{}.html">{}</a><em> - {}</em></p>"""
    for post in posts:
        date = post["date"].strftime("%b %d %Y") # Format date nicely into a string
        content.append(entry.format(post["slug"], post["title"], date))
    index.write(template.format("Blog", "\n\t\t\t".join(content)))
|
import django_filters.rest_framework as filters
from .models import Monster, SkillEffect, Skill, LeaderSkill, ScalingStat
class MonsterFilter(filters.FilterSet):
    # Custom filters on top of the simple lookups declared in Meta.fields.
    name = filters.CharFilter(method='filter_name')
    element = filters.MultipleChoiceFilter(choices=Monster.ELEMENT_CHOICES)
    archetype = filters.MultipleChoiceFilter(choices=Monster.TYPE_CHOICES)
    # Filters that traverse the leader_skill relation.
    leader_skill_attribute = filters.MultipleChoiceFilter(field_name='leader_skill__attribute', choices=LeaderSkill.ATTRIBUTE_CHOICES)
    leader_skill_area = filters.MultipleChoiceFilter(field_name='leader_skill__area', choices=LeaderSkill.AREA_CHOICES)
    order_by = filters.OrderingFilter(fields=[
        'name', 'element', 'base_stars', 'archetype', 'com2us_id', 'family_id',
        'raw_hp', 'raw_attack', 'raw_defense',
        'base_hp', 'base_attack', 'base_defense',
        'max_lvl_hp', 'max_lvl_attack', 'max_lvl_defense',
        'speed', 'crit_rate', 'crit_damage', 'resistance', 'accuracy',
    ])
    class Meta:
        model = Monster
        fields = {
            'id': ['in'],
            'com2us_id': ['exact'],
            'family_id': ['exact'],
            'base_stars': ['lte', 'gte'],
            'obtainable': ['exact'],
            'is_awakened': ['exact'],
            'fusion_food': ['exact'],
            'homunculus': ['exact'],
        }
    def filter_name(self, queryset, name, value):
        # Case-insensitive prefix match; an empty value leaves the queryset
        # unfiltered.
        if value:
            return queryset.filter(name__istartswith=value)
        else:
            return queryset
class SkillFilter(filters.FilterSet):
    name = filters.CharFilter(method='filter_name')
    description = filters.CharFilter(method='filter_description')
    # conjoined=True: a skill must scale with ALL selected stats.
    scaling_stats__pk = filters.ModelMultipleChoiceFilter(queryset=ScalingStat.objects.all(), to_field_name='pk', conjoined=True)
    effects_logic = filters.BooleanFilter(method='filter_effects_logic')
    effect__pk = filters.ModelMultipleChoiceFilter(queryset=SkillEffect.objects.all(), method='filter_skill_effects')
    used_on = filters.NumberFilter(method='filter_used_on')
    class Meta:
        model = Skill
        fields = {
            'id': ['in'],
            'name': ['exact'],
            'com2us_id': ['exact'],
            'slot': ['exact'],
            'cooltime': ['exact', 'isnull', 'gte', 'lte', 'gt', 'lt'],
            'hits': ['exact', 'isnull', 'gte', 'lte', 'gt', 'lt'],
            'aoe': ['exact'],
            'passive': ['exact'],
            'max_level': ['exact', 'gte', 'lte', 'gt', 'lt'],
        }
    def filter_name(self, queryset, name, value):
        # Case-insensitive prefix match on the skill name.
        return queryset.filter(name__istartswith=value)
    def filter_description(self, queryset, name, value):
        # Case-insensitive substring match on the description.
        return queryset.filter(description__icontains=value)
    def filter_skill_effects(self, queryset, name, value):
        # effects_logic chooses between the two filtering strategies below;
        # scaling stats are applied together with effects in both branches.
        old_filtering = self.form.cleaned_data.get('effects_logic', False)
        stat_scaling = self.form.cleaned_data.get('scaling_stats__pk', [])
        if old_filtering:
            # Filter if any skill on the monster has the designated fields
            for effect in value:
                queryset = queryset.filter(skill_effect=effect)
            for pk in stat_scaling:
                queryset = queryset.filter(scaling_stats=pk)
            return queryset.distinct()
        else:
            # Filter effects based on effects of each individual skill. This ensures a monster will not show up unless it has
            # the desired effects on the same skill rather than across any skills.
            skills = Skill.objects.all()
            for effect in value:
                skills = skills.filter(skill_effect=effect)
            for pk in stat_scaling:
                skills = skills.filter(scaling_stats=pk)
            return queryset.filter(pk__in=skills).distinct()
    def filter_used_on(self, queryset, name, value):
        # Restrict to skills belonging to a specific monster pk.
        return queryset.filter(monster__pk=value)
    def filter_effects_logic(self, queryset, name, value):
        # This field is just used to alter the logic of skill effect filter
        return queryset
|
import sys
sys.path.append('../..')
import os
import json
import logging
import argparse
import numpy as np
from datetime import datetime
from seqeval import metrics
from seqeval.scheme import IOB2
from data_constr.Src.IO import set_logging
logger = logging.getLogger(__name__)
_time = datetime.now().strftime("%m.%d.%y-%H.%M")
_current_file_name = os.path.basename(__file__)
if _current_file_name.endswith('.py'):
_current_file_name = _current_file_name[:-3]
def parse_args():
    """Build and run the command-line argument parser for this script."""
    # (flag, add_argument keyword options) pairs
    arg_specs = [
        ('--save_loc',
         dict(type=str, default='.', help='where to save results')),
        ('--log_dir',
         dict(type=str,
              default=os.path.join('logs', f'{_current_file_name}.{_time}.log'),
              help='the directory of the log file')),
    ]
    parser = argparse.ArgumentParser()
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def main(args):
    """Compute dataset metadata and per-source (labeling-function) F1 scores,
    recommend the stronger sources, and write the metadata back to meta.json.
    """
    set_logging(args.log_dir)
    logger.setLevel(logging.INFO)
    logger.info(f"Parameters: {args}")

    def _load_json(name):
        # Helper: read one JSON file from the save location.
        with open(os.path.join(args.save_loc, name), 'r', encoding='utf-8') as f:
            return json.load(f)

    logger.info('Reading data...')
    train_data = _load_json("train.json")
    valid_data = _load_json("valid.json")
    test_data = _load_json("test.json")
    logger.info('Reading metadata...')
    meta = _load_json("meta.json")
    logger.info('Getting new metadata')
    # Longest sentence (in tokens/characters as stored) across all splits.
    max_length = max(
        (len(v['data']['text'])
         for data in (train_data, valid_data, test_data)
         for v in data.values()),
        default=0,
    )
    meta['train_size'] = len(train_data)
    meta['valid_size'] = len(valid_data)
    # BUG FIX: the key used to be 'test_size ' (trailing space), producing a
    # malformed key in the saved meta.json.
    meta['test_size'] = len(test_data)
    meta['max_length'] = max_length
    meta['num_lf'] = len(meta['lf'])
    # BIO tagging: a B- and I- tag per entity type, plus O.
    meta['num_labels'] = 2 * len(meta['entity_types']) + 1
    # Collect true labels and per-source weak labels across all splits
    # (previously three identical copy-pasted loops).
    t_lbs = list()
    w_lbs = [[] for _ in range(meta['num_lf'])]
    for data in (train_data, valid_data, test_data):
        for v in data.values():
            t_lbs.append(v['label'])
            # weak_labels is (seq_len, num_lf); transpose to per-source rows.
            for i, w_lb in enumerate(np.asarray(v['weak_labels']).T):
                w_lbs[i].append(w_lb.tolist())
    rec_src = list()
    logger.info('Source performance (F1 score)')
    for i, src_name in enumerate(meta['lf']):
        f1 = metrics.f1_score(t_lbs, w_lbs[i], mode='strict', scheme=IOB2)
        logger.info(f'{src_name}: {f1}')
        # Recommend a source only if it clears a minimal F1 bar.
        if f1 > 0.05:
            rec_src.append(src_name)
    logger.info(f'The following sources are recommended for model evaluation:\n'
                f'\t{rec_src}')
    meta['lf_rec'] = rec_src
    meta['num_lf_rec'] = len(rec_src)
    logger.info('Saving results...')
    with open(os.path.join(args.save_loc, "meta.json"), 'w', encoding='utf-8') as f:
        json.dump(meta, f, ensure_ascii=False, indent=2)
    logger.info('Exit with no error')
if __name__ == '__main__':
    argument = parse_args()
    main(argument)
|
import json
import shutil
from datetime import datetime
from pathlib import Path
from jinja2 import Template
from services.meal_services import MealPlan
from services.recipe_services import Recipe
from services.settings_services import SiteSettings, SiteTheme
from app_config import BACKUP_DIR, IMG_DIR, TEMP_DIR, TEMPLATE_DIR
from utils.logger import logger
class ExportDatabase:
    """Collects every part of a Mealie database (recipes, images, settings,
    themes, meal plans) into a temp directory and zips it into BACKUP_DIR."""

    def __init__(self, tag=None, templates=None) -> None:
        """Export a Mealie database. Export interacts directly with class objects
        and can be used with any supported backend database platform. By default
        tags are timestamps, and no Jinja2 templates are rendered.

        Args:
            tag ([str], optional): A str to be used as a file tag. Defaults to None.
            templates (list, optional): A list of template file names. Defaults to None.
        """
        # The export folder/archive is named "<tag>_<date>" or just "<date>".
        if tag:
            export_tag = tag + "_" + datetime.now().strftime("%Y-%b-%d")
        else:
            export_tag = datetime.now().strftime("%Y-%b-%d")
        self.main_dir = TEMP_DIR.joinpath(export_tag)
        self.img_dir = self.main_dir.joinpath("images")
        self.recipe_dir = self.main_dir.joinpath("recipes")
        self.themes_dir = self.main_dir.joinpath("themes")
        self.settings_dir = self.main_dir.joinpath("settings")
        self.templates_dir = self.main_dir.joinpath("templates")
        self.mealplans_dir = self.main_dir.joinpath("mealplans")
        # FIX: this used to be a bare `except:` around the list build, which
        # silently swallowed every error. Only a missing/empty `templates`
        # argument should mean "no templates".
        if templates:
            self.templates = [TEMPLATE_DIR.joinpath(x) for x in templates]
        else:
            self.templates = False
            logger.info("No Jinja2 Templates Registered for Export")
        required_dirs = [
            self.main_dir,
            self.img_dir,
            self.recipe_dir,
            self.themes_dir,
            self.settings_dir,
            self.templates_dir,
            self.mealplans_dir,
        ]
        # `directory` instead of `dir` -- avoid shadowing the builtin.
        for directory in required_dirs:
            directory.mkdir(parents=True, exist_ok=True)

    def export_recipes(self):
        """Dump every recipe to recipes/<slug>.json; render templates if any."""
        all_recipes = Recipe.get_all()
        for recipe in all_recipes:
            logger.info(f"Backing Up Recipes: {recipe}")
            filename = recipe.get("slug") + ".json"
            file_path = self.recipe_dir.joinpath(filename)
            ExportDatabase._write_json_file(recipe, file_path)
            if self.templates:
                self._export_template(recipe)

    def _export_template(self, recipe_data: dict):
        # Render each registered Jinja2 template against the recipe and save
        # it as templates/<recipe name><template suffix>.
        for template_path in self.templates:
            with open(template_path, "r") as f:
                template = Template(f.read())
            filename = recipe_data.get("name") + template_path.suffix
            out_file = self.templates_dir.joinpath(filename)
            content = template.render(recipe=recipe_data)
            with open(out_file, "w") as f:
                f.write(content)

    def export_images(self):
        """Copy every file from IMG_DIR into the export's images directory."""
        for file in IMG_DIR.iterdir():
            shutil.copy(file, self.img_dir.joinpath(file.name))

    def export_settings(self):
        """Dump the site settings to settings/settings.json."""
        all_settings = SiteSettings.get_site_settings()
        out_file = self.settings_dir.joinpath("settings.json")
        ExportDatabase._write_json_file(all_settings.dict(), out_file)

    def export_themes(self):
        """Dump all site themes to themes/themes.json (skipped when empty)."""
        all_themes = SiteTheme.get_all()
        if all_themes:
            all_themes = [x.dict() for x in all_themes]
            out_file = self.themes_dir.joinpath("themes.json")
            ExportDatabase._write_json_file(all_themes, out_file)

    def export_meals(
        self,
    ):  #! Problem Parsing Datetime Objects... May come back to this
        """Dump all meal plans to mealplans/mealplans.json (skipped when empty)."""
        meal_plans = MealPlan.get_all()
        if meal_plans:
            meal_plans = [x.dict() for x in meal_plans]
            out_file = self.mealplans_dir.joinpath("mealplans.json")
            ExportDatabase._write_json_file(meal_plans, out_file)

    @staticmethod
    def _write_json_file(data, out_file: Path):
        # default=str serializes non-JSON types (e.g. dates, paths) as strings.
        json_data = json.dumps(data, indent=4, default=str)
        with open(out_file, "w") as f:
            f.write(json_data)

    def finish_export(self):
        """Zip the export into BACKUP_DIR, delete TEMP_DIR, and return the
        absolute path of the created .zip file."""
        zip_path = BACKUP_DIR.joinpath(f"{self.main_dir.name}")
        shutil.make_archive(zip_path, "zip", self.main_dir)
        shutil.rmtree(TEMP_DIR)
        return str(zip_path.absolute()) + ".zip"
def backup_all(tag=None, templates=None):
    """Run a full export (recipes, images, settings, themes, meal plans)
    and return the path of the resulting zip archive."""
    exporter = ExportDatabase(tag=tag, templates=templates)
    export_steps = (
        exporter.export_recipes,
        exporter.export_images,
        exporter.export_settings,
        exporter.export_themes,
        exporter.export_meals,
    )
    for step in export_steps:
        step()
    return exporter.finish_export()
def auto_backup_job():
    """Remove previous automatic backups, then create a fresh 'Auto'-tagged
    backup using every available template."""
    for stale_backup in BACKUP_DIR.glob("Auto*.zip"):
        stale_backup.unlink()
    all_templates = [template for template in TEMPLATE_DIR.iterdir()]
    backup_all(tag="Auto", templates=all_templates)
    logger.info("Auto Backup Called")
|
def bubbleSort(customList):
    """Sort *customList* in place (ascending) and return it.

    Classic bubble sort, improved with an early exit: if a full pass makes
    no swap the list is already sorted and remaining passes are skipped.
    """
    length = len(customList)
    for i in range(length - 1):  # ----> O(n) passes
        swapped = False
        for j in range(length - i - 1):  # ----> O(n) comparisons per pass
            if customList[j] > customList[j + 1]:
                customList[j], customList[j + 1] = customList[j + 1], customList[j]
                swapped = True
        if not swapped:
            # No swaps this pass: the list is sorted, stop early.
            break
    return customList


cList = [5, 9, 3, 1, 2, 8, 4, 7, 6]
print(bubbleSort(cList))
# -- Time Complexity - O(n^2) worst case, O(n) best case (early exit)
# -- Space Complexity - O(1)
import os,shutil,glob
def alter_dir(src, dst):
    """Recreate *dst* and copy every *.jpg from *src* into it, renaming each
    file from '<name>.jpg' to '<name>_car0.jpg'."""
    if os.path.exists(dst):
        shutil.rmtree(dst)
    os.makedirs(dst)
    for index, src_path in enumerate(glob.glob(src + '/*.jpg')):
        base_name = os.path.basename(src_path)
        dst_path = dst + '/' + base_name.replace('.jpg', '_car0.jpg')
        shutil.copy(src_path, dst_path)
        print(index, src_path, dst_path)
def alter_annot(annot_path, dst_path, new_dir):
    """Rewrite an annotation file so every image path points into *new_dir*
    with the '_car0' suffix, keeping the rest of each line unchanged."""
    with open(annot_path) as fh:
        raw_lines = fh.read().strip().split('\n')

    rewritten = []
    for line in raw_lines:
        # Each line is "<image path> <annotation fields...>".
        path, rest = line.split(' ', maxsplit=1)
        renamed = os.path.basename(path).replace('.jpg', '_car0.jpg')
        rewritten.append(' '.join([new_dir + '/' + renamed, rest]))

    with open(dst_path, 'w') as fh:
        fh.write('\n'.join(rewritten))
def alter_dataset():
    """Copy the 车灯 dataset (raw/train/val subsets plus annotations) into
    a v2 directory, renaming every image with the ``_car0`` suffix."""
    src_dir = '/home/ars/disk/chaoyuan/dataset/车灯'
    dst_dir = '/home/ars/disk/chaoyuan/dataset/more_datasets/车灯v2'
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    for subset in ('raw', 'train', 'val'):
        alter_dir(src_dir + '/' + subset, dst_dir + '/' + subset)
    # Annotation paths must point at the renamed files in the new train dir.
    alter_annot(src_dir + '/annotations.txt',
                dst_dir + '/annotations.txt',
                dst_dir + '/train')
if __name__ == '__main__':
    # Entry point: run the full dataset copy/rename when executed directly.
    alter_dataset()
"""
c17.py
Cryptopals Set 3, Challenge 17
"""
def main():
    """Placeholder entry point for the challenge; returns 0 (success)."""
    return 0
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTinycss2(PythonPackage):
    """tinycss2 is a low-level CSS parser and generator written in Python: it can parse
    strings, return objects representing tokens and blocks, and generate CSS strings
    corresponding to these objects."""

    homepage = "https://www.courtbouillon.org/tinycss2"
    pypi = "tinycss2/tinycss2-1.1.1.tar.gz"

    version('1.1.1', sha256='b2e44dd8883c360c35dd0d1b5aad0b610e5156c2cb3b33434634e539ead9d8bf')

    # Python 3.6+ is both a build and run requirement for tinycss2 1.1.x.
    depends_on('python@3.6:', type=('build', 'run'))
    # Built with flit-core (PEP 517); only needed at build time.
    depends_on('py-flit-core@3.2:3', type='build')
    # webencodings is the runtime dependency declared by tinycss2.
    depends_on('py-webencodings@0.4:', type=('build', 'run'))
|
# Credit to https://github.com/KaiyangZhou/deep-person-reid
from .accuracy import accuracy # noqa
from .rank import evaluate_rank # noqa
|
import os
import logging
import boto3
import urllib.request
import zipfile
from collections import OrderedDict
import numpy as np
import pandas as pd
from sqlalchemy import func
from dataactcore.logging import configure_logging
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.jobModels import Submission # noqa
from dataactcore.models.userModel import User # noqa
from dataactcore.models.stagingModels import PublishedAwardFinancialAssistance
from dataactvalidator.health_check import create_app
from dataactvalidator.scripts.loader_utils import clean_data
logger = logging.getLogger(__name__)
def parse_fabs_file_new_columns(f, sess):
    """Stream the zipped FABS 'new columns' feed in 10k-row batches and
    update matching PublishedAwardFinancialAssistance records.

    f    -- open file object for the zip archive (its .name locates the
            archive on disk; the inner member is 'datafeeds\\<basename>')
    sess -- SQLAlchemy session; committed once at the end.
    """
    # Inner zip member path: 'datafeeds\' + archive basename without extension.
    csv_file = 'datafeeds\\' + os.path.splitext(os.path.basename(f.name))[0]
    # Positional (header-less) column layout of the feed.
    column_header_mapping = {"agency_code": 0, "federal_award_mod": 1, "federal_award_id": 2, "uri": 3,
                             "awarding office code": 4, "awarding office name": 5, "funding office name": 6,
                             "funding office code": 7, "funding agency name": 8, "funding agency code": 9,
                             "funding sub tier agency code": 10, "funding sub tier agency name": 11,
                             "legal entity foreign city": 12, "legal entity foreign province": 13,
                             "legal entity foreign postal code": 14, "legal entity foreign location description": 15}
    # Ordered by column index so usecols/names line up for pd.read_csv.
    column_header_mapping_ordered = OrderedDict(sorted(column_header_mapping.items(), key=lambda c: c[1]))

    # First pass: count total rows to size the batches.
    nrows = 0
    with zipfile.ZipFile(f.name) as zfile:
        with zfile.open(csv_file) as dat_file:
            nrows = len(dat_file.readlines())

    block_size, batch, added_rows = 10000, 0, 0
    batches = nrows // block_size
    last_block_size = (nrows % block_size)
    while batch <= batches:
        # Batch 0 skips only the header row; later batches skip everything
        # already processed. `nrows` is reused here as this batch's length.
        skiprows = 1 if batch == 0 else (batch * block_size)
        nrows = (((batch + 1) * block_size) - skiprows) if (batch < batches) else last_block_size
        logger.info('loading rows %s to %s', skiprows + 1, nrows + skiprows)

        # Re-open the archive per batch; read_csv consumes the member stream.
        with zipfile.ZipFile(f.name) as zip_file:
            with zip_file.open(csv_file) as dat_file:
                data = pd.read_csv(dat_file, dtype=str, header=None, skiprows=skiprows, nrows=nrows,
                                   usecols=column_header_mapping_ordered.values(),
                                   names=column_header_mapping_ordered.keys())
        cdata = format_fabs_data(data)
        if cdata is not None:
            # Row-by-row update keyed on the (case-insensitive) unique id.
            for _, row in cdata.iterrows():
                sess.query(PublishedAwardFinancialAssistance).\
                    filter(func.upper(PublishedAwardFinancialAssistance.afa_generated_unique)
                           == row['afa_generated_unique'].upper()).\
                    update({"awarding_office_code": row['awarding_office_code'],
                            "awarding_office_name": row['awarding_office_name'],
                            "funding_office_name": row['funding_office_name'],
                            "funding_office_code": row['funding_office_code'],
                            "funding_agency_name": row['funding_agency_name'],
                            "funding_agency_code": row['funding_agency_code'],
                            "funding_sub_tier_agency_co": row['funding_sub_tier_agency_co'],
                            "funding_sub_tier_agency_na": row['funding_sub_tier_agency_na'],
                            "legal_entity_foreign_city": row['legal_entity_foreign_city'],
                            "legal_entity_foreign_provi": row['legal_entity_foreign_provi'],
                            "legal_entity_foreign_posta": row['legal_entity_foreign_posta'],
                            "legal_entity_foreign_descr": row['legal_entity_foreign_descr']},
                           synchronize_session=False)
        added_rows += nrows
        batch += 1
        logger.info('%s PublishedAwardFinancialAssistance records updated', added_rows)
    sess.commit()
def format_fabs_data(data):
    """Clean one raw FABS dataframe chunk and map its feed columns onto
    PublishedAwardFinancialAssistance field names.

    Returns the cleaned dataframe with an ``afa_generated_unique`` column
    added, or None when no row carries any of the new-column data.
    """
    # Drop all records without any data to be loaded.
    # BUG FIX: the original did `data = data.replace('', np.nan, inplace=True)`.
    # With inplace=True, replace() returns None, so `data` became None and the
    # dropna() below raised AttributeError. Assign the returned copy instead.
    data = data.replace('', np.nan)
    data.dropna(subset=["awarding office code", "awarding office name", "funding office name", "funding office code",
                        "funding agency name", "funding agency code", "funding sub tier agency code",
                        "funding sub tier agency name", "legal entity foreign city", "legal entity foreign province",
                        "legal entity foreign postal code", "legal entity foreign location description"], inplace=True)
    # Ensure there are rows left to be cleaned and formatted.
    if len(data.index) == 0:
        return None
    # Rename feed columns to model field names via the shared cleaner.
    cdata = clean_data(
        data,
        PublishedAwardFinancialAssistance,
        {
            "agency_code": "awarding_sub_tier_agency_c",
            "federal_award_mod": "award_modification_amendme",
            "federal_award_id": "fain",
            "uri": "uri",
            "awarding office code": "awarding_office_code",
            "awarding office name": "awarding_office_name",
            "funding office name": "funding_office_name",
            "funding office code": "funding_office_code",
            "funding agency name": "funding_agency_name",
            "funding agency code": "funding_agency_code",
            "funding sub tier agency code": "funding_sub_tier_agency_co",
            "funding sub tier agency name": "funding_sub_tier_agency_na",
            "legal entity foreign city": "legal_entity_foreign_city",
            "legal entity foreign province": "legal_entity_foreign_provi",
            "legal entity foreign postal code": "legal_entity_foreign_posta",
            "legal entity foreign location description": "legal_entity_foreign_descr"
        }, {}
    )
    # Make a pass through the dataframe, changing any empty values to None,
    # to ensure that those are represented as NULL in the db.
    cdata = cdata.replace(np.nan, '', regex=True)
    cdata = cdata.applymap(lambda x: str(x).strip() if len(str(x).strip()) else None)
    # Generate the afa_generated_unique field used to match existing records.
    cdata['afa_generated_unique'] = cdata.apply(lambda x: generate_unique_string(x), axis=1)
    # Drop the key components because we aren't updating them.
    for col in ["awarding_sub_tier_agency_c", "award_modification_amendme", "fain", "uri"]:
        del cdata[col]
    return cdata
def generate_unique_string(row):
    """Build the afa_generated_unique key for *row*.

    Format: ``<sub_tier>_<fain>_<uri>_-none-_<mod>`` where every missing
    (None) component is replaced with the literal '-none-'. The fixed
    '-none-' slot is a placeholder for the cfda number.
    """
    def _or_none(value):
        return value if value is not None else '-none-'

    astac = _or_none(row['awarding_sub_tier_agency_c'])
    ama = _or_none(row['award_modification_amendme'])
    fain = _or_none(row['fain'])
    uri = _or_none(row['uri'])
    # todo: if we ever need this script again, we have to check if they give
    # us cfda number and add it here
    return '_'.join([astac, fain, uri, '-none-', ama])
def main():
    """Update historical FABS records with the 2017 new-column data.

    Obtains the Assistance_DataActFields_2017 file either from S3 (via a
    presigned URL) or from the local broker config tree, then streams it
    through parse_fabs_file_new_columns.
    """
    sess = GlobalDB.db().session
    logger.info('Starting updates to FABS data')

    if CONFIG_BROKER["use_aws"]:
        s3_client = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
        new_columns_file = s3_client.generate_presigned_url('get_object', {'Bucket': CONFIG_BROKER['archive_bucket'],
                                                                           'Key': "Assistance_DataActFields_2017.csv"},
                                                            ExpiresIn=600)
        parse_fabs_file_new_columns(urllib.request.urlopen(new_columns_file), sess)
    else:
        new_columns_file = os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config", "fabs",
                                        "Assistance_DataActFields_2017.csv")
        # FIX: the handle was previously opened without ever being closed;
        # a context manager releases it deterministically.
        with open(new_columns_file) as fabs_file:
            parse_fabs_file_new_columns(fabs_file, sess)

    logger.info("Historical FABS column update script complete")
if __name__ == '__main__':
    configure_logging()
    # Run inside the validator app context so the DB session resolves.
    with create_app().app_context():
        main()
|
class Solution:
    def numPairsDivisibleBy60(self, time: List[int]) -> int:
        """Count pairs (i, j), i < j, with (time[i] + time[j]) % 60 == 0.

        Keeps a 60-bucket histogram of remainders seen so far; each new
        duration pairs with every earlier one whose remainder complements
        its own. O(n) time, O(1) extra space.
        """
        remainder_counts = [0] * 60
        pairs = 0
        for t in time:
            remainder = t % 60
            # (60 - 0) % 60 == 0 handles exact multiples of 60.
            complement = (60 - remainder) % 60
            pairs += remainder_counts[complement]
            # BUG FIX: the original did `dp[output] += 1`, indexing the
            # histogram by the running pair count instead of the remainder.
            remainder_counts[remainder] += 1
        return pairs
|
import pytest
from .. import util
from ..common import LoadContext
from ..gateway import (AccessSecurity, AliasRule, AllowRule, DenyRule,
GatewayConfig, PVList, PVListMatch, PVListMatches)
from .conftest import MODULE_PATH
@pytest.fixture
def kfe_pvlist():
    # Parse the checked-in kfe.pvlist that lives next to this test module.
    return PVList.from_file(
        (MODULE_PATH / "kfe.pvlist").resolve()
    )
@pytest.fixture
def gateway_config():
    # Gateway config that picks up every *.pvlist in the test module dir.
    return GatewayConfig(path=MODULE_PATH, glob_str="*.pvlist")
# One parametrized case per pvlist rule flavor: ALLOW with access group,
# DENY (bare / one host / several hosts), ALIAS (bare / with access group),
# and an ALIAS whose pattern is not a valid regex.
@pytest.mark.parametrize(
    "rule, expected",
    [
        pytest.param(
            "SXR:YAG:CVV:01.* ALLOW RWINSTRMCC 1",
            AllowRule(
                context=(LoadContext("None", 1), ),
                command="ALLOW",
                pattern="SXR:YAG:CVV:01.*",
                access=AccessSecurity(
                    group="RWINSTRMCC",
                    level="1",
                )
            ),
            id="allow_basic",
        ),
        pytest.param(
            "SXR:YAG:CVV:01.* DENY",
            DenyRule(
                context=(LoadContext("None", 1), ),
                command="DENY",
                pattern="SXR:YAG:CVV:01.*",
                hosts=[],
            ),
            id="deny_basic",
        ),
        pytest.param(
            "SXR:YAG:CVV:01.* DENY FROM host1",
            DenyRule(
                context=(LoadContext("None", 1), ),
                command="DENY",
                pattern="SXR:YAG:CVV:01.*",
                hosts=["host1"],
            ),
            id="deny_host",
        ),
        pytest.param(
            "SXR:YAG:CVV:01.* DENY FROM host1 host2",
            DenyRule(
                context=(LoadContext("None", 1), ),
                command="DENY",
                pattern="SXR:YAG:CVV:01.*",
                hosts=["host1", "host2"],
            ),
            id="deny_hosts",
        ),
        pytest.param(
            r":gateway\.\(.*\)Flag ALIAS gateway:\1Flag ",
            AliasRule(
                context=(LoadContext("None", 1), ),
                command="ALIAS",
                pattern=r":gateway\.\(.*\)Flag",
                pvname=r"gateway:\1Flag",
                access=None,
            ),
            id="alias",
        ),
        pytest.param(
            r":gateway\.\(.*\)Flag ALIAS gateway:\1Flag GatewayAdmin",
            AliasRule(
                context=(LoadContext("None", 1), ),
                command="ALIAS",
                pattern=r":gateway\.\(.*\)Flag",
                pvname=r"gateway:\1Flag",
                access=AccessSecurity(
                    group="GatewayAdmin",
                    level=None,
                )
            ),
            id="alias_access",
        ),
        pytest.param(
            "* ALIAS gateway:Flag",
            AliasRule(
                context=(LoadContext("None", 1), ),
                command="ALIAS",
                pattern="*",
                pvname="gateway:Flag",
                # Hard to test for, as this is set automatically:
                # metadata={
                #     "error": (
                #         "Invalid regex. error: nothing to "
                #         "repeat at position 0'",
                #     )
                # }
            ),
            id="invalid_pattern",
        ),
    ]
)
def test_rule(rule, expected):
    """Each single-line pvlist source parses into the expected rule object."""
    assert PVList.from_string(rule).rules[0] == expected
def test_eval_order():
    """The evaluation-order directive is parsed and normalized to upper case."""
    pvlist = PVList.from_string("evaluation order allow, deny")
    assert pvlist.evaluation_order == "ALLOW, DENY"
def test_full():
    """A complete pvlist parses: file-header comments, evaluation order,
    skipped (blank-separated) comments, per-rule header comments, rule
    line numbers, and the source hash."""
    source = """
# Header Text 1
# Header Text 2
evaluation order allow, deny
# Skipped Comment

# Rule Comment 1
# Rule Comment 2
SXR:YAG:CVV:01.* ALLOW RWINSTRMCC 1
"""
    source_hash = util.get_bytes_sha256(source.encode("utf-8"))
    assert PVList.from_string(
        source,
        filename="filename",
    ) == PVList(
        filename="filename",
        evaluation_order="ALLOW, DENY",
        comments=[
            "Header Text 1",
            "Header Text 2",
            "Skipped Comment",
            "Rule Comment 1",
            "Rule Comment 2",
        ],
        header="Header Text 1\nHeader Text 2",
        hash=source_hash,
        rules=[
            AllowRule(
                context=(LoadContext("filename", 9), ),
                command="ALLOW",
                pattern="SXR:YAG:CVV:01.*",
                access=AccessSecurity(
                    group="RWINSTRMCC",
                    level="1",
                ),
                # Only the comments directly above the rule become its header.
                header="Rule Comment 1\nRule Comment 2",
            ),
        ],
    )
def test_match_without_context(kfe_pvlist: PVList):
    """A KFE PV matches the generic KFE allow rule (no header comment,
    no capture groups)."""
    assert list(kfe_pvlist.match("IOC:KFE:ABC")) == [
        (
            AllowRule(
                context=(LoadContext(kfe_pvlist.filename, 10,), ),
                pattern="[A-Z][A-Z][A-Z]:KFE:.*",
                command="ALLOW",
                # regex=re.compile("[A-Z][A-Z][A-Z]:KFE:.*"),
                header="",
                metadata={},
                access=None,
            ),
            []
        )
    ]
def test_match_with_context(kfe_pvlist: PVList):
    """A RIX PV matches the RIX rule, which carries a header comment."""
    assert list(kfe_pvlist.match("IOC:RIX:ABC")) == [
        (
            AllowRule(
                context=(LoadContext(kfe_pvlist.filename, 13,), ),
                pattern="[A-Z][A-Z][A-Z]:RIX:.*",
                command="ALLOW",
                # regex=re.compile("[A-Z][A-Z][A-Z]:KFE:.*"),
                header="Some RIX devices temporarily on KFE subnet",
                metadata={},
                access=None,
            ),
            []
        )
    ]
def test_gateway_config(kfe_pvlist: PVList, gateway_config: GatewayConfig):
    """The config aggregates per-file matches into PVListMatches."""
    # Load/refresh all pvlist files the config globbed.
    gateway_config.update_changed()
    kfe_fn = str(kfe_pvlist.filename)
    assert gateway_config.get_matches("IOC:RIX:ABC") == PVListMatches(
        name="IOC:RIX:ABC",
        matches=[
            PVListMatch(
                filename=kfe_fn,
                rule=AllowRule(
                    context=(LoadContext(kfe_fn, 13,), ),
                    pattern="[A-Z][A-Z][A-Z]:RIX:.*",
                    command="ALLOW",
                    # regex=re.compile("[A-Z][A-Z][A-Z]:KFE:.*"),
                    header="Some RIX devices temporarily on KFE subnet",
                    metadata={},
                    access=None,
                ),
                groups=[],
            ),
        ]
    )
|
import matplotlib.pyplot as plt
from matplotlib import animation
class Tensor:
    """Minimal scalar autograd node supporting +, -, *, / with reverse-mode
    gradient accumulation.

    Each arithmetic op returns a new Tensor recording its operands
    (left/right) and the op name, forming a graph that backward() walks
    recursively, accumulating d(output)/d(node) into each node's .grad.
    """

    def __init__(self, data, left=None, right=None, op=None):
        self.data = data    # scalar value
        self.grad = 0       # accumulated gradient
        self.left = left    # left operand node, if produced by an op
        self.right = right  # right operand node, if produced by an op
        self.op = op        # producing op: "add" / "sub" / "mul" / "div"

    def __add__(self, other):
        return Tensor(self.data + other.data, left=self, right=other, op="add")

    def __sub__(self, other):
        return Tensor(self.data - other.data, left=self, right=other, op="sub")

    def __mul__(self, other):
        return Tensor(self.data * other.data, left=self, right=other, op="mul")

    def __truediv__(self, other):
        # BUG FIX: the original guard `other.data - 0 < 1e-9` was true for
        # EVERY negative divisor (and any value < 1e-9), wrongly raising on
        # e.g. Tensor(4)/Tensor(-2). Guard on the magnitude instead.
        if abs(other.data) < 1e-9:
            raise Exception("Can't divide zero")
        return Tensor(self.data / other.data, left=self, right=other, op="div")

    def backward(self, init_grad=1):
        """Propagate init_grad (gradient arriving from the layer above)
        into the operand nodes, then recurse."""
        if self.left is not None:
            if self.op == "add":
                self.left.grad += 1 * init_grad
            elif self.op == "sub":
                self.left.grad += 1 * init_grad
            elif self.op == "mul":
                self.left.grad += self.right.data * init_grad
            elif self.op == "div":
                # d(l/r)/dl = 1/r
                self.left.grad += 1 / self.right.data * init_grad
            else:
                raise Exception("Op unacceptable")
            self.left.backward(self.left.grad)
        if self.right is not None:
            if self.op == "add":
                self.right.grad += 1 * init_grad
            elif self.op == "sub":
                self.right.grad += -1 * init_grad
            elif self.op == "mul":
                self.right.grad += self.left.data * init_grad
            elif self.op == "div":
                # d(l/r)/dr = -l/r^2
                self.right.grad += (-1 * self.left.data / (self.right.data * self.right.data)) * init_grad
            else:
                raise Exception("Op unacceptable")
            self.right.backward(self.right.grad)
class Linear_regression:
    """Single-feature linear regression (y = w*x + b) trained with SGD on
    the scalar autograd Tensor class above."""

    def __init__(self):
        self.w = Tensor(1.0)    # weight (slope)
        self.b = Tensor(1.0)    # bias (intercept)
        self.lr = Tensor(0.02)  # learning rate

    def fit(self, x, y, num_epochs=60, show=True):
        """Fit on the points (x, y) by per-sample SGD; prints the summed
        loss each epoch. When show is True, animates the fitted line over
        the scatter data and saves it as test.gif."""
        if show:
            fig = plt.figure()
            plt.scatter(x, y, color='r')
            ims = []
        for epoch in range(num_epochs):
            losses = 0.0
            for m, n in zip(x, y):
                yp = self.w * Tensor(m) + self.b
                # Squared error for this single sample.
                loss = (Tensor(n) - yp) * (Tensor(n) - yp)
                loss.backward()
                # SGD step. Note: `-=` goes through __sub__, so w and b are
                # rebound to NEW Tensor nodes carrying graph links.
                self.w -= self.lr * Tensor(self.w.grad)
                self.b -= self.lr * Tensor(self.b.grad)
                self.w.grad = 0
                self.b.grad = 0
                # Detach w and b from the computation graph so stale nodes
                # are not revisited by later backward() calls.
                self.w.left = None
                self.w.right = None
                self.b.right = None
                self.b.left = None
                losses += loss.data
            print(losses)
            if show:
                im = plt.plot(x, [self.w.data * item + self.b.data for item in x], color='g')
                ims.append(im)
        if show:
            ani = animation.ArtistAnimation(fig, ims, interval=200,
                                            repeat_delay=1000)
            ani.save("test.gif", writer='pillow')
if __name__ == "__main__":
TEST = False
TRAIN = True
if TEST:
a = Tensor(1.0)
b = Tensor(2.0)
c = a * b + a / b - a * a * a
c.backward()
print("grad \na:{} b:{}".format(a.grad,b.grad))
import torch
# 需要用小写的torch.tensor才能添加requires_grad参数
m = torch.tensor([[1.0]],requires_grad=True)
n = torch.tensor([[2.0]],requires_grad=True)
k = m * n + m / n - m * m * m
k.backward()
print("grad torch\nm:{} n:{}".format(m.grad.item(),n.grad.item()))
if TRAIN:
x = [1,2,3,4,5]
y = [6,5,4,3,2]
clf = Linear_regression()
clf.fit(x,y)
|
import logging
import logging.config
from src import settings
# Uvicorn-style console logging: a single StreamHandler on the root logger,
# formatted with uvicorn's DefaultFormatter; level comes from app settings.
LOGGING_CONFIG = {
    "version": 1,
    # Keep loggers that were created before this config is applied.
    "disable_existing_loggers": False,
    "formatters": {
        "correlated": {
            "()": "uvicorn.logging.DefaultFormatter",
            "format": "%(levelprefix)s [%(name)s:%(lineno)s] %(message)s",
        },
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "correlated",
        },
    },
    "root": {"level": settings.LOG_LEVEL, "handlers": ["console"]},
}

# Apply the configuration at import time.
logging.config.dictConfig(LOGGING_CONFIG)
|
#!/usr/bin/python3
from __future__ import print_function
import os.path
import pandas as pd
import mrcfile
from proteinpy.structure import Structure
from proteinpy.contact import Contact
from proteinpy.network import Network
from proteinpy.electron_density import ElectronDensity
class Parser:
    """Parse PDB/DSSP/CCP4 files for one structure into a DataFrame plus
    derived Structure/Contact/Network objects."""

    def __init__(self, pdb_id, pdb_only=False):
        """Create a Parser and eagerly load the available data files.

        pdb_id   -- PDB identifier; files are looked up at ../PDB/<id>.pdb,
                    ../DSSP/<id>.dssp and ../CCP4/<id>.ccp4 (relative paths).
        pdb_only -- when True, parse only the PDB file (skip DSSP/CCP4).
        """
        self._pdb_id = pdb_id
        self._pdb_df = pd.DataFrame(columns=['PDB', 'CHAIN_ID', 'RES_SEQ_N', 'RES_NAME',
                                             'ATOM_NAME', 'COOR_X', 'COOR_Y', 'COOR_Z', 'DSSP'])
        self.electrondensity = ElectronDensity()
        if pdb_only:
            try:
                self.fetch_pdb()
            except Exception:
                # Best-effort load: keep the empty dataframe on failure.
                pass
        else:
            try:
                # BUG FIX: was `self.fetch_pdb` (missing parentheses) — the
                # method was never called, leaving the dataframe empty.
                self.fetch_pdb()
                self.fetch_dssp()
                self.fetch_ccp4()
            except Exception:
                # Best-effort: downstream objects get whatever was parsed.
                pass
        self.structure = Structure(self._pdb_df)
        self.contact = Contact(self.structure)
        self.network = Network(self.contact)

    def fetch_pdb(self):
        """Parse ../PDB/<id>.pdb into self._pdb_df; return self."""
        pdb_file = '../PDB/{}.pdb'.format(self._pdb_id)
        # NOTE(review): intentionally exported as module globals by the
        # original code — confirm nothing else reads them before localizing.
        global res_names, atom_names
        # Column names for the fixed-width ATOM/HETATM record fields.
        column_names = ["PDB", "ATOM", "ATOM_SN", "ATOM_NAME", "ALT_LOC_ID",
                        "RES_NAME", "CHAIN_ID", "RES_SEQ_N", "CHAIN_RES_N",
                        "INSERT_RES", "COOR_X", "COOR_Y", "COOR_Z", "OCCUP",
                        "B_FACTOR", "SEGMENT_ID", "ELEMENT_SYMBOL"]
        # The 20 standard residues plus water.
        res_names = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS',
                     'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
                     'LEU', 'LYS', 'MET', 'PHE', 'PRO',
                     'SER', 'THR', 'TRP', 'TYR', 'VAL',
                     'HOH']
        atom_names = ['N', 'C', 'CA', 'O', 'CB', 'OXT',
                      'SD', 'CZ', 'CH2', 'CG1', 'NZ', 'ND2',
                      'OD1', 'CD1', 'CE1', 'CE', 'CD2', 'CZ3',
                      'SG', 'OE2', 'NE1', 'NE', 'CE2', 'CG',
                      'NH2', 'OE1', 'CD', 'ND1', 'OH', 'NE2',
                      'NH1', 'CG2', 'OD2', 'CZ2', 'CE3', 'OG', 'OG1']
        if not os.path.exists(pdb_file):
            print("ERROR! CANNOT FIND PDB FILE.")
            return self
        rows = []
        with open(pdb_file, 'r') as f:
            for line in f.readlines():
                # Select all atoms (water & residues) whose residue and atom
                # names are recognized.
                if (line[:6].strip() in ["ATOM", "HETATM"]) and (line[17:20].strip() in res_names) and (line[12:16].strip() in atom_names):
                    ATOM_CODE = line[:6].strip()        # e.g. "HETATM"
                    ATOM_SN = line[6:11].strip()        # atom serial number
                    ATOM_NAME = line[12:16].strip()     # e.g. "O"
                    ALT_LOC_ID = line[16].strip()       # alternative location indicator
                    RES_NAME = line[17:20].strip()      # e.g. "HOH"
                    CHAIN_ID = line[21].strip()         # e.g. "A"
                    RES_SEQ_N = line[22:26].strip()     # residue sequence number, e.g. "499"
                    CHAIN_RES_N = CHAIN_ID + '.' + RES_SEQ_N
                    INSERT_RES = line[26]
                    COOR_X, COOR_Y, COOR_Z = line[30:38], line[38:46], line[46:54]
                    OCCUP = line[54:60]
                    B_FACTOR = line[60:66]
                    SEGMENT_ID = line[72:76]
                    ELEMENT_SYMBOL = line[76:78]
                    rows.append([self._pdb_id, ATOM_CODE, ATOM_SN, ATOM_NAME, ALT_LOC_ID, RES_NAME, CHAIN_ID, RES_SEQ_N,
                                 CHAIN_RES_N, INSERT_RES, COOR_X, COOR_Y, COOR_Z, OCCUP, B_FACTOR, SEGMENT_ID,
                                 ELEMENT_SYMBOL])
        pdb_df = pd.DataFrame(rows, columns=column_names)
        pdb_df['RES_SEQ_N'] = pdb_df['RES_SEQ_N'].astype('int64')
        pdb_df[["COOR_X", "COOR_Y", "COOR_Z"]] = pdb_df[["COOR_X", "COOR_Y", "COOR_Z"]].astype('float')
        # Atoms sharing identical coordinates (e.g. altloc copies) keep only
        # the first occurrence.
        pdb_df.drop_duplicates(subset=['COOR_X', 'COOR_Y', 'COOR_Z'], keep='first', inplace=True)
        self._pdb_df = pdb_df[['PDB', 'CHAIN_ID', 'RES_SEQ_N', 'RES_NAME', 'ATOM_NAME', 'COOR_X', 'COOR_Y', 'COOR_Z']]
        return self

    def fetch_dssp(self):
        """Parse ../DSSP/<id>.dssp and left-merge the secondary-structure
        column into self._pdb_df (missing values become 'NA'); return self."""
        dssp_file = '../DSSP/{}.dssp'.format(self._pdb_id)
        column_names = ["PDB", "RES_NAME", "CHAIN_ID", "RES_SEQ_N", "DSSP", "S_STRUCTURE"]
        # One-letter -> three-letter residue codes.
        res_dict = {'A': 'ALA', 'R': 'ARG', 'N': 'ASN', 'D': 'ASP', 'C': 'CYS',
                    'Q': 'GLN', 'E': 'GLU', 'G': 'GLY', 'H': 'HIS', 'I': 'ILE',
                    'L': 'LEU', 'K': 'LYS', 'M': 'MET', 'F': 'PHE', 'P': 'PRO',
                    'S': 'SER', 'T': 'THR', 'W': 'TRP', 'Y': 'TYR', 'V': 'VAL'}
        # DSSP secondary-structure codes -> descriptions.
        dssp_dict = {'H': 'Alpha helix', 'B': 'Beta bridge',
                     'E': 'Strand', 'G': 'Helix-3',
                     'I': 'Helix-5', 'T': 'Turn',
                     'S': 'Bend', ' ': 'Loop',
                     'NIL': 'NIL'}
        ss_dict = {column: [] for column in column_names}
        if not os.path.exists(dssp_file):
            print("ERROR! CANNOT FIND DSSP FILE.")
            return self
        datastarted = False
        # FIX: open through a context manager so the handle is always closed
        # (the original left the file open).
        with open(dssp_file, 'r') as f:
            for line in f.readlines():
                # Data rows follow the "#  RESIDUE ..." header line.
                if line.split()[0] == "#" and line.split()[1] == "RESIDUE":
                    datastarted = True
                if datastarted and line.split()[0] != "#" and (line[13:14] in res_dict.keys() or line[13:14].islower()):
                    # Lower-case residue letters are mapped to CYS, as the
                    # original code assumes.
                    RES_NAME = res_dict[line[13:14]] if line[13:14].isupper() else 'CYS'
                    CHAIN_ID = line[11:12]
                    RES_SEQ_N = (line[6:10])
                    S_STRUCTURE = dssp_dict[line[16:17]]
                    ss_dict['PDB'].append(self._pdb_id)
                    ss_dict["RES_NAME"].append(RES_NAME)
                    ss_dict["CHAIN_ID"].append(CHAIN_ID)
                    ss_dict["RES_SEQ_N"].append(RES_SEQ_N)
                    ss_dict["DSSP"].append(line[16:17])
                    ss_dict["S_STRUCTURE"].append(S_STRUCTURE)
        ss_df = pd.DataFrame(ss_dict, columns=column_names)
        ss_df['RES_SEQ_N'] = ss_df['RES_SEQ_N'].astype('int64')
        self._pdb_df = self._pdb_df.merge(ss_df.iloc[:, :-1], how='left',
                                          on=['PDB', 'RES_NAME', 'CHAIN_ID', 'RES_SEQ_N', ]).fillna('NA')
        return self

    def fetch_ccp4(self):
        """Load the electron-density map for this PDB id; return self."""
        self.electrondensity = self.electrondensity.fetch_data(pdb_id=self._pdb_id)
        return self
|
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.models import Document
from mayan.apps.documents.serializers.document_serializers import DocumentSerializer
from .models import DocumentCheckout
from .permissions import permission_document_check_out
class DocumentCheckoutSerializer(serializers.ModelSerializer):
    """Read representation of a checkout with its nested document."""
    document = DocumentSerializer()

    class Meta:
        extra_kwargs = {
            'url': {
                'lookup_url_kwarg': 'checkout_id',
                'view_name': 'rest_api:checkedout-document-view'
            },
        }
        fields = ('document', 'id', 'url')
        model = DocumentCheckout
class NewDocumentCheckoutSerializer(serializers.ModelSerializer):
    """Create a checkout: takes a document PK (write-only), returns the
    resulting checkout with its document (read-only)."""
    # Whether new document files may be uploaded while checked out.
    block_new_file = serializers.BooleanField()
    document_pk = serializers.IntegerField(
        help_text=_('Primary key of the document to be checked out.'),
        write_only=True
    )
    expiration_datetime = serializers.DateTimeField()

    class Meta:
        fields = (
            'block_new_file', 'document', 'document_pk',
            'expiration_datetime', 'id',
        )
        model = DocumentCheckout
        read_only_fields = ('document',)
        write_only_fields = ('document_pk',)

    def create(self, validated_data):
        """Resolve the document, enforce the check-out ACL for the
        requesting user, then create the checkout on their behalf."""
        document = Document.objects.get(pk=validated_data.pop('document_pk'))
        AccessControlList.objects.check_access(
            obj=document, permissions=(permission_document_check_out,),
            user=self.context['request'].user
        )
        validated_data['document'] = document
        validated_data['user'] = self.context['request'].user
        return super().create(validated_data=validated_data)
|
# The data we need to retrieve
# 1. The total number of votes cast
# 2. A complete list of candidates who received votes
# 3. The percentage of votes each candidate won
# 4. The total number of votes each candidate won
# 5. The winner of the election based on popular vote
###
# # Assign a variable for the file to load and the path.
# file_to_load = 'Resources/election_results.csv'
# # open the election results and read the file
# with open(file_to_load) as election_data:
# # to do: perform analysis
# print(election_data)
# # close the file
# election_data.close()
###
# Add our dependencies.
import csv
import os

# Input: the raw election results CSV.
file_to_load = os.path.join("Resources", "election_results.csv")
# Output: the analysis text file.
file_to_save = os.path.join("analysis", "election_analysis.txt")

# Running total of all ballots cast.
total_votes = 0
# Unique candidate names, in order of first appearance.
candidate_options = []
# Vote tally per candidate: {candidate_name: vote_count}.
candidate_votes = {}
# Trackers for the current leader while scanning the results.
winning_candidate = ""
winning_count = 0
winning_percentage = 0

# Open the election results and tally them row by row.
with open(file_to_load) as election_data:
    # Wrap the file object in a CSV reader.
    file_reader = csv.reader(election_data)
    # Skip the header row.
    headers = next(file_reader)
    for row in file_reader:
        # Every row is one ballot.
        total_votes += 1
        # Candidate name is the third column.
        candidate_name = row[2]
        # First time we see a candidate: register them with a zero tally.
        if candidate_name not in candidate_options:
            candidate_options.append(candidate_name)
            candidate_votes[candidate_name] = 0
        # Count this ballot for the candidate.
        candidate_votes[candidate_name] += 1
# Save the results to the text file (and echo everything to the terminal).
with open(file_to_save, "w") as txt_file:
    election_results = (
        "\nElection Results\n"
        "-----------------------\n"
        f"Total Votes: {total_votes:,}\n"  # ":," inserts thousands separators
        "-----------------------\n")
    # end="" because the results string already ends with a newline.
    print(election_results, end="")
    txt_file.write(election_results)
    # Per-candidate percentage and vote count.
    for candidate_name in candidate_votes:
        votes = candidate_votes[candidate_name]
        vote_percentage = float(votes) / float(total_votes) * 100
        # ":.1f" rounds the percentage to one decimal place.
        candidate_results = (
            f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")
        print(candidate_results)
        txt_file.write(candidate_results)
        # Track the leader: replace the stored winner whenever this
        # candidate beats the best count/percentage seen so far.
        if (votes > winning_count) and (vote_percentage > winning_percentage):
            winning_count = votes
            winning_percentage = vote_percentage
            winning_candidate = candidate_name
    # Winner summary appended after all candidates are printed.
    winning_candidate_summary = (
        f"----------\n"
        f"Winner: {winning_candidate}\n"
        f"Winning Vote Count: {winning_count:,}\n"
        f"Winning Percentage: {winning_percentage:.1f}%\n"
        f"----------\n"
    )
    txt_file.write(winning_candidate_summary)
###
# # create a filename variable to a direct or indirect path to the file.
# file_to_save = os.path.join("analysis", "election_analysis.txt")
# # using the with statement open the file as a text file
# with open(file_to_save, "w") as txt_file:
# # write some data to the file
# txt_file.write("Hello World")
|
import base64
from django.core.management import call_command
from django.test import Client
from django.urls import reverse
from corehq.apps.domain.utils import clear_domain_names
from tastypie.models import ApiKey
from corehq.apps.accounting.models import (
BillingAccount,
DefaultProductPlan,
SoftwarePlanEdition,
Subscription,
SubscriptionAdjustment,
)
from corehq.apps.domain.models import Domain
from corehq.apps.export.models import (
PathNode,
ExportItem,
FormExportInstance,
CaseExportInstance,
ExportColumn, TableConfiguration)
from corehq.apps.users.models import WebUser
from corehq.pillows.mappings.case_mapping import CASE_INDEX_INFO
from corehq.pillows.mappings.xform_mapping import XFORM_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted, reset_es_index
class OdataTestMixin(object):
    """Shared setup/teardown helpers for OData view tests: test domain,
    admin web user, accounting subscription, and basic-auth requests."""

    view_urlname = None  # set by subclasses; used to build view_url

    @classmethod
    def _set_up_class(cls):
        # Fresh client, domain, admin user, and export instance under test.
        cls.client = Client()
        clear_domain_names('test_domain')
        cls.domain = Domain(name='test_domain')
        cls.domain.save()
        cls.web_user = WebUser.create(cls.domain.name, 'test_user', 'my_password')
        cls._setup_user_permissions()
        cls.app_id = '1234'
        cls.instance = cls.get_instance(cls.domain.name)
        cls.instance.save()

    @classmethod
    def get_instance(cls, domain_name):
        # Subclasses return the export instance (case or form) under test.
        raise NotImplementedError()

    @classmethod
    def _teardownclass(cls):
        cls.domain.delete()

    @classmethod
    def _setup_accounting(cls):
        # Give the domain an ADVANCED-plan subscription.
        call_command('cchq_prbac_bootstrap')
        cls.account, _ = BillingAccount.get_or_create_account_by_domain(cls.domain.name, created_by='')
        plan_version = DefaultProductPlan.get_default_plan_version(SoftwarePlanEdition.ADVANCED)
        cls.subscription = Subscription.new_domain_subscription(cls.account, cls.domain.name, plan_version)

    @classmethod
    def _teardown_accounting(cls):
        # Adjustments must go first so the subscription can be deleted.
        SubscriptionAdjustment.objects.all().delete()
        cls.subscription.delete()
        cls.account.delete()

    @classmethod
    def _setup_user_permissions(cls):
        cls.web_user.set_role(cls.domain.name, 'admin')
        cls.web_user.save()

    def _execute_query(self, credentials, view_url=None):
        # Issue a GET carrying HTTP basic-auth credentials.
        return self.client.get(view_url or self.view_url, HTTP_AUTHORIZATION='Basic ' + credentials)

    @classmethod
    def _get_correct_credentials(cls):
        return OdataTestMixin._get_basic_credentials(cls.web_user.username, 'my_password')

    @staticmethod
    def _get_basic_credentials(username, password):
        # base64("username:password"), as HTTP basic auth requires.
        return base64.b64encode("{}:{}".format(username, password).encode('utf-8')).decode('utf-8')

    @property
    def view_url(self):
        return reverse(self.view_urlname, kwargs={'domain': self.domain.name, 'config_id': self.instance._id})
class CaseOdataTestMixin(OdataTestMixin):
    """OdataTestMixin specialized for case exports."""

    @classmethod
    def get_instance(cls, domain_name):
        # OData case export: two real case properties, two selected
        # placeholder properties, and one unselected property.
        return CaseExportInstance(
            domain=domain_name,
            is_odata_config=True,
            transform_dates=False,
            tables=[
                TableConfiguration(
                    selected=True,
                    columns=[
                        ExportColumn(label='closed', selected=True,
                                     # this is what exports generate for a base level property
                                     item=ExportItem(
                                         path=[PathNode(name='closed')])),
                        ExportColumn(label='date_modified', selected=True,
                                     item=ExportItem(path=[
                                         PathNode(name='date_modified')])),
                        ExportColumn(label='selected_property_1',
                                     selected=True),
                        ExportColumn(label='selected_property_2',
                                     selected=True),
                        ExportColumn(label='unselected_property'),
                    ],
                ),
            ]
        )
class FormOdataTestMixin(OdataTestMixin):
    """OdataTestMixin specialized for form exports."""

    @classmethod
    def get_instance(cls, domain_name):
        # OData form export: a top-level property, a nested form/meta
        # property, two selected placeholders, and one unselected property.
        return FormExportInstance(
            domain=domain_name,
            is_odata_config=True,
            transform_dates=False,
            tables=[
                TableConfiguration(
                    selected=True,
                    columns=[
                        ExportColumn(label='received_on', selected=True,
                                     item=ExportItem(
                                         path=[PathNode(name='received_on')])),
                        ExportColumn(label='started_time', selected=True,
                                     item=ExportItem(path=[
                                         PathNode(name='form'),
                                         PathNode(name='meta'),
                                         PathNode(name='timeStart'),
                                     ])),
                        ExportColumn(label='selected_property_1',
                                     selected=True),
                        ExportColumn(label='selected_property_2',
                                     selected=True),
                        ExportColumn(label='unselected_property'),
                    ],
                ),
            ]
        )
def generate_api_key_from_web_user(web_user):
    """Get or create an ApiKey for the web user's Django user, regenerate
    its key, save it and return the ApiKey instance."""
    api_key = ApiKey.objects.get_or_create(user=web_user.get_django_user())[0]
    api_key.key = api_key.generate_key()
    api_key.save()
    return api_key
def setup_es_case_index():
    # Reset (recreate) the Elasticsearch case index for tests.
    reset_es_index(CASE_INDEX_INFO)
def setup_es_form_index():
    # Reset (recreate) the Elasticsearch form index for tests.
    reset_es_index(XFORM_INDEX_INFO)
def ensure_es_case_index_deleted():
    # Tear down: delete the Elasticsearch case index if it exists.
    ensure_index_deleted(CASE_INDEX_INFO.index)
def ensure_es_form_index_deleted():
    # Tear down: delete the Elasticsearch form index if it exists.
    ensure_index_deleted(XFORM_INDEX_INFO.index)
|
from django.views.generic.base import View
from utils import json_response
class ReferralsView(View):
    """Demo endpoint serving hard-coded referral fixtures."""

    @staticmethod
    def get(request):
        '''Retrieve all referrals.

        Returns the canned incoming referrals when ``?incoming=true`` is
        passed; otherwise an empty list.
        '''
        data = []
        # Use .get() so a request without the 'incoming' query parameter
        # falls through to the empty list instead of raising
        # MultiValueDictKeyError (a KeyError subclass).
        if request.GET.get('incoming') == 'true':
            data = [
                {
                    'id': 2,
                    'client_name': 'Matthew Pannakuk',
                    'date_referred': '2017-02-02',
                    'referring_entity': 'Sts Peter and Paul',
                    'referring_to': 'your_org',
                    'referral_status': 'accepted',
                    'notes': [
                        {
                            'author': 'Sally Johnson',
                            'text': 'This man has been referred.',
                            'date': '2017-02-02'
                        }
                    ]
                },
                {
                    'id': 3,
                    'client_name': 'John Drake',
                    'date_referred': '2017-02-12',
                    'referring_entity': 'Sts Peter and Paul',
                    'referring_to': 'your_org',
                    'referral_status': 'denied',
                    'notes': [
                        {
                            'author': 'Sally Johnson',
                            'text': 'This man has been referred.',
                            'date': '2017-02-12'
                        }
                    ]
                },
                {
                    'id': 4,
                    'client_name': 'Vincent Samuels',
                    'date_referred': '2017-03-07',
                    'referring_entity': 'Covenant House',
                    'referring_to': 'your_org',
                    'referral_status': 'completed',
                    'notes': [
                        {
                            'author': 'Ben Champion',
                            'text': 'This man has been referred.',
                            'date': '2017-03-07'
                        }
                    ]
                },
                {
                    'id': 1,
                    'client_name': 'Ivory Jackson',
                    'date_referred': '2017-05-02',
                    'referring_entity': 'St. Patrick Center',
                    'referring_to': 'your_org',
                    'referral_status': 'arrived',
                    'notes': [
                        {
                            'author': 'Jessica Lister',
                            'text': "I'm referring Ivory to you as he's couch surfing with a family member near your shelter. For now hes stable but if he needs an emergency shelter in the future, yours will likely be the closest. Also it will be easier for him to visit a caseworker at your site.",
                            'date': '2017-05-02'
                        }
                    ]
                },
            ]
        return json_response(data)

    @staticmethod
    def post(request):
        '''Create a new referral.'''
        # Stub: accepts the request and replies with an empty response.
        return json_response()
class ReferralView(View):
    """Stub endpoints for a single referral resource."""

    @staticmethod
    def get(request, referral_id):
        '''Retrieve single referral.'''
        response = json_response()
        return response

    @staticmethod
    def post(request, referral_id):
        '''Update single referral.'''
        response = json_response()
        return response
|
from django.urls import path
from books.views import add_book, details_book, edit_book, delete_book, BooksListView
# Book CRUD routes; all names are referenced via reverse()/{% url %}.
urlpatterns = [
    path('add/', add_book, name='add book'),
    path('details/<int:pk>/', details_book, name='details book'),
    path('edit/<int:pk>/', edit_book, name='edit book'),
    path('delete/<int:pk>/', delete_book, name='delete book'),
    # NOTE(review): unlike the other routes, 'list' has no trailing slash —
    # confirm this is intentional (APPEND_SLASH redirects may apply).
    path('list', BooksListView.as_view(), name='list books'), # Inherit from ListView
]
|
"""File that summarizes all key results.
To train and analyze all models quickly, run in command line
python paper.py -d=0 --train --analyze --testing
To reproduce the results from paper, run
python paper.py -d=0 --train --analyze
To analyze pretrained networks, run
python paper.py -d=0 --analyze
To run specific experiments (e.g. orn2pn, vary_pn), run
python paper.py -d=0 --train --analyze --experiment orn2pn vary_pn
"""
import os
import argparse
import copy
import standard.analysis_orn2pn
import standard.experiment_controls
# Cluster scratch directories: a shared project path and per-user paths.
SCRATCHPATH = '/share/ctn/projects/olfaction_evolution'
ROBERT_SCRATCHPATH = '/share/ctn/users/gy2259/olfaction_evolution'
PETER_SCRATCHPATH = '/share/ctn/users/yw2500/olfaction_evolution'
# Command-line flags controlling which phases (train/analyze) run, which
# experiments are selected, and where/how they execute.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--device', help='CUDA device number', default=0, type=int)
parser.add_argument('-t', '--train', help='Training', action='store_true')
parser.add_argument('-a', '--analyze', help='Analyzing', action='store_true')
parser.add_argument('-test', '--testing', help='For debugging', action='store_true')
parser.add_argument('-e', '--experiment', nargs='+', help='Experiments', default='core')
parser.add_argument('-cp', '--clusterpath', help='cluster path', default=SCRATCHPATH)
parser.add_argument('-c', '--cluster', help='Use cluster?', action='store_true')
parser.add_argument('-p','--pn', nargs='+', help='N_PN', default=[50])
parser.add_argument('--torch', help='Use torch', action='store_true')
args = parser.parse_args()
# Echo the parsed configuration for the log.
for item in args.__dict__.items():
    print(item)
# Restrict TensorFlow/PyTorch to the requested GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
TRAIN, ANALYZE, is_test, use_cluster, cluster_path = args.train, args.analyze, args.testing, args.cluster, args.clusterpath
# TRAIN = True
# use_cluster = True
# args.pn = [50]
# ANALYZE = True
# args.experiment =['control_vary_pn']
# Analysis-only imports: these modules pull in analysis dependencies, so
# they are imported only when --analyze was requested.
if ANALYZE:
    import standard.analysis_pn2kc_training as analysis_pn2kc_training
    import standard.analysis_activity as analysis_activity
    import analytical.numerical_test as numerical_test
    import analytical.analyze_simulation_results as analyze_simulation_results
# experiments: 'core' (the default) maps to a single empty-string entry;
# otherwise run exactly the experiments named on the command line.
if args.experiment == 'core':
    experiments = ['']
else:
    experiments = args.experiment
# if 'control_nonnegative' in experiments:
# path = './files/control_nonnegative'
# if ANALYZE:
# sa.plot_weights(os.path.join(path, '000000'), sort_axis=1, average=False)
# sa.plot_weights(os.path.join(path, '000001'), sort_axis=1, average=False, positive_cmap=False, vlim=[-1, 1])
# for ix in range(0,2):
# standard.analysis_orn2pn.correlation_matrix(path, ix=ix, arg='ortho')
# standard.analysis_orn2pn.correlation_matrix(path, ix=ix, arg='corr')
#
# # # #sign constraint
# sa.plot_progress(path, ykeys=['glo_score','val_acc'], legend_key='sign_constraint_orn2pn')
# sa.plot_results(path, xkey='sign_constraint_orn2pn', ykey='glo_score')
# sa.plot_results(path, xkey='sign_constraint_orn2pn', ykey='val_acc')
# if 'control_orn2pn' in experiments:
# # Vary ORN n duplication under different nKC
# path = './files/control_orn2pn'
# if ANALYZE:
# default = {'ORN_NOISE_STD': 0, 'pn_norm_pre': 'batch_norm', 'kc_dropout_rate': 0.5, 'N_ORN_DUPLICATION':10, 'lr':1e-3}
# ykeys = ['glo_score', 'val_acc']
#
# for yk in ykeys:
# for xk, v in default.items():
# temp = copy.deepcopy(default)
# temp.pop(xk)
# if xk == 'lr':
# logx= True
# else:
# logx = False
# sa.plot_results(path, xkey=xk, ykey=yk,
# select_dict=temp, logx=logx)
#
# sa.plot_progress(path, select_dict=temp, ykeys=[yk], legend_key=xk)
# if 'control_pn2kc' in experiments:
# path = './files/control_pn2kc'
# if ANALYZE:
# default = {'pn_norm_pre': 'batch_norm', 'kc_dropout_rate': 0.5, 'lr': 1e-3, 'initial_pn2kc':0, 'train_kc_bias':True}
# ykeys = ['val_acc', 'K_inferred']
#
# for yk in ykeys:
# exclude_dict = None
# if yk in ['K_inferred', 'sparsity_inferred', 'K','sparsity']:
# exclude_dict = {'lr': [3e-3, 1e-2, 3e-2]}
#
# for xk, v in default.items():
# temp = copy.deepcopy(default)
# temp.pop(xk)
# if xk == 'lr':
# logx = True
# else:
# logx = False
# sa.plot_results(path, xkey=xk, ykey=yk,
# select_dict=temp, logx=logx)
#
# sa.plot_progress(path, select_dict=temp, ykeys=[yk],
# legend_key=xk, exclude_dict=exclude_dict)
# #
# res = standard.analysis_pn2kc_peter.do_everything(path, filter_peaks=False, redo=True)
# for xk, v in default.items():
# temp = copy.deepcopy(default)
# temp.pop(xk)
# sa.plot_xy(path, select_dict=temp, xkey='lin_bins_', ykey='lin_hist_', legend_key=xk, log=res,
# ax_args={'ylim':[0, 500]})
# if 'control_pn2kc_inhibition' in experiments:
# path = './files/control_pn2kc_inhibition'
# if ANALYZE:
# xkey = 'kc_recinh_coeff'
# ykeys = ['val_acc', 'K_inferred']
# # loop_key = 'kc_recinh_step'
# loop_key = None
# select_dict = {'kc_prune_weak_weights': False, 'kc_recinh_step': 9}
# for yk in ykeys:
# sa.plot_results(path, xkey=xkey, ykey=yk, loop_key=loop_key,
# select_dict=select_dict)
#
# sa.plot_progress(path, ykeys=[yk], legend_key=xkey,
# select_dict=select_dict)
#
# res = standard.analysis_pn2kc_peter.do_everything(path, filter_peaks=False, redo=True)
# sa.plot_xy(path, xkey='lin_bins_', ykey='lin_hist_', legend_key=xkey, log=res,
# ax_args={'ylim':[0, 500]})
if 'control_pn2kc_prune_boolean' in experiments:
    # One run per requested PN count (--pn).
    n_pns = [int(x) for x in args.pn]
    path = './files/control_pn2kc_prune_boolean'
    if TRAIN:
        # Training for this experiment is currently broken; fail loudly
        # rather than silently skipping it.
        raise ValueError('This remains to be fixed')
# for n_pn in n_pns:
# cur_path = path + '_' + str(n_pn)
# train(experiment_controls.control_pn2kc_prune_boolean(n_pn),
# save_path=cur_path)
# if ANALYZE:
# xkey = 'kc_prune_weak_weights'
# ykeys = ['val_acc', 'K_inferred','K']
# for n_pn in n_pns:
# cur_path = path + '_' + str(n_pn)
# for yk in ykeys:
# sa.plot_progress(cur_path, ykeys=[yk], legend_key=xkey)
#
# res = standard.analysis_pn2kc_peter.do_everything(cur_path, filter_peaks=False, redo=True, range=1)
# sa.plot_xy(cur_path, xkey='lin_bins_', ykey='lin_hist_', legend_key=xkey, log=res,
# ax_args={'ylim': [0, 500]})
if 'control_pn2kc_prune_hyper' in experiments:
    # One run per requested PN count (--pn).
    n_pns = [int(x) for x in args.pn]
    path = './files/control_pn2kc_prune_hyper'
    if TRAIN:
        # Training for this experiment is currently broken; fail loudly
        # rather than silently skipping it.
        raise ValueError('This remains to be fixed')
# for n_pn in n_pns:
# cur_path = path + '_' + str(n_pn)
# train(experiment_controls.control_pn2kc_prune_hyper(n_pn), control=True,
# save_path=cur_path)
# if ANALYZE:
# for n_pn in n_pns:
# cur_path = path + '_' + str(n_pn)
# default = {'N_KC': 2500, 'lr': 1e-3, 'initial_pn2kc':4./n_pn, 'kc_prune_threshold': 1./n_pn}
# ykeys = ['val_acc', 'K']
# for yk in ykeys:
# exclude_dict = None
# if yk in ['K_inferred', 'sparsity_inferred', 'K', 'sparsity']:
# # exclude_dict = {'lr': [3e-3, 1e-2, 3e-2]}
# pass
#
# for xk, v in default.items():
# temp = copy.deepcopy(default)
# temp.pop(xk)
# logx = True
# # sa.plot_results(cur_path, xkey=k, ykey=yk, figsize=(1.5, 1.5), ax_box=(0.27, 0.25, 0.65, 0.65),
# # select_dict=temp,
# # logx=logx)
# #
# # sa.plot_progress(cur_path, select_dict=temp, ykeys=[yk], legend_key=k, exclude_dict=exclude_dict)
# #
# res = standard.analysis_pn2kc_peter.do_everything(cur_path, filter_peaks=True, redo=True, range=.75)
# for xk, v in default.items():
# temp = copy.deepcopy(default)
# temp.pop(xk)
# sa.plot_xy(cur_path, select_dict=temp, xkey='lin_bins_', ykey='lin_hist_', legend_key=xk, log=res,
# ax_args={'ylim': [0, 500]})
# if 'control_vary_pn' in experiments:
# path = './files/control_vary_pn'
# if ANALYZE:
# sa.plot_weights(os.path.join(path,'000004'), sort_axis=1, average=False)
# sa.plot_weights(os.path.join(path,'000010'), sort_axis=1, average=False, vlim=[0, 5])
# sa.plot_weights(os.path.join(path,'000022'), sort_axis=1, average=False, vlim=[0, 5])
#
# ix = 22
# ix_good, ix_bad = analysis_orn2pn.multiglo_gloscores(path, ix, cutoff=.9, shuffle=False)
# analysis_orn2pn.multiglo_pn2kc_distribution(path, ix, ix_good, ix_bad)
# analysis_orn2pn.multiglo_lesion(path, ix, ix_good, ix_bad)
#
# default = {'kc_dropout_rate': 0.5, 'N_PN':50}
# ykeys = ['val_acc', 'glo_score']
# xticks = [20, 50, 100, 200, 1000]
# for ykey in ykeys:
# sa.plot_results(path, xkey='N_PN', ykey=ykey, figsize=(1.75, 1.75), ax_box=(0.3, 0.3, 0.65, 0.65),
# loop_key='kc_dropout_rate',
# logx=True, ax_args={'xticks': xticks}, plot_args={'alpha':0.7})
# sa.plot_results(path, xkey='N_PN', ykey=ykey, figsize=(1.75, 1.75), ax_box=(0.25, 0.25, 0.65, 0.65),
# loop_key='kc_dropout_rate', select_dict={'kc_dropout_rate':0.5},
# logx=True, ax_args={'xticks':xticks})
# sa.plot_progress(path, ykeys=[ykey], legend_key='N_PN', select_dict={'kc_dropout_rate':0.5})
# if 'control_vary_kc' in experiments:
# path = './files/control_vary_kc'
# if ANALYZE:
# sa.plot_weights(os.path.join(path, '000000'), sort_axis=1, average=False)
# sa.plot_weights(os.path.join(path, '000021'), sort_axis=1, average=False)
# # default = {'kc_dropout_rate': 0.5, 'N_KC':2500}
# # ykeys = ['val_acc', 'glo_score']
# # ylim, yticks = [0, 1.1], [0, .25, .5, .75, 1]
# # xticks = [50, 200, 1000, 2500, 10000]
# # for ykey in ykeys:
# # sa.plot_results(path, xkey='N_KC', ykey=ykey, figsize=(1.75, 1.75), ax_box=(0.3, 0.3, 0.65, 0.65),
# # loop_key='kc_dropout_rate',
# # logx=True, ax_args={'ylim': ylim, 'yticks': yticks, 'xticks': xticks}, plot_args={'alpha':0.7})
# # sa.plot_results(path, xkey='N_KC', ykey=ykey, figsize=(1.75, 1.75), ax_box=(0.25, 0.25, 0.65, 0.65),
# # loop_key='kc_dropout_rate', select_dict={'kc_dropout_rate':0.5},
# # logx=True, ax_args={'ylim': ylim, 'yticks': yticks, 'xticks':xticks})
# if 'train_kc_claws' in experiments:
# path = './files/train_kc_claws'
# if ANALYZE:
# sa.plot_progress(
# path, alpha=.75, linestyles=[':', '-'],
# legends=['Trained', 'Fixed']),
# sa.plot_weights(path, var_name='w_glo', sort_axis=-1, dir_ix=1)
# analysis_pn2kc_training.plot_distribution(path)
# analysis_pn2kc_training.plot_sparsity(path, dynamic_thres=False)
# if 'controls_receptor' in experiments:
# path = './files/controls_receptor'
# if ANALYZE:
# default = {'N_ORN_DUPLICATION': 10, 'or2orn_normalization': True, 'pn_norm_pre':'batch_norm'}
# sa.plot_results(path, xkey='N_ORN_DUPLICATION', ykey='or_glo_score',
# select_dict={'or2orn_normalization': True, 'pn_norm_pre':'batch_norm'}),
# sa.plot_results(path, xkey='or2orn_normalization', ykey='or_glo_score',
# select_dict={'N_ORN_DUPLICATION': 10, 'pn_norm_pre':'batch_norm'})
# sa.plot_results(path, xkey='pn_norm_pre', ykey='or_glo_score',
# select_dict={'N_ORN_DUPLICATION': 10, 'or2orn_normalization': True})
#
# sa.plot_results(path, xkey='N_ORN_DUPLICATION', ykey='combined_glo_score',
# select_dict={'or2orn_normalization': True, 'pn_norm_pre':'batch_norm'}),
# sa.plot_results(path, xkey='or2orn_normalization', ykey='combined_glo_score',
# select_dict={'N_ORN_DUPLICATION': 10, 'pn_norm_pre':'batch_norm'})
# sa.plot_results(path, xkey='pn_norm_pre', ykey='combined_glo_score',
# select_dict={'N_ORN_DUPLICATION': 10, 'or2orn_normalization': True})
#
# sa.plot_results(path, xkey='N_ORN_DUPLICATION', ykey='val_acc',
# select_dict={'or2orn_normalization': True, 'pn_norm_pre':'batch_norm'}),
# sa.plot_results(path, xkey='or2orn_normalization', ykey='val_acc',
# select_dict={'N_ORN_DUPLICATION': 10, 'pn_norm_pre':'batch_norm'})
# sa.plot_results(path, xkey='pn_norm_pre', ykey='val_acc',
# select_dict={'N_ORN_DUPLICATION': 10, 'or2orn_normalization': True})
# if 'vary_kc_claws' in experiments:
# path = './files/vary_kc_claws'
# if ANALYZE:
# import tools
# t = [1, 2, 9, 19, 29, 39, 49, 59, 69]
# for i in t:
# res = tools.load_all_results(path, argLast=False, ix=i)
# sa.plot_results(path, xkey='kc_inputs', ykey='log_val_loss',
# select_dict={'ORN_NOISE_STD':0}, res=res, string = str(i), figsize=(2, 2))
#
# sa.plot_progress(path, select_dict = {'kc_inputs':[7,15,30], 'ORN_NOISE_STD':0}, legends=['7', '15', '30'])
# # analysis_activity.sparseness_activity(path, 'kc_out')
# # import tools
# # for i in range(8):
# # res = tools.load_all_results(path, argLast=False, ix=i)
# # sa.plot_results(path, xkey='kc_inputs', ykey='train_loss',
# # select_dict={'ORN_NOISE_STD':0}, res=res, string = str(i))
#
# # sa.plot_results(path, xkey='kc_inputs', ykey='val_acc', loop_key='ORN_NOISE_STD',
# # figsize=(1.5, 1.5), ax_box=(0.27, 0.25, 0.65, 0.65),)
# sa.plot_results(path, xkey='kc_inputs', ykey='val_acc', select_dict={'ORN_NOISE_STD':0},
# figsize=(2, 2))
# # sa.plot_results(path, xkey='kc_inputs', ykey='log_val_loss', loop_key='ORN_NOISE_STD',
# # figsize=(1.5, 1.5), ax_box=(0.27, 0.25, 0.65, 0.65),
# # ax_args={'ylim':[-1, 2], 'yticks':[-1,0,1,2]})
# sa.plot_results(path, xkey='kc_inputs', ykey='log_val_loss', select_dict={'ORN_NOISE_STD': 0},
# figsize=(2, 2),
# ax_args={'ylim':[-1, 2], 'yticks':[-1,0,1,2]})
if 'vary_kc_activity_fixed' in experiments:
    # Vary KC activity under different number of relabels
    path = './files/vary_kc_activity_fixed'
    if ANALYZE:
        # sa.plot_results(path, xkey='n_trueclass', ykey='val_acc', loop_key='kc_dropout_rate')
        analysis_activity.sparseness_activity(path, 'kc_out')
        analysis_activity.plot_mean_activity_sparseness(path, 'kc_out', xkey='n_trueclass', loop_key='kc_dropout_rate')
if 'vary_kc_activity_trainable' in experiments:
    # Vary KC activity under different number of relabels
    path = './files/vary_kc_activity_trainable'
    if ANALYZE:
        analysis_pn2kc_training.plot_distribution(path)
        analysis_pn2kc_training.plot_sparsity(path, dynamic_thres=True)
# sa.plot_results(path, xkey='n_trueclass', ykey='val_acc', loop_key='kc_dropout_rate')
# analysis_activity.sparseness_activity(path, 'kc_out')
# analysis_activity.plot_mean_activity_sparseness(path, 'kc_out', xkey='n_trueclass', loop_key='kc_dropout_rate')
# if 'apl' in experiments:
# # Adding inhibitory APL unit.
# path = './files/apl'
# if ANALYZE:
# analysis_activity.sparseness_activity(
# path, 'kc_out', activity_threshold=0., lesion_kwargs=None)
# lk = {'name': 'model/apl2kc/kernel:0',
# 'units': 0, 'arg': 'outbound'}
# analysis_activity.sparseness_activity(
# path, 'kc_out', activity_threshold=0., lesion_kwargs=lk,
# figname='lesion_apl_')
# if 'vary_orn_corr' in experiments:
# # Vary ORN correlation
# path = './files/vary_orn_corr'
# if ANALYZE:
# xkey = 'orn_corr'
# ykeys = ['val_acc', 'K_inferred', 'glo_score']
# progress_keys = ['log_val_loss', 'log_train_loss', 'val_loss',
# 'train_loss', 'val_acc', 'glo_score', 'K_inferred']
# for yk in ykeys:
# sa.plot_results(path, xkey=xkey, ykey=yk, figsize=(3.0, 1.5))
# sa.plot_progress(path, legend_key=xkey, ykeys=progress_keys)
if 'analytical' in experiments:
    # Analytical/numerical comparison experiment.
    if TRAIN:
        numerical_test.get_optimal_K_simulation()
    if ANALYZE:
        numerical_test.main_compare()
        numerical_test.main_plot()
        analyze_simulation_results.main()
import matplotlib.pyplot as plt
import numpy as np
from lms_code.analysis.run_bem import get_slip_magnitude
import lms_code.lib.rep2 as rep2
import lms_code.plots.plot_all as lms_plot
def main():
    """Plot earthquake recurrence time T versus average slip d for the
    loaded BEM fault model, annotate reference events, and save the figure
    as 'hazard_<model>'."""
    lms_plot.setup()
    fig = plt.figure()
    which_model = 'all_details'
    # Load the BEM solution and the shortening-rate estimate for this model.
    bem_soln = rep2.load('bem_' + which_model)
    shortening = rep2.load('shortening_estimate_' + which_model)
    est = shortening['lsqr_shortening']
    est_low = est - shortening['lsqr_shortening_error']
    est_high = est + shortening['lsqr_shortening_error']
    # Length-weighted slip accumulated over fault elements past the joint.
    total_length = 0.0
    slip = 0.0
    slip_low = 0.0
    slip_high = 0.0
    # Location used to exclude elements; small offsets presumably nudge the
    # point off an element boundary — confirm against the mesh definition.
    joint = [4.20012e5 + 1.6, -2.006e4 - 5]
    for e in bem_soln['fault_mesh']:
        if e.vertex1.loc[0] < joint[0] - 10:
            continue
        total_length += e.length
        slip_mag = np.linalg.norm(get_slip_magnitude(e))
        slip += e.length * est * slip_mag
        slip_low += e.length * est_low * slip_mag
        slip_high += e.length * est_high * slip_mag
    # Length-weighted average slip rates; the /1000 converts units
    # (presumably mm/yr -> m/yr, given the axis labels below — confirm).
    s = (slip / total_length) / 1000
    s_low = (slip_low / total_length) / 1000
    s_high = (slip_high / total_length) / 1000
    slip_err = s_high - s
    # s = 6.1 / 1000
    # s_low = 4.6 / 1000
    # s_high = 7.6 / 1000
    # Recurrence time T maps to displacement d via the average slip rate.
    T = np.linspace(0, 3000, 100)
    d = T * s
    T_high = d / s_low
    T_low = d / s_high
    # Wenchuan-like event: 4 m of slip.
    wenchuan_d = 4.0
    wenchuan_T_low = wenchuan_d / s_low
    wenchuan_T = wenchuan_d / s
    wenchuan_T_high = wenchuan_d / s_high
    print("Wenchuan recurrence: " + str(wenchuan_T) + " (low: " + str(wenchuan_T_low) + ", high: " + str(wenchuan_T_high) + ")")
    # Wells & Coppersmith (1994) magnitude-displacement scaling coefficients
    # (see the figure annotation below).
    a_wells = 6.93
    b_wells = 0.82
    mag7_ad = np.exp((7.0 - a_wells) / b_wells)
    mag7_T = mag7_ad / s
    # Paleoseismic recurrence estimate (Ran et al. 2010 per the labels).
    paleo_T = 2300
    paleo_ad = paleo_T * s
    paleo_mag = (np.log(paleo_ad) * b_wells) + a_wells
    # Main curve with its uncertainty band, plus the three reference points.
    plt.plot(d, T, 'k-')
    plt.fill_between(d, T_low, T_high, facecolor = '#AAAAAA')
    plt.plot([0, paleo_ad + 100], [paleo_T, paleo_T], 'k--')
    plt.plot([wenchuan_d, mag7_ad, paleo_ad], [wenchuan_T, mag7_T, paleo_T],
             linestyle = 'None',
             marker = 'o',
             markeredgewidth = 4.0,
             markeredgecolor = (0, 0, 0, 1.0),
             markerfacecolor = (1, 1, 1, 1.0),
             markersize = 15)
    # Plot Wenchuan
    text = 'Wenchuan-like $\\textrm{M}_{\\textrm{w}}$ 7.9 (' + '%.0f'%wenchuan_d + ' m, ' +\
        '%.0f'%wenchuan_T + ' years)'
    plt.annotate(text, (wenchuan_d, wenchuan_T),
                 xytext = (wenchuan_d + 0.5, wenchuan_T - 50))
    # Plot the Mw 7 pt
    text = 'Typical $\\textrm{M}_{\\textrm{w}}$ 7.0 (' + '%.0f'%mag7_ad + ' m, ' +\
        '%.0f'%mag7_T + ' years)'
    plt.annotate(text, (mag7_ad, mag7_T),
                 xytext = (mag7_ad + 0.9, mag7_T - 30))
    # Plot the paleoseismic pt
    text = 'Low paleoseismic estimate'
    plt.text(1.7, 2350, text)
    text = '($Ran$ $et$ $al.$ 2010)'
    plt.text(1.7, 2200, text)
    text = '$\\textrm{M}_{\\textrm{w}}$ ' + '%0.f'%paleo_mag + ', ' + '%0.f'%paleo_ad + ' m'
    plt.annotate(text, (paleo_ad, paleo_T),
                 xytext = (paleo_ad - 3.2, paleo_T + 30))
    plt.text(2.0, 40, '($Wells$ $and$ $Coppersmith$ 1994)')
    plt.text(0.5, 1800, 'average slip rate = ' + '%.1f'%(s * 1000) + ' $\pm$ %.1f'%(slip_err * 1000) + ' mm/yr')
    plt.ylabel('$T$ (years)')
    plt.xlabel('$d$ (meters)')
    plt.ylim([0, 2500])
    plt.xlim([0, 2500 * s])
    width = 7.0
    fig.set_size_inches([width, (6.0 / 8.0) * width])
    plt.savefig('hazard_' + which_model)
# Script entry point.
if __name__ == '__main__':
    main()
|
import numpy as np
# Inclusive [start, end] residue ranges for each annotated domain.
# NOTE(review): 'TN1'/'TN2' naming and the 627-703 gap between NBD1 and TM7
# are taken as given — confirm against the source annotation.
DOMAINS = {
    "N-term": [30, 43],
    "TM1": [44, 86],
    "EL1": [87, 91],
    "TM2": [92, 153],
    "IL1": [154, 163],
    "TM3": [164, 206],
    "EL2": [207, 207],
    "TM4": [208, 263],
    "IL2": [264, 265],
    "TM5": [266, 319],
    "EL3": [320, 323],
    "TM6": [324, 366],
    "TN1": [367, 388],
    "NBD1": [389, 626],
    "TM7": [704, 737],
    "EL4": [738, 740],
    "TM8": [741, 794],
    "IL3": [795, 806],
    "TM9": [807, 849],
    "EL5": [850, 850],
    "TM10": [851, 906],
    "IL4": [907, 908],
    "TM11": [909, 961],
    "EL6": [962, 966],
    "TM12": [967, 1009],
    "TN2": [1010, 1029],
    "NBD2": [1030, 1271],
}
# Reverse lookup: residue number -> domain name. Uses range() instead of
# np.arange() so the keys are plain ints (np.arange yields np.int64 keys
# and needlessly pulls numpy into a pure-integer comprehension).
DOMAINS_REV = {
    residue: domain
    for domain, (start, end) in DOMAINS.items()
    for residue in range(start, end + 1)
}
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import json
import sys
from optparse import OptionParser
# Command-line interface for the asciicast -> vector-graphic converter.
options = OptionParser(usage='%prog recording.json [options]', description='Converter for asciicast (asciinema recording) to vector graphic')
options.add_option('-s', '--start', type='int', default=-1, help='Start frame (default: first)')
options.add_option('-e', '--end', type='int', default=-1, help='End frame (default: last)')
options.add_option('-o', '--out', type='str', default='-', help='Filename to write vector graphic to (default: stdout)')
options.add_option('-t', '--type', type='str', default="svg", help='Vector graphic type: svg or tikz (default: svg)')
# NOTE(review): -b sets 'background' True yet is described as 'No background'
# (and -i/-c likewise enable the inverted/no-structure behavior they name) —
# confirm how main() interprets these flags.
options.add_option('-b', '--background', action='store_true', default=False, help='No background')
options.add_option('-i', '--invert', action='store_true', default=False, help='Invert black and white')
options.add_option('-c', '--content', action='store_true', default=False, help='Do not add document structure (tikz only)')
options.add_option('-q', '--query', action='store_true', default=False, help='Show information about the JSON file and exit')
options.add_option('-v', '--version', action='store_true', default=False, help='Show version info and exit')
class ANSIParser:
    """Interpret the ANSI escape codes in an asciinema recording.

    Loads the JSON recording, concatenates the selected stdout frames, and
    replays them into a 2-D ``screen`` buffer of per-cell dicts
    (char/fgcolor/bgcolor/bold/inverse/charset). The buffer can then be
    rendered via dump() (plain text), toTikz() or toSVG().
    """

    def __init__(self, filename, start_line = -1, max_line = -1):
        """Load *filename* and parse frames [start_line, max_line]
        (inclusive; -1 means unbounded on that side)."""
        self.debug_output = False
        self.filename = filename
        with open(filename) as j:
            self.meta = json.load(j)
        # Concatenate the payloads (second element) of the selected frames
        # into one stream, replayed as a single "line" by parse().
        cont = ""
        for line_nr,line in enumerate(self.meta["stdout"]):
            if start_line != -1 and line_nr < start_line: continue
            if max_line != -1 and line_nr > max_line: break
            cont += line[1]
        self.lines = [ cont ]
        # One extra cell in each dimension beyond the recorded terminal size.
        self.width = self.meta["width"] + 1
        self.height = self.meta["height"] + 1
        # Current text attributes (defaults: white on black, plain).
        self.fgcolor = 7
        self.bgcolor = 0
        self.bold = False
        self.inverse = False
        self.charset = 0
        self.charset_enabled = False
        # Extent of cells actually written, used to size the output graphic.
        self.max_row = 0
        self.max_col = 0
        self.no_background = False
        self.invert_blackwhite = False
        # Cursor position saved/restored by CSI s / CSI u.
        self.saved_row = 0
        self.saved_col = 0
        # Prototype for an empty cell.
        self.clear = {"char": ' ', "fgcolor": 7, "bgcolor": 0, "bold": False, "inverse": False, "charset": 0}
        # G1 glyph table (line-drawing characters, indexed from 0x5f).
        self.g1 = ["~", "◆", "▒", "␉", "␌", "␍", "␊", "°", "±", "", "␋", "┘", "┐", "┌", "└", "┼", "⎺", "⎻", "─", "⎼", "⎽", "├", "┤", "┴", "┬", "│", "≤", "≥", "π", "≠", "£", "·"]
        # RGB palette: 8 normal colors followed by 8 bright variants.
        self.colors = [
            [1,1,1],
            [222,56,43],
            [57,181,74],
            [255,199,6],
            [0,111,184],
            [118,38,113],
            [44,181,233],
            [204,204,204],
            [128,128,128],
            [255,0,0],
            [0,255,0],
            [255,255,0],
            [0,0,255],
            [255,0,255],
            [0,255,255],
            [255,255,255]
        ]
        self.parse()

    def debug(self, string):
        # Print to stderr only when debug output is enabled.
        if self.debug_output:
            print(string, file=sys.stderr)

    def appendChar(self, c):
        """Write character *c* at the cursor with the current attributes
        and advance the cursor (wrapping the column only)."""
        self.row = min(max(0, self.row), self.height - 1)
        self.col = min(max(0, self.col), self.width - 1)
        if self.charset == 1:
            # Map through the G1 table when the alternate charset is active.
            idx = ord(c) - 0x5f
            if idx >= 0 and idx < len(self.g1): c = self.g1[idx] #.decode("utf8")
        self.screen[self.row][self.col] = {"char": c, "fgcolor": self.fgcolor, "bgcolor": self.bgcolor, "bold": self.bold, "inverse": self.inverse, "charset": self.charset}
        self.col += 1
        if self.col >= self.width:
            self.col = 0
            #self.row += 1
            #if self.row >= self.height:
            #    self.row = 0
        if self.row > self.max_row: self.max_row = self.row
        if self.col > self.max_col: self.max_col = self.col

    def parseANSI(self, line):
        """Handle one escape sequence at the start of *line* (the text
        after the ESC byte) and return how many characters were consumed."""
        self.row = min(max(0, self.row), self.height - 1)
        self.col = min(max(0, self.col), self.width - 1)
        # Scan forward to the final (alphabetic) byte of the sequence.
        eoc = 0
        while eoc < len(line) and not line[eoc].isalpha() and line[eoc] != '\x1b': eoc += 1
        if eoc >= len(line): eoc = len(line) - 1
        #print(line)
        code = line[eoc]
        special = False
        # '?' introduces private-mode parameters (e.g. ESC[?25l).
        if len(line) > 1 and line[1] == '?':
            special = True
        while eoc > 0 and not line[eoc].isalpha(): eoc -= 1
        # Numeric parameter string; may become an int or a list of ints.
        num = (line[1:eoc] if not special else line[2:eoc])
        if len(num) > 0:
            #print("Num: %s" % num)
            if ";" in num:
                num = [int(x) if len(x) > 0 else 1 for x in num.split(";")]
            elif len(num) > 0:
                num = int(num)
        #print(line[0] + " / " + code)
        eoc += 1
        if line[0] == '[':
            # CSI sequences: cursor movement, attributes, erasing, etc.
            if code == 'A':
                # Cursor up.
                if num == '': num = 1
                self.row -= num
                if self.row < 0: self.row = 0
            elif code == 'B':
                # Cursor down.
                if num == '': num = 1
                self.row += num
                if self.row >= self.height: self.row = self.height - 1
            elif code == 'C':
                # Cursor right.
                if num == '': num = 1
                self.col += num
                if self.col >= self.width: self.col = self.width - 1
            elif code == 'D':
                # Cursor left.
                if num == '': num = 1
                self.col -= num
                if self.col < 0: self.col = 0
            elif code == 'm':
                # SGR: update the current text attributes.
                if not isinstance(num, list): num = [ num ]
                for n in num:
                    if n == '' or n == 0:
                        # Reset all attributes.
                        self.fgcolor = 7
                        self.bgcolor = 0
                        self.bold = False
                        self.inverse = False
                    elif n == 1:
                        self.bold = True
                    elif n == 7:
                        self.inverse = True
                    elif n == 22:
                        self.bold = False
                        self.inverse = False
                    elif n == 24:
                        # Underline off: not tracked.
                        pass
                    elif n == 27:
                        self.inverse = False
                    elif n >= 30 and n <= 37:
                        self.fgcolor = n - 30
                    elif n >= 40 and n <= 47:
                        self.bgcolor = n - 40
                    elif n == 49:
                        self.bgcolor = 0
                    elif n == 39:
                        self.fgcolor = 7
                    elif n >= 90 and n <= 97:
                        # Bright foreground: base color + bold flag.
                        self.fgcolor = n - 90
                        self.bold = True
                    elif n >= 100 and n <= 107:
                        # Bright background: base color + bold flag.
                        self.bgcolor = n - 100
                        self.bold = True
                    else:
                        self.debug("Unhandled: ^]m: %s" % str(num))
            elif code == 'J':
                # Erase in display.
                if num == '' or num == 0:
                    # From cursor to end of screen.
                    x = self.col
                    y = self.row
                    while y < self.height:
                        while x < self.width:
                            self.screen[y][x] = self.clear
                            x += 1
                        x = 0
                        y += 1
                elif num == 1:
                    # From cursor back to start of screen.
                    x = self.col
                    y = self.row
                    while y >= 0:
                        while x >= 0:
                            self.screen[y][x] = self.clear
                            x -= 1
                        x = self.width - 1
                        y -= 1
                elif num == 2 or num == 3:
                    # Entire screen; cursor moves home.
                    x = 0
                    y = 0
                    while y < self.height:
                        while x < self.width:
                            self.screen[y][x] = self.clear
                            x += 1
                        x = 0
                        y += 1
                    self.col = 0
                    self.row = 0
                else:
                    self.debug("Unhandled: ^]J: %s" % str(num))
            elif code == 'H' or code == 'f':
                # Cursor position (row;col, or just row, or home).
                if isinstance(num, list):
                    self.row = num[0]
                    self.col = num[1]
                elif num == '':
                    self.col = 0
                    self.row = 0
                else:
                    self.col = 0
                    self.row = num
            elif code == 'd':
                # Vertical position absolute.
                self.row = num
                self.col = 1
            elif code == 'G':
                # Horizontal position absolute.
                if num == '':
                    self.col = 0
                else:
                    self.col = num
            elif code == 'K':
                # Erase in line, filling with the current background color.
                clear = dict(self.clear)
                clear["bgcolor"] = self.bgcolor
                if num == '' or num == 0:
                    for x in range(self.col, self.width): self.screen[self.row][x] = dict(clear)
                elif num == 1:
                    for x in range(self.col): self.screen[self.row][x] = dict(clear)
                elif num == 2:
                    for x in range(0, self.width): self.screen[self.row][x] = dict(clear)
                else:
                    self.debug("Unhandled: ^]K: %s" % str(num))
            elif code == 's':
                # Save cursor position.
                self.saved_col = self.col
                self.saved_row = self.row
            elif code == 'u':
                # Restore cursor position.
                self.col = self.saved_col
                self.row = self.saved_row
            else:
                self.debug("Unhandled: ^]%s (special: %s, num: %s)" % (code, "yes" if special else "no", str(num)))
            if self.row > self.max_row: self.max_row = self.row
            if self.col > self.max_col: self.max_col = self.col
            return eoc
        elif line[0] == '=' or line[0] == '>' or line[0] == '*' or line[0] == '+' or line[0] == ',' or line[0] == '-' or line[0] == '.' or line[0] == '/':
            # Single-character sequences: consumed, otherwise ignored.
            return 1
        elif line[0] == ')':
            # Designate G1 charset: enables shift-in/shift-out handling.
            self.charset_enabled = True
            return 2
        elif line[0] == '(':
            return 2
        elif line[0] == ']':
            # OSC sequence: skip up to and including the BEL terminator.
            while line[eoc] != '\x07': eoc += 1
            return eoc + 1
        else:
            self.debug("Unhandled: %s" % line[0])
            return eoc

    def parse(self):
        """Replay the recorded stream into the screen buffer."""
        self.screen = [[self.clear for x in range(self.width)] for y in range(self.height)]
        self.row = 0
        self.col = 0
        for line in self.lines:
            ci = 0
            while ci < len(line):
                c = line[ci]
                if c != '\x1b':
                    # Non-escape bytes: control characters and printable text.
                    if c == '\x0e' and self.charset_enabled:
                        # Shift-out: switch to the G1 charset.
                        self.charset = 1
                    elif c == '\x0f' and self.charset_enabled:
                        # Shift-in: back to the default charset.
                        self.charset = 0
                    elif c == '\b':
                        if self.col > 0: self.col -= 1
                    elif c == '\r':
                        self.col = 0
                    elif c == '\n':
                        self.row += 1
                    else:
                        self.appendChar(c)
                else:
                    # ESC: delegate to the escape-sequence parser.
                    ci += self.parseANSI(line[ci+1:])
                ci += 1
        if self.row > self.max_row: self.max_row = self.row
        if self.col > self.max_col: self.max_col = self.col

    def show_info(self):
        """Print recording metadata (frames, size, version, env, duration)."""
        print("Frames: %d" % len(self.meta["stdout"]))
        print("Terminal (columns x rows): %d x %d" % (self.width, self.height))
        print("")
        print("asciicast version: %d" % (self.meta["version"]))
        print("Environment: %s" % str(self.meta["env"]))
        print("Duration: %.2fs" % self.meta["duration"])

    def dump(self):
        """Print the screen buffer as plain text, skipping blank lines."""
        for line in self.screen:
            pline = ""
            for data in line:
                pline += data["char"]
            if len(pline.strip()) > 0: print(pline)

    def sanitizeLatexChar(self, c):
        """Escape *c* for inclusion in LaTeX output."""
        if c == '%':
            return "\%"
        if c == '~':
            return "\\textasciitilde"
        if c == '^':
            return "$\hat{}$"
        if c == "#":
            return "\\#"
        if c == '_':
            return "\\_"
        if c == '\\':
            return "\\textbackslash"
        if c == '$':
            return "\\$"
        if c == '{':
            return "\\{"
        if c == '}':
            return "\\}"
        # Control characters that can slip through the buffer: drop them.
        if c == '\r' or c == '\n':
            return ''
        if c == '\b':
            return ''
        return c

    def toTikz(self, header = False, footer = False, invert_blackwhite = False, background = True):
        """Render the screen buffer as a TikZ picture string.

        header/footer wrap the picture in a standalone document; the other
        flags mirror the command-line options.
        """
        pline = ""
        if header: pline += self.tikzHeader()
        pline += "\\resizebox{\hsize}{!}{\\begin{tikzpicture}[yscale=-1]\\ttfamily\n"
        if background: pline += "\\draw[fill=%s,draw=none] (-1em,-1em) rectangle +(%.1fem,%dem);\n" % ("ansi7" if invert_blackwhite else "ansi0", (self.max_col + 4) / 2.0, self.max_row + 2)
        for row,line in enumerate(self.screen):
            for col,data in enumerate(line):
                # Skip cells that would render as the default background.
                if data["char"] == ' ' and ((data["inverse"] == False and data["bgcolor"] == 0) or (data["inverse"] and data["fgcolor"] == 0)):
                    continue
                # Inverse video swaps foreground and background.
                fg = data["fgcolor" if not data["inverse"] else "bgcolor"]
                bg = data["bgcolor" if not data["inverse"] else "fgcolor"]
                if invert_blackwhite:
                    if fg == 0: fg = 7
                    elif fg == 7: fg = 0
                    if bg == 0: bg = 7
                    elif bg == 7: bg = 0
                bgcol = "ansi%d" % bg
                if not background and ((not invert_blackwhite and bg == 0) or (invert_blackwhite and bg == 7)): bgcol = "none"
                # Bold selects the bright palette entry (+8).
                if data["bold"] and not (invert_blackwhite and fg == 7 and not background): fg += 8
                pline += "\\draw[fill=%s,draw=none] (%.1fem,%dem) rectangle +(0.5em,1em) node[pos=.5, anchor=base, yshift=-0.5ex] {\\textcolor{ansi%d}{%s}};\n" % (bgcol, col / 2.0, row, fg, self.sanitizeLatexChar(data["char"]))
        pline += "\\end{tikzpicture}}"
        if footer: pline += self.tikzFooter()
        return pline

    def tikzColors(self):
        """Return \\definecolor lines for the ANSI palette."""
        line = ""
        for i,c in enumerate(self.colors):
            line += "\\definecolor{ansi%d}{RGB}{%d,%d,%d}\n" % (i, c[0], c[1], c[2])
        return line

    def tikzHeader(self):
        """Return the LaTeX preamble for a standalone TikZ document."""
        line = "\\documentclass{article}\n"
        line += "\\usepackage[utf8]{inputenc}\n"
        line += "\\usepackage{tikz}\n"
        line += "\\usepackage{color}\n"
        line += "\\usepackage{amssymb}\n"
        line += "\\usepackage{pmboxdraw}\n"
        line += self.tikzColors()
        line += "\\begin{document}\n"
        return line

    def tikzFooter(self):
        """Return the closing line of the standalone TikZ document."""
        return "\\end{document}\n"

    def toSVG(self, invert_blackwhite = False, background = True):
        """Render the screen buffer as an SVG document string."""
        header = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\" \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n<svg version=\"1.1\" preserveAspectRatio=\"xMinYMin meet\" xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" x=\"0em\" y=\"0em\" width=\"%.1fem\" height=\"%dem\" viewBox=\"0 0 %.1f %d\" font-family=\"monospace\" >\n"
        footer = "</svg>"
        char = "<rect x=\"%.1f\" y=\"%.1f\" width=\"1\" height=\"1\" style=\"fill:rgb(%d,%d,%d);stroke-width:0;\" /><text transform=\"translate(%.1f, %.2f)\" alignment-baseline=\"baseline\" style=\"font-size:1;fill:rgb(%d,%d,%d);\">%s</text>\n"
        char_nobg = "<text transform=\"translate(%.1f, %.2f)\" alignment-baseline=\"baseline\" style=\"font-size:1;fill:rgb(%d,%d,%d);\">%s</text>\n"
        pline = header % ((self.max_col + 4) / 2.0, self.max_row + 2, (self.max_col + 4) / 2.0, self.max_row + 2, )
        bcol = self.colors[0] if not invert_blackwhite else self.colors[7]
        if background: pline += "<rect width=\"%.1f\" height=\"%d\" x=\"0\" y=\"0\" style=\"fill:rgb(%d,%d,%d);stroke-width:0;\" />" % ((self.max_col + 4) / 2.0, self.max_row + 2, bcol[0], bcol[1], bcol[2])
        for row,line in enumerate(self.screen):
            for col,data in enumerate(line):
                # Skip cells that would render as the default background.
                if data["char"] == ' ' and ((data["inverse"] == False and data["bgcolor"] == 0) or (data["inverse"] and data["fgcolor"] == 0)):
                    continue
                # Inverse video swaps foreground and background.
                fg = data["fgcolor" if not data["inverse"] else "bgcolor"]
                bg = data["bgcolor" if not data["inverse"] else "fgcolor"]
                if invert_blackwhite:
                    if fg == 0: fg = 7
                    elif fg == 7: fg = 0
                    if bg == 0: bg = 7
                    elif bg == 7: bg = 0
                bgcol = self.colors[bg]
                if not background and ((not invert_blackwhite and bg == 0) or (invert_blackwhite and bg == 7)): bgcol = None
                # Bold selects the bright palette entry (+8).
                if data["bold"] and not (invert_blackwhite and fg == 7 and not background): fg += 8
                if bgcol is None:
                    pline += char_nobg % (col / 2.0 - 0.1 + 1, row + .5 + 1, self.colors[fg][0], self.colors[fg][1], self.colors[fg][2], data["char"])
                else:
                    pline += char % (col / 2.0 + 1, row + 0.7, bgcol[0], bgcol[1], bgcol[2], col / 2.0 - 0.1 + 1, row + .5 + 1, self.colors[fg][0], self.colors[fg][1], self.colors[fg][2], data["char"])
        pline += footer
        return pline
def main():
    """CLI entry point: parse options, then render the asciicast to SVG or TikZ.

    Prints errors to stdout and returns; never raises to the shell.
    """
    opts, args = options.parse_args()
    if opts.version:
        print("asciicast2vector 1.0")
        return
    if len(args) < 1:
        options.print_help()
        return
    try:
        p = ANSIParser(args[0], opts.start, opts.end)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only real parse failures should be caught here.
        print("Could not parse '%s'" % args[0])
        return
    if opts.query:
        p.show_info()
        return
    if opts.type == "svg":
        img = p.toSVG(invert_blackwhite = opts.invert, background = not opts.background)
    elif opts.type == "tikz":
        img = p.toTikz(header = not opts.content, footer = not opts.content, invert_blackwhite = opts.invert, background = not opts.background)
    else:
        print("Unknown output format: %s" % opts.type)
        return
    if opts.out == "-":
        print(img)
    else:
        try:
            with open(opts.out, "w") as out:
                out.write(img)
        except OSError:
            # FIX: narrowed from bare `except:` to filesystem errors only.
            print("Could not save file '%s'" % opts.out)
            return
# Script entry point: run only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
from collections import deque
def breadth_first_search(graph, start):
    """Return the set of all nodes reachable from `start` in `graph`.

    graph: adjacency mapping, node -> iterable of neighbor nodes.
    start: node to begin the traversal from.

    BUG FIX: the original seeded the queue with `search_queue += start`
    (which iterates string nodes character-by-character) and then enqueued
    `graph[each]` instead of `each`, so neighbors themselves were never
    marked visited and the returned set was wrong.
    """
    search_queue = deque([start])
    visited = set()
    while search_queue:
        node = search_queue.popleft()
        if node in visited:
            continue  # already expanded via a shorter path
        visited.add(node)
        for neighbor in graph[node]:
            if neighbor not in visited:
                search_queue.append(neighbor)
    return visited
|
import os.path  # NOTE(review): `os.environ` is used below; `import os.path`
                # does bind `os`, but a plain `import os` would be clearer.
from deployer import Deployer
# This script expects that the following environment vars are set:
#
# AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
# AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
# AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret
# AZURE_SUBSCRIPTION_ID: with your target subscription ID
my_subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', '11111111-1111-1111-1111-111111111111') # your Azure Subscription Id
msg = "\nInitializing the Deployer class with subscription id: {}\n\n"
msg = msg.format(my_subscription_id)
print(msg)
# Initialize the deployer class
deployer = Deployer(my_subscription_id)
print("Beginning the deployment... \n\n")
# Deploy the templates: first at resource-group scope, then at subscription scope.
#deployer.createResourceGroup("MyResourceGroup")
deployer.resource_group="MyResourceGroup"
my_deployment = deployer.deployAtResourceGroupScope("vnets.json")
# NOTE(review): this rebinding discards the first deployment handle --
# confirm that only the subscription-scope result is needed afterwards.
my_deployment = deployer.deployAtSubscriptionScope('resourcegroups.json')
print("Done deploying!!")
|
import influxdb_client
import pandas as pd
from configs import config
class Measurements:
    """Read-side helper over the InfluxDB query API for station measurements."""
    def __init__(self) -> None:
        # Client settings come from the [influx] section of the app config.
        self.client = influxdb_client.InfluxDBClient(
            url=config["influx"]["host"],
            token=config["influx"]["token"],
            org=config["influx"]["org"]
        )
        self.query_api = self.client.query_api()
    def for_station(self, station_id: str, date_from: str, date_to: str, conv_win_size=1) -> pd.DataFrame:
        """Return mean-aggregated measurements for one station as a DataFrame.

        date_from/date_to are interpolated directly into the Flux range()
        call, so they must be valid Flux time expressions; conv_win_size is
        the aggregation window in hours.
        """
        # NOTE(review): station_id and the dates are interpolated unescaped
        # into the Flux query -- safe only for trusted inputs.
        return self.query_api.query_data_frame(f"""
            from(bucket:"{config["influx"]["bucket"]}")
                |> range(start: {date_from}, stop: {date_to})
                |> filter(fn: (r) => r["stationId"] == "{station_id}")
                |> aggregateWindow(every: {conv_win_size}h, fn: mean, createEmpty: false)
                |> yield(name: "mean")
        """)
|
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def page(query_set, request):
    """Return the requested 5-item page of `query_set`, clamped to valid pages.

    Falls back to page 1 when the ?page= parameter is not an integer, and to
    the last page when the requested page is out of range.
    """
    paginator = Paginator(query_set, 5)
    page_number = request.GET.get('page')
    try:
        page_query = paginator.page(page_number)
    except PageNotAnInteger:
        page_query = paginator.page(1)
    except EmptyPage:
        # BUG FIX: was `paginator.page(paginator, num_pages)` -- a comma typo
        # that raised NameError (undefined `num_pages`) instead of returning
        # the last page.
        page_query = paginator.page(paginator.num_pages)
    return page_query
def get_range(paged_set):
    """Return up to 7 page numbers centred on the current page of `paged_set`."""
    current = paged_set.number - 1  # zero-based index of the current page
    total = len(paged_set.paginator.page_range)
    lower = current - 3 if current >= 3 else 0
    upper = current + 3 if current <= total - 3 else total
    return list(paged_set.paginator.page_range)[lower:upper]
|
'''
Utility classes for testing
'''
from django.test import TestCase
from django.contrib.auth.models import User
from ...posts.models import Post
from ..models import Organization
class BaseTestOrganizations(TestCase):
    """Shared fixture: an owner user, their organization, and the owner's posts.

    Creation order in setUp matters: the organization requires the owner, and
    the posts require both the owner and the organization.
    """

    def setUp(self):
        self.owner = self._create_organization_owner()
        self.organization = self._create_organization()
        self.owner_posts = self._create_posts_from_owner()

    def tearDown(self):
        # Removing the owner; related rows presumably cascade -- confirm
        # against the model definitions.
        self.owner.delete()

    def _create_organization_owner(self):
        """Create the owning User; credentials are kept on self for assertions."""
        self.username = "testing"
        self.email = "test@email.com"
        self.password = "testing#2020"
        return User.objects.create_user(
            username=self.username,
            password=self.password,
            email=self.email,
        )

    def _create_posts_from_owner(self):
        """Create one post per entry of self.post_texts, authored by the owner."""
        self.post_texts = [
            "Hello world",
            "This is a test post",
        ]
        return [
            Post.objects.create(
                author=self.owner,
                organization=self.organization,
                text=text,
            )
            for text in self.post_texts
        ]

    def _create_organization(self):
        """Create the organization; name/description kept on self for assertions."""
        self.organization_name = "Test Org Inc."
        self.organization_description = "for testing"
        return Organization.objects.create(
            owner=self.owner,
            name=self.organization_name,
            description=self.organization_description,
        )
|
import numpy as np
class LogisticRegression:
    """Binary logistic regression trained with batch gradient descent."""

    def __init__(self, learning_rate, n_iters):
        self.learning_rate = learning_rate
        self.n_iters = n_iters
        self.weights = None  # set by fit(); shape (n_features,)
        self.bias = None     # set by fit(); scalar

    @staticmethod
    def _sigmoid(scores):
        """Logistic function mapping raw scores into (0, 1)."""
        return 1 / (1 + np.exp(-scores))

    def fit(self, features, target):
        """Learn weights and bias with n_iters steps of gradient descent.

        features: array of shape (n_samples, n_features).
        target: array of 0/1 labels, shape (n_samples,).
        """
        n_samples, n_features = features.shape
        self.weights = np.zeros(n_features)
        self.bias = 0
        for _ in range(self.n_iters):
            predictions = self._sigmoid(np.dot(features, self.weights) + self.bias)
            # Gradients of the log-loss w.r.t. weights and bias.
            dw = (1 / n_samples) * np.dot(features.T, (predictions - target))
            db = (1 / n_samples) * np.sum(predictions - target)
            self.weights -= self.learning_rate * dw
            # BUG FIX: was `self.bias -= self.bias * db`, which can never move
            # the bias away from its 0 initialization; scale by the learning
            # rate exactly like the weight update.
            self.bias -= self.learning_rate * db

    def predict(self, features):
        """Return 0/1 class labels, thresholding the probability at 0.5."""
        probabilities = self._sigmoid(np.dot(features, self.weights) + self.bias)
        return [1 if p > 0.5 else 0 for p in probabilities]
|
from setuptools import setup  # type: ignore

# All package metadata is expected to come from declarative configuration
# (setup.cfg / pyproject.toml); setup() remains only as the legacy entry
# point. The commented-out _get_version() helper that was marked
# "Will be deleted" has been removed.
setup()
|
from .plotHandler import *
from .spatialPlotters import *
from .hourglassPlotters import *
from .onedPlotters import *
from .ndPlotters import *
from .specialPlotters import *
from .neoDistancePlotter import *
from .plotBundle import *
from .twoDPlotters import *
from .moPlotters import *
from .perceptual_rainbow import *
from .nightPointingPlotter import *
from .hgPlotters import *
|
from generate import *
from datetime import datetime
def main():
    """CLI entry point: parse generation flags, derive the checkpoint and
    output paths, then hand off to Generate().run().

    Example:
        python ./generate.py --length=512 --nsamples=1 --prefix=[MASK]哈利站在窗边
            --tokenizer_path cache/vocab_small.txt --topk 40
            --model_path model/model_epoch29 --save_samples
            --save_samples_path result/20210915_29_1135
            --model_config model/model_epoch29/config.json
            --repetition_penalty 1.05 --temperature 1.1
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--key', default='intro', type=str, required=False, help='哪个模型')
    parser.add_argument('--model_v', default='-1', type=str, required=False, help='第几个模型')
    parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='生成设备')
    parser.add_argument('--length', default=1024, type=int, required=False, help='生成长度')
    parser.add_argument('--batch_size', default=1, type=int, required=False, help='生成的batch size')
    parser.add_argument('--nsamples', default=1, type=int, required=False, help='生成几个样本')
    parser.add_argument('--temperature', default=1.1, type=float, required=False, help='生成温度')
    parser.add_argument('--topk', default=20, type=int, required=False, help='最高几选一')
    parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')
    parser.add_argument('--model_config', default='config/model_config_small.json', type=str, required=False,
                        help='模型参数')
    parser.add_argument('--tokenizer_path', default='cache/vocab_small.txt', type=str, required=False, help='词表路径')
    parser.add_argument('--model_path', default='model/final_model', type=str, required=False, help='模型路径')
    parser.add_argument('--prefix', default='哈利站在窗边', type=str, required=False, help='生成文章的开头')
    parser.add_argument('--no_wordpiece', action='store_true', help='不做word piece切词')
    parser.add_argument('--segment', action='store_true', help='中文以词为单位')
    parser.add_argument('--fast_pattern', action='store_true', help='采用更加快的方式生成文本')
    parser.add_argument('--save_samples', default=True, help='保存产生的样本')
    parser.add_argument('--save_samples_path', default='.', type=str, required=False, help="保存样本的路径")
    parser.add_argument('--repetition_penalty', default=1.05, type=float, required=False)
    args = parser.parse_args()
    print('args:\n' + repr(args))
    # Resolve a concrete checkpoint directory when an epoch number was given.
    # (The former `else: args.model_path = args.model_path` was a no-op and
    # has been removed.)
    if args.model_v != '-1':
        args.model_path = '{}/model_epoch{}'.format(args.model_path.split('/')[0], args.model_v)
    # Build a timestamp like 20210915_1135 (date + HHMM, seconds dropped).
    t = str(datetime.now())
    d = ''.join('_'.join(''.join(t.split(":")[:-1]).split(' ')).split('-'))
    args.save_samples_path = 'result_{}/{}_v{}'.format(args.key, d, args.model_v)
    Generate().run(args)
# Run generation only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
import logging
from datetime import datetime
from sqlalchemy.exc import IntegrityError
from haikubot import config
from haikubot.commands.commands import Commands
from haikubot.model.haiku import Haiku
from haikubot.utils.color import string_to_color_hex
from haikubot.utils.haiku_parser import is_haiku
from haikubot.utils.analyser import (
get_longest_word_haiku,
get_most_words_haiku,
get_least_words_haiku,
)
from haikubot.utils.wordclouder import generate_cloud
from haikubot.utils.timeliner import generate_timeline
def good_user(user):
    """A usable username is non-None and at least 3 characters long."""
    return user is not None and len(user) >= 3
def dict_to_haiku(haiku_dict):
    """Build a Haiku model instance from its dict representation."""
    values = [haiku_dict[key] for key in ("haiku", "author", "link", "id")]
    return Haiku(*values)
class CommandsParser:
    """Parses chat commands and dispatches them against the haiku store."""

    def __init__(self, store):
        self.store = store

    def handle_command(self, command, action_user):
        """Route a raw command string to the matching handler method."""
        logging.debug("Command {} recieved from user {}".format(command, action_user))
        if command.startswith(Commands.ADD_MOD.value):
            return self.add_mod(
                command.replace(Commands.ADD_MOD.value, "").strip(), action_user
            )
        elif command.startswith(Commands.REMOVE_MOD.value):
            return self.remove_mod(
                command.replace(Commands.REMOVE_MOD.value, "").strip(), action_user
            )
        elif command.startswith(Commands.LIST_MOD.value):
            return self.list_mods()
        elif command.startswith(Commands.ADD_HAIKU.value):
            return self.add_haiku(
                command.replace(Commands.ADD_HAIKU.value, "").strip(), action_user
            )
        elif command.startswith(Commands.DELETE_HAIKU.value):
            return self.delete_haiku(
                command.replace(Commands.DELETE_HAIKU.value, "")
                .strip()
                .replace("#", ""),
                action_user,
            )
        elif command.startswith(Commands.STATS_TOP.value):
            return self.stats_top(
                command.replace(Commands.STATS_TOP.value, "").strip().replace("#", "")
            )
        elif command.startswith(Commands.STATS_LONGEST.value):
            return self.stats_longest()
        elif command.startswith(Commands.STATS_MOST.value):
            return self.stats_most()
        elif command.startswith(Commands.STATS_FEWEST.value):
            return self.stats_fewest()
        elif command.startswith(Commands.STATS_TIMELINE.value):
            return self.stats_timeline(
                command.replace(Commands.STATS_TIMELINE.value, "")
                .strip()
                .replace("#", "")
            )
        elif command.startswith(Commands.LAST_HAIKU.value):
            return self.show_last_haiku()
        elif command.startswith(Commands.SHOW_FROM.value):
            clean = command.replace(Commands.SHOW_FROM.value, "").strip()
            author, num = clean.strip().split(" ")
            return self.show_from_haiku(author, num)
        elif command.startswith(Commands.SHOW_ID.value):
            return self.show_id_haiku(
                command.replace(Commands.SHOW_ID.value, "").strip().replace("#", "")
            )
        elif command.startswith(Commands.EXPORT.value):
            return self.plain_export(
                command.replace(Commands.EXPORT.value, "").strip().replace("#", "")
            )
        elif command.startswith(Commands.WORDCLOUD.value):
            return self.wordcloud(
                command.replace(Commands.WORDCLOUD.value, "").strip().replace("#", "")
            )
        return False, "Invalid command. Currently supported commands: " + str(
            Commands.manpage()
        )

    def add_mod(self, user, action_user):
        """Add `user` as a haikumod; only existing mods may do this."""
        if not self.store.is_mod(action_user):
            logging.debug("User not haikumod")
            return "User '{}' is not a haikumod".format(action_user)
        if good_user(user):
            try:
                self.store.put_mod(user)
            except IntegrityError:
                return "'{}' is already a haikumod".format(user)
            return "'{}' added as haikumod.".format(user)
        else:
            return "'{}' is not a valid username.".format(user)

    def remove_mod(self, user, action_user):
        """Remove `user` from the haikumods; only existing mods may do this."""
        if not self.store.is_mod(action_user):
            logging.debug("User not haikumod")
            return "User '{}' is not a haikumod".format(action_user)
        if good_user(user):
            self.store.remove_mod(user)
            return "'{}' has been removed as haikumod.".format(user)
        else:
            return "'{}' is not a valid username.".format(user)

    def list_mods(self):
        return "Current mods are: " + str(self.store.get_mods())

    def show_last_haiku(self):
        """Return the newest haiku and its id, or (False, message)."""
        haiku, eid = self.store.get_newest()
        if haiku is None:
            return False, "Could not find any haiku"
        return haiku, eid

    def show_from_haiku(self, search, num=None):
        """Return haikus matching `search`, optionally capped at `num`."""
        if num is None:
            haikus = self.store.get_by(search)
        else:
            haikus = self.store.get_by(search, num)
        return haikus

    def show_id_haiku(self, haiku_id):
        """Look up a single haiku by numeric id."""
        try:
            haiku_id = int(haiku_id)
        except ValueError:
            return False, '"{}" is not a valid number'.format(haiku_id)
        try:
            haiku = self.store.get_haiku(haiku_id)
            if haiku is None:
                return False, "Could not find haiku with id #{}".format(haiku_id)
            return haiku
        except AttributeError:
            return False, "Could not find haiku with id #{}".format(haiku_id)

    def stats_top(self, num):
        """Return the top-N haiku statistics; empty `num` means no limit."""
        if len(num) < 1:
            num = None
        else:
            try:
                num = int(num)
            except ValueError:
                return False, '"{}" is not a valid number'.format(num)
        stats = self.store.get_haiku_stats(num)
        return stats

    def stats_longest(self):
        haikus = self.store.get_all_haiku()
        longest, word = get_longest_word_haiku(haikus)
        return longest, word

    def stats_most(self):
        haikus = self.store.get_all_haiku()
        most_recent, number_of_words, other_long_haikus_ids = get_most_words_haiku(
            haikus
        )
        return most_recent, number_of_words, other_long_haikus_ids

    def stats_fewest(self):
        haikus = self.store.get_all_haiku()
        most_recent, number_of_words, other_short_haikus_ids = get_least_words_haiku(
            haikus
        )
        return most_recent, number_of_words, other_short_haikus_ids

    def plain_export(self, search=None):
        """Export haikus as plain text, optionally filtered by `search`."""
        if search is None:
            logging.debug("Found no search parameter, exporting everything.")
            haikus = self.store.get_all_haiku()
        elif len(search) < 3:
            logging.debug("Found search parameter but not long enough, aborting.")
            # BUG FIX: previously formatted the undefined name `user`,
            # raising NameError instead of returning this message.
            return False, '"{}" is not descriptive enough'.format(search)
        else:
            logging.debug('Exporting by query "{}"'.format(search))
            haikus = self.store.get_by(search, num=-1)
        if len(haikus) < 1:
            return False
        export_max = config.GROUP_HAIKU_EXPORT_SIZE
        # NOTE(review): the `return` inside this loop means only the first
        # chunk of export_max haikus is ever emitted -- confirm intent before
        # changing, since callers currently receive a single string.
        for c in range(0, len(haikus), export_max):
            haikus_simple = ""
            iteration_max = export_max if c + export_max < len(haikus) else len(haikus)
            for i in range(c, iteration_max):
                haikus_simple += "Haiku #{} by {}:\n".format(
                    haikus[i]["id"], haikus[i]["author"]
                )
                haikus_simple += haikus[i]["haiku"]
                haikus_simple += "\n"
            return haikus_simple

    def wordcloud(self, search):
        """Render a wordcloud image for an author, everyone, or the sprint."""
        is_sprint = search == "sprint"
        if is_sprint:
            search = ""
        if len(search) < 1:
            logging.debug("Found no author, making wordcloud for everything.")
            if not is_sprint:
                haikus = self.store.get_all_haiku()
            else:
                haikus = self.store.get_all_haiku_weeks(3)
                if len(haikus) == 0:
                    return False, "Couldn't find any haikus from the last 3 weeks."
            search = "everyone"
        elif len(search) < 3:
            logging.debug("Found search parameter but not long enough, aborting.")
            return False, '"{}" is not descriptive enough'.format(search)
        else:
            logging.debug("Found author, looking for haiku by {}.".format(search))
            haikus = self.store.get_by(search, num=-1)
            if len(haikus) < 1:
                return False, 'Found no haikus by "{}"'.format(search)
        haiku_blob = "".join([str(haiku["haiku"]) for haiku in haikus])
        if search == "everyone":
            image = generate_cloud(haiku_blob)
        else:
            image = generate_cloud(haiku_blob, string_to_color_hex(haikus[0]["author"]))
        filename = "Wordcloud for {}, {}.png".format(search, str(datetime.today()))
        return image, filename

    def stats_timeline(self, search):
        """Render a timeline image, optionally anonymous or sprint-scoped."""
        anonymous = "anonymous" in search
        is_sprint = "sprint" in search
        if is_sprint:
            search = ""
        if not is_sprint:
            haikus = self.store.get_all_haiku()
        else:
            haikus = self.store.get_all_haiku_weeks(3)
            if len(haikus) == 0:
                return False, "Couldn't find any haikus from the last 3 weeks."
        image = generate_timeline(haikus, anonymous)
        filename = "Timeline until {}.png".format(str(datetime.today()))
        return image, filename

    def add_haiku(self, haiku_string, author, source=None):
        """Validate a manually-posted haiku and store it.

        BUG FIX: handle_command invokes this with only (haiku_string,
        action_user), but `source` used to be a required third parameter, so
        every such call raised TypeError. It now defaults to None, which is
        passed through to the Haiku link field.
        """
        haiku_split = haiku_string.replace("\r", "").split("\n")
        if len(haiku_split) > 3 and is_haiku(haiku_split[0:3]):
            haiku = Haiku("\n".join(haiku_split[0:3]), haiku_split[3].title(), source)
            if len(haiku.author) > 8:
                if not self.store.has_posted_haiku(haiku.author):
                    if not (len(haiku_split) > 4 and haiku_split[4] == "Yes"):
                        return (
                            "{} doesn't have any existing haiku, are you sure the name is"
                            " correct? Repeat the request with a 'Yes' on a new line to verify,".format(
                                haiku.author
                            )
                        )
                try:
                    self.store.put_haiku_model(haiku)
                except IntegrityError:
                    return "{} tried posting a duplicate haiku, boo!".format(author)
                return "Added haiku #{}.".format(haiku.hid)
            else:
                return "'{}' is not a valid author name".format(haiku_split[3])
        else:
            return (
                "That's either not a valid haiku, or you forgot to supply and author. Remember to"
                " linebreak after the command, then supply 3 individual lines with the fourth line "
                "being the author.".format(haiku_string)
            )

    def delete_haiku(self, haiku_id, action_user):
        """Delete a haiku by id; restricted to haikumods."""
        if not self.store.is_mod(action_user):
            logging.debug("User not mod")
            return "User '{}' is not a haikumod".format(action_user)
        try:
            # Validation only; the store accepts the id in its original form.
            # (Previously bound to `id`, shadowing the builtin and never used.)
            int(haiku_id)
        except ValueError:
            return '"{}" is not a valid haiku id'.format(haiku_id)
        deleted_haiku = self.store.get_haiku(haiku_id)
        result = self.store.remove_haiku(haiku_id)
        success = result.rowcount > 0
        if success:
            return deleted_haiku, haiku_id
        return "There was no haiku with id #{}".format(haiku_id)
|
# Copyright Notice:
# Copyright 2018 Dell, Inc. All rights reserved.
# License: BSD License. For full license text see link: https://github.com/RedDrum-Redfish-Project/RedDrum-Simulator/LICENSE.txt
# BullRed-RackManager managersBackend resources
#
class RdManagersBackend:
    """Simulator backend implementations of the Managers resource APIs."""

    def __init__(self, rdr):
        self.version = 1
        self.rdr = rdr

    def updateResourceDbs(self, managerid, updateStaticProps=False, updateNonVols=True):
        """Sync resourceDB/volatileDict; the simulator never updates the
        database after discovery, so this always reports (0, False)."""
        self.rdr.logMsg("DEBUG", "--------BACKEND updateResourceDBs. updateStaticProps={}".format(updateStaticProps))
        return (0, False)

    def doManagerReset(self, managerid, resetType):
        """Perform the manager 'Reset' action; a no-op in the simulator."""
        self.rdr.logMsg("DEBUG", "--------BACKEND managerReset. resetType={}".format(resetType))
        return 0

    def doPatch(self, managerid, patchData):
        """Apply a PATCH (DateTime, DateTimeOffset) that the front-end has
        already validated; the simulator only logs it."""
        self.rdr.logMsg("DEBUG", "--------BACKEND Patch manager: {} data. patchData={}".format(managerid, patchData))
        return 0

    def updateManagerNetworkProtocolsDbFromBackend(self, mgrid, noCache=False):
        """Refresh the NetworkProtocols DB; nothing to do in the simulator."""
        return 0

    def updateManagerEthernetEnterfacesDbFromBackend(self, mgrid, noCache=False, ethid=None):
        """Refresh the EthernetInterfaces DB; nothing to do in the simulator."""
        return 0
|
import requests, os, shutil, stat, struct
from sys import platform
def getPathDriver(config):
    """Resolve chromedriver/geckodriver paths from the parsed config.

    Returns {'chrome': path_or_False, 'firefox': path_or_False}. A driver is
    downloaded only when its USE_* flag is set and no explicit location is
    configured (the literal string 'None'); an explicit location is returned
    as-is; otherwise the entry is False.
    """
    # Map sys.platform values onto the key names used by the config sections.
    platform_names = {"linux": "linux", "linux2": "linux",
                      "darwin": "macos", "win32": "windows"}
    if platform in platform_names:
        sys_platform = platform_names[platform]
    bit_system = struct.calcsize("P") * 8  # 32- or 64-bit interpreter
    paths = {}

    # Chrome: only one linux build is configured (64-bit).
    chrome_cfg = config['CHROME']
    if chrome_cfg.getboolean('USE_CHROME') and chrome_cfg['CHROME_GECKODRIVER_LOCATION'] == 'None':
        if sys_platform == 'linux':
            driver_url = chrome_cfg['linux64']
        elif sys_platform == 'macos':
            driver_url = chrome_cfg['macos']
        elif sys_platform == 'windows':
            driver_url = chrome_cfg['windows']
        paths['chrome'] = downloadAndExtract(driver_url, 'chromedriver')
    elif chrome_cfg.getboolean('USE_CHROME') and chrome_cfg['CHROME_GECKODRIVER_LOCATION']:
        paths['chrome'] = chrome_cfg['CHROME_GECKODRIVER_LOCATION']
    else:
        paths['chrome'] = False

    # Firefox: linux/windows builds are keyed by word size, macos is not.
    firefox_cfg = config['FIREFOX']
    if firefox_cfg.getboolean('USE_FIREFOX') and firefox_cfg['FIREFOX_GECKODRIVER_LOCATION'] == 'None':
        if sys_platform == 'macos':
            key = 'macos'
        else:
            key = '%s%d' % (sys_platform, bit_system)
        paths['firefox'] = downloadAndExtract(firefox_cfg[key], 'geckodriver')
    elif firefox_cfg.getboolean('USE_FIREFOX') and firefox_cfg['FIREFOX_GECKODRIVER_LOCATION']:
        paths['firefox'] = firefox_cfg['FIREFOX_GECKODRIVER_LOCATION']
    else:
        paths['firefox'] = False
    return paths
# Download and extract a browser driver archive
def downloadAndExtract(url, driver_name):
    """Download a driver archive, extract it into ./drivers, and return the
    path of the executable; skips the download when the driver already exists.

    url: archive URL; its final path segment names the local archive file.
    driver_name: filename of the executable inside the archive.
    """
    arc_path = os.path.join(os.getcwd(), url.split('/')[-1])
    driver_dir = os.path.join(os.getcwd(), 'drivers')
    driver_path = os.path.join(driver_dir, driver_name)
    # Only download when the driver is not already cached.
    if not os.path.exists(driver_path):
        r = requests.get(url, allow_redirects=True)
        try:
            # FIX: the archive file handle was never closed
            # (`open(...).write(...)`); use a context manager.
            with open(arc_path, 'wb') as archive:
                archive.write(r.content)
        finally:
            r.close()
        shutil.unpack_archive(arc_path, driver_dir)
        # FIX: os.chmod(path, stat.S_IXUSR) *replaced* the mode, stripping
        # read permission; add the execute bit to the existing bits instead.
        os.chmod(driver_path, os.stat(driver_path).st_mode | stat.S_IXUSR)
        # Delete the downloaded archive now that it is extracted.
        os.remove(arc_path)
    return driver_path
|
#!python
from __future__ import with_statement
import glob
import os.path
import shutil
import zipfile
def isFileEntry(s):
    """True when a zip entry name denotes a file (directory entries end in '/')."""
    return not s.endswith('/')
def readFile(fname):
    """Return the entire contents of `fname` as bytes."""
    with open(fname, 'rb') as handle:
        return handle.read()
def writeFile(fname, data):
    """Overwrite `fname` with the given bytes."""
    with open(fname, 'wb') as handle:
        handle.write(data)
def mkdirs(path):
    """Create `path` and any missing parents, ignoring OS errors
    (e.g. the directory already exists)."""
    try:
        os.makedirs(path)
    except os.error:
        pass
def upgradeArchive(src, dst, pkgdir, backupdir, inhibitOverwrite):
    """Merge the new package archive `src` over the installed copy.

    src: path of the new .sublime-package zip.
    dst: path of the pristine copy of the previous version (may not exist).
    pkgdir: directory the archive's files are extracted into.
    backupdir: where user-modified files are saved before being replaced.
    inhibitOverwrite: when True, never replace or delete files the user
        already has (used for the "User" package).
    """
    mkdirs(pkgdir)
    mkdirs(os.path.dirname(dst))
    newar = zipfile.ZipFile(src)
    newfiles = set(filter(isFileEntry, newar.namelist()))
    # The previous archive may be missing or corrupt (e.g. first install).
    oldar = None
    oldfiles = set()
    try:
        oldar = zipfile.ZipFile(dst)
        oldfiles = set(filter(isFileEntry, oldar.namelist()))
    except (zipfile.error, IOError):
        pass
    # delete any orphaned files (present in the old archive but not the new)
    if not inhibitOverwrite:
        orphanedFiles = oldfiles - newfiles
        for f in orphanedFiles:
            # backup the file, if the user has modified it
            try:
                orig = oldar.read(f)
                user = readFile(os.path.join(pkgdir, f))
                if user != orig:
                    backupFile = os.path.join(backupdir, f)
                    mkdirs(os.path.dirname(backupFile))
                    writeFile(backupFile, user)
            except (os.error, IOError):
                pass
            try:
                os.remove(os.path.join(pkgdir, f))
            except (os.error, IOError):
                pass
    # extract any new files (in the new archive but not the old)
    for f in newfiles - oldfiles:
        fname = os.path.join(pkgdir, f)
        mkdirs(os.path.dirname(fname))
        try:
            # A pre-existing non-empty file here was created by the user.
            user = readFile(fname)
            if user:
                if inhibitOverwrite:
                    continue;
                else:
                    # Backup the old file
                    backupFile = os.path.join(backupdir, f)
                    mkdirs(os.path.dirname(backupFile))
                    writeFile(backupFile, user)
        except (os.error, IOError):
            pass
        writeFile(fname, newar.read(f))
    # Remove the old archive, so we won't try and merge anything twice in case
    # we get a failure below
    try:
        os.remove(dst)
    except (os.error, IOError):
        pass
    # upgrade each file present in both archives
    if not inhibitOverwrite:
        for f in oldfiles & newfiles:
            fname = os.path.join(pkgdir, f)
            orig = oldar.read(f)
            new = newar.read(f)
            if new != orig:
                # backup the file, if the user has modified it
                try:
                    user = readFile(fname)
                    if user != orig:
                        backupFile = os.path.join(backupdir, f)
                        mkdirs(os.path.dirname(backupFile))
                        writeFile(backupFile, user)
                except (os.error, IOError):
                    pass
                writeFile(fname, newar.read(f))
    # copy the zip so it becomes the new pristine reference
    shutil.copy(src, dst)
def removeArchive(dst, pkgdir):
    """Delete everything extracted from the archive `dst` under `pkgdir`,
    then the archive itself and (if empty) the package directory.

    All filesystem errors are deliberately ignored: best-effort cleanup.
    """
    oldar = None
    oldfiles = set()
    try:
        oldar = zipfile.ZipFile(dst)
        oldfiles = set(filter(isFileEntry, oldar.namelist()))
    except (zipfile.error, IOError):
        pass
    # FIX: oldar stays None when dst is missing/corrupt; the unconditional
    # oldar.close() raised AttributeError instead of cleaning up.
    if oldar is not None:
        oldar.close()
    # delete every file the archive had extracted
    for f in oldfiles:
        try:
            os.remove(os.path.join(pkgdir, f))
        except (os.error, IOError):
            pass
    # delete the archive and the (now hopefully empty) package directory
    try:
        os.remove(dst)
    except (os.error, IOError):
        pass
    try:
        os.rmdir(pkgdir)
    except (os.error, IOError):
        pass
def srcNewer(src, dst):
    """True if `src` looks newer than `dst` (later mtime or different size);
    errs on the side of True when either file cannot be stat'ed."""
    try:
        if os.path.getmtime(src) > os.path.getmtime(dst):
            return True
        return os.path.getsize(src) != os.path.getsize(dst)
    except (os.error, IOError):
        return True
def upgradePackage(pkg, pristinedir, datadir, backupdir):
    """Merge `pkg` into datadir when it differs from the pristine copy."""
    pristinePkg = os.path.join(pristinedir, os.path.basename(pkg))
    if not srcNewer(pkg, pristinePkg):
        return  # pristine copy already matches this archive; nothing to do
    base = os.path.splitext(os.path.basename(pkg))[0]
    # The special "User" package must never clobber the user's own files.
    upgradeArchive(pkg, pristinePkg, os.path.join(datadir, base),
                   os.path.join(backupdir, base), base == "User")
def upgrade(appdir, userdir, pristinedir, datadir, backupdir):
    """Upgrade every installed .sublime-package and prune ones that vanished."""
    packages = (glob.glob(appdir + "/*.sublime-package") +
                glob.glob(userdir + "/*.sublime-package"))
    for pkg in packages:
        upgradePackage(pkg, pristinedir, datadir, backupdir)
    # Archives still in pristinedir whose source package no longer exists.
    current = {os.path.basename(p) for p in packages}
    pristine = {os.path.basename(p)
                for p in glob.glob(pristinedir + "/*.sublime-package")}
    for name in pristine - current:
        base = os.path.splitext(name)[0]
        removeArchive(os.path.join(pristinedir, name),
                      os.path.join(datadir, base))
|
def resolve():
    """Read a 3x3 integer grid from stdin and print 'Yes' when every column
    pair has the same difference in all three rows, else 'No'."""
    grid = [[int(i) for i in input().split()] for _ in range(3)]
    # Column index c-1 wraps to the last column when c == 0, exactly matching
    # the pairwise differences the check needs.
    consistent = all(
        grid[0][c - 1] - grid[0][c]
        == grid[1][c - 1] - grid[1][c]
        == grid[2][c - 1] - grid[2][c]
        for c in range(3)
    )
    print('Yes' if consistent else 'No')
# Solve the puzzle only when executed directly, not when imported.
if __name__ == "__main__":
    resolve()
|
#! python3
"""Transforms text to a markdown bulleted list based on indentation.
Contains:
`bullet_list(text: str) -> list[str]`
: Converts the text passed to it into a markdown list.
: Each item in list is a line of the text ending in a newline.
`from_clipboard() -> None`
: Takes text from clipboard and transforms it into a markdown list.
: Places transformed text into clipboard.
- Can be run as a module in another file or as a script in a CLI.
- Dependency: pyperclip module.
"""
import re
import io
def bullet_list(text: str, as_todo: bool = False) -> list:
    r"""
    Converts the text passed to it into a markdown list based on indentation.
    Args:
        `text` (str): The text to be transformed into a markdown list.
        `as_todo` (bool): When True, emit "- [ ]" todo bullets instead of "-".
    Returns:
        list: Contains each line of the text. Each item ends with `"\n"`
    """
    lines = text.splitlines()
    bullet = '-'
    if as_todo:
        bullet = '- [ ]'
    # Define one indent as being two spaces to match Markdown indent.
    one_indent = r'\s{2}'
    pattern = re.compile(one_indent)
    # Obtain number of indents in the line with the least number of indents.
    min_num_indents = min([len(pattern.findall(line)) for line in lines
                           if pattern.match(line)])
    # Adjust pattern to find lines of list items based on min_num_indents.
    pattern = re.compile(
        fr'''
        ^({one_indent}){{{min_num_indents}}} # == (' ' * 2) * min_num_indents
        ([ ]*\w) # Captured to adjust for nested list items.
        ''',
        re.VERBOSE)
    # Add markdown-style list bullets to lines matching intent rule.
    for i, line in enumerate(lines):
        # Replacing leading min_num_indents with "- "
        newline = pattern.sub(fr'{bullet} \2', line, 1)
        # Adjust for nested list items.
        pattern2 = re.compile(fr'^[{bullet}]((\s){{2,}}) \w')
        match = pattern2.match(newline)
        if match:
            # NOTE(review): min_num_indents is reassigned here and then reused
            # by later iterations (and as the nesting width below) -- confirm
            # this survives documents with several nesting levels.
            min_num_indents = len(match.group(1)) // min_num_indents
            # Remove excess space after '-' and appropriately nest list items.
            newline = newline.replace(match.group(1), '').replace(
                f'{bullet}', f'{" " * min_num_indents}{bullet}')
        # Replace line in lines with newline
        lines[i] = f'{newline}\n'
    return lines
def from_clipboard(as_todo: bool = False) -> None:
    """
    Clipboard front-end for bullet_list(). Uses pyperclip.

    Reads the current clipboard text, transforms it into a markdown list,
    and writes the result back to the clipboard.
    """
    # Imported locally: pyperclip is only needed for this entry point.
    import pyperclip
    source_text = pyperclip.paste()
    # bullet_list() returns newline-terminated lines; joining them yields the
    # final text directly.
    transformed = "".join(bullet_list(source_text, as_todo))
    pyperclip.copy(transformed)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/8/4 上午10:49
# @Author : Dicey
# @File : AWSAutoTagTotal3.0.py
# @Software: PyCharm
from __future__ import print_function
import json
import boto3
import logging
from AWSAutotagTotal import awstaglib
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
    """Tag the AWS resource referenced by a CloudTrail event.

    Dispatches on the CloudTrail eventName to the matching awstaglib.Tagger
    method. Returns True on success, False for unsupported event types.
    """
    detail = event['detail']
    eventname = detail['eventName']
    tagger = awstaglib.Tagger(event)
    # CloudTrail eventName -> tagging routine (replaces a 9-branch if/elif chain).
    handlers = {
        'RunInstances': tagger.tag_ec2,               # EC2 launch
        'CreateTable': tagger.tag_dynamodb,           # DynamoDB table
        'CreateFunction20150331': tagger.tag_lambda,  # Lambda function
        'CreateDBInstance': tagger.tag_rds,           # RDS instance
        'CreateCluster': tagger.tag_redshift,         # Redshift cluster
        'CreateBucket': tagger.tag_s3_bucket,         # S3 bucket
        'PutObject': tagger.tag_s3_object,            # S3 object
        'CreateQueue': tagger.tag_sqs,                # SQS queue
        'CreateVpc': tagger.tag_vpc,                  # VPC
    }
    handler = handlers.get(eventname)
    if handler is None:
        logger.warning('Not supported action')
        return False
    handler()
    logger.info("Success!")
    return True
class Movie:
    """A movie together with the per-user ratings recorded for it."""

    def __init__(self, movie_id, movie_title, release_date):
        self.movie_id = movie_id
        self.movie_title = movie_title
        self.release_date = release_date
        self.ratings = {}  # user_id -> rating

    def __str__(self):
        return "Movie ID: {} Movie Title {}".format(self.movie_id, self.movie_title)

    def add_rating(self, movie_id, user_id, rating):
        """Record `rating` from `user_id`, but only when movie_id matches."""
        if movie_id != self.movie_id:
            return
        self.ratings[user_id] = rating
class User:
    """A user identified by id, tracking the ratings they have given to movies."""

    def __init__(self, user_id, movie_id, rating):
        self.user_id = user_id
        self.ratings = {movie_id: rating}  # movie_id -> rating value

    def __str__(self):
        return "User ID: {} Number of ratings: {}".format(self.user_id, len(self.ratings))

    def add_rating(self, user_id, movie_id, rating):
        """Record a rating for *movie_id*, but only if *user_id* matches this user."""
        if user_id == self.user_id:
            self.ratings[movie_id] = rating
class Rating:
    """Lookup helpers over collections of movie and user objects.

    movies: {movie_id: movie object} -- each movie has .ratings and .movie_title
    users:  {user_id: user object}   -- each user has .ratings
    """

    def __init__(self, movies, users):
        self.movies = movies  # {movie_id : movie_object}
        self.users = users  # {user_id : user object}

    def average_rating(self, movie_id):
        """Return the mean rating for *movie_id*, or 0.0 if it has no ratings.

        The guard avoids a ZeroDivisionError for movies nobody has rated yet.
        """
        ratings = self.movies[movie_id].ratings
        if not ratings:
            return 0.0
        return sum(ratings.values()) / len(ratings)

    def users_ratings(self, user_id):
        """Return the {movie_id: rating} dict for *user_id*."""
        return self.users[user_id].ratings

    def movies_ratings(self, movie_id):
        """Return the {user_id: rating} dict for *movie_id*."""
        return self.movies[movie_id].ratings

    def get_movie_title(self, movie_id):
        """Return the title of the movie with *movie_id*."""
        return self.movies[movie_id].movie_title
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from ralph.cmdb.tests.utils import ServiceCatalogFactory
from ralph.discovery.tests.util import DeviceFactory
from ralph_assets.models_assets import Orientation
from ralph_assets.rest.serializers.models_dc_asssets import (
TYPE_ACCESSORY,
TYPE_ASSET,
)
from ralph_assets.tests.utils.assets import (
AssetFactory,
RackFactory,
RackAccessoryFactory,
)
class TestRestAssetInfoPerRack(TestCase):
    """End-to-end test of the per-rack asset REST endpoint.

    Builds two racks: rack_1 holds two assets with linked devices, a PDU
    (position 0) and an accessory; rack_2 holds an unrelated asset and
    accessory that must NOT appear in rack_1's response.
    """

    def setUp(self):
        # Authenticated API client; the endpoint requires a logged-in user.
        User.objects.create_superuser('test', 'test@test.test', 'test')
        self.client = APIClient()
        self.client.login(username='test', password='test')
        self.rack_1 = RackFactory(max_u_height=3)
        rack_2 = RackFactory()
        # Core devices with management IPs, linked to assets below.
        self.dev_1 = DeviceFactory(name="h001.dc")
        self.dev_1.management_ip = '10.20.30.1'
        self.dev_2 = DeviceFactory(name="h002.dc")
        self.dev_2.management_ip = '10.20.30.2'
        self.asset_1 = AssetFactory(
            device_info__position=1,
            device_info__slot_no='',
            device_info__ralph_device_id=self.dev_1.id,
            service=ServiceCatalogFactory(name='Alpha Service'),
        )
        self.asset_2 = AssetFactory(
            device_info__position=2,
            device_info__slot_no='',
            device_info__ralph_device_id=self.dev_2.id,
            service=ServiceCatalogFactory(name='Beta Service'),
        )
        # asset_3 lives in rack_2 and should be excluded from rack_1 results.
        asset_3 = AssetFactory()
        self.rack_1.deviceinfo_set.add(self.asset_1.device_info)
        self.rack_1.deviceinfo_set.add(self.asset_2.device_info)
        rack_2.deviceinfo_set.add(asset_3.device_info)
        # Position 0 marks the asset as a PDU (reported under 'pdus').
        self.pdu_1 = AssetFactory(
            device_info__rack=self.rack_1,
            device_info__orientation=Orientation.left,
            device_info__position=0,
        )
        self.rack_1.deviceinfo_set.add(self.pdu_1.device_info)
        self.rack1_accessory = RackAccessoryFactory(
            rack=self.rack_1,
            server_room=self.rack_1.server_room,
            data_center=self.rack_1.server_room.data_center,
            orientation=Orientation.front,
        )
        self.rack2_accessory = RackAccessoryFactory(
            rack=rack_2,
            server_room=rack_2.server_room,
            data_center=rack_2.server_room.data_center,
            orientation=Orientation.front,
        )

    def tearDown(self):
        # Log out so sessions don't leak between tests.
        self.client.logout()

    def test_get(self):
        """GET /assets/api/rack/<id>/ returns rack info, devices and PDUs."""
        core_url = '/ui/search/info/{0}'
        returned_json = json.loads(
            self.client.get(
                '/assets/api/rack/{0}/'.format(self.rack_1.id)
            ).content
        )
        # Show full diffs on mismatch -- the expected payload is large.
        self.maxDiff = None
        expected_json = {
            'info': {
                'id': self.rack_1.id,
                'name': self.rack_1.name,
                'data_center': self.rack_1.data_center.id,
                'server_room': self.rack_1.server_room.id,
                'max_u_height': self.rack_1.max_u_height,
                'visualization_col': self.rack_1.visualization_col,
                'visualization_row': self.rack_1.visualization_row,
                'free_u': self.rack_1.get_free_u(),
                'description': '{}'.format(self.rack_1.description),
                'orientation': '{}'.format(self.rack_1.get_orientation_desc()),
                'rack_admin_url': reverse(
                    'admin:ralph_assets_rack_change', args=(self.rack_1.id,),
                )
            },
            'devices':
            [
                {
                    '_type': TYPE_ASSET,
                    'id': self.asset_1.id,
                    'hostname': self.dev_1.name,
                    'url': '{}'.format(self.asset_1.get_absolute_url()),
                    'core_url': core_url.format(
                        self.asset_1.device_info.ralph_device_id),
                    'category': '{}'.format(self.asset_1.model.category),
                    'barcode': self.asset_1.barcode,
                    'sn': '{}'.format(self.asset_1.sn),
                    'height': float(self.asset_1.model.height_of_device),
                    'position': self.asset_1.device_info.position,
                    'model': self.asset_1.model.name,
                    'children': [],
                    'front_layout': u'',
                    'back_layout': u'',
                    'management_ip': self.dev_1.management_ip.address,
                    'service': self.asset_1.service.name,
                    'orientation': 'front',
                },
                {
                    '_type': TYPE_ASSET,
                    'id': self.asset_2.id,
                    'hostname': self.dev_2.name,
                    'url': '{}'.format(self.asset_2.get_absolute_url()),
                    'core_url': core_url.format(
                        self.asset_2.device_info.ralph_device_id),
                    'category': '{}'.format(self.asset_2.model.category),
                    'barcode': self.asset_2.barcode,
                    'sn': '{}'.format(self.asset_2.sn),
                    'height': float(self.asset_2.model.height_of_device),
                    'position': self.asset_2.device_info.position,
                    'model': self.asset_2.model.name,
                    'children': [],
                    'front_layout': u'',
                    'back_layout': u'',
                    'management_ip': self.dev_2.management_ip.address,
                    'service': self.asset_2.service.name,
                    'orientation': 'front',
                },
                {
                    '_type': TYPE_ACCESSORY,
                    'orientation': 'front',
                    'position': self.rack1_accessory.position,
                    'remarks': self.rack1_accessory.remarks,
                    'type': self.rack1_accessory.accessory.name,
                },
            ],
            'pdus': [
                {
                    'model': self.pdu_1.model.name,
                    'orientation': 'left',
                    'url': self.pdu_1.get_absolute_url(),
                    'sn': '{}'.format(self.pdu_1.sn)
                },
            ]
        }
        self.assertEqual(returned_json, expected_json)
|
if __name__ == '__main__':
    # Inference script: run a trained pix2pix-style generator over the test
    # set and save fake/real image pairs. The (commented-out) section below
    # once computed correlation/TUMF statistics on the outputs.
    import os
    import torch
    import numpy as np
    from options import TestOption
    from pipeline import CustomDataset
    from networks import Generator
    from utils import Manager, binning_and_cal_pixel_cc
    from torch.utils.data import DataLoader
    from tqdm import tqdm

    # Checkpoint selection: iteration number, dataset std variant, model name.
    ITERATION = 470000
    STD = 0
    MODEL_NAME = 'pix2pix'

    torch.backends.cudnn.benchmark = True

    # Dataset, checkpoint and output paths derived from STD / MODEL_NAME / ITERATION.
    dir_input = './datasets/Over_{}_std/Test/Input'.format(str(STD))
    dir_target = './datasets/Over_{}_std/Test/Target'.format(str(STD))
    dir_model = './checkpoints/Over_{}_std/Model/{}'.format(str(STD), MODEL_NAME)
    path_model = './checkpoints/Over_{}_std/Model/{}/{}_G.pt'.format(str(STD), MODEL_NAME, str(ITERATION))
    dir_image_save = './checkpoints/Over_{}_std/Image/Test/{}/{}'.format(str(STD), MODEL_NAME, str(ITERATION))
    os.makedirs(dir_image_save, exist_ok=True)

    opt = TestOption().parse()
    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_ids)
    device = torch.device('cuda:0')

    # Test data is iterated one sample at a time, in a fixed order.
    dataset = CustomDataset(opt)
    test_data_loader = DataLoader(dataset, batch_size=1, num_workers=2, shuffle=False)

    # Load the generator weights for the selected iteration.
    G = Generator(opt).to(device)
    G.load_state_dict(torch.load(path_model))
    manager = Manager(opt)

    # Accumulators for the (currently disabled) statistics pass below.
    list_TUMF_fake = list()
    list_TUMF_real = list()
    list_cc_1x1_fake = list()
    list_cc_1x1_real = list()
    list_cc_1x1 = list()
    list_cc_bin_2x2 = list()
    list_cc_bin_4x4 = list()
    list_cc_bin_8x8 = list()
    list_R1 = list()
    list_R2 = list()

    # Flat indices of the pixels inside a radius-392 circle centered at
    # (511, 511) on a 1024x1024 grid -- the region of interest for stats.
    circle_index = list()
    k = 0
    for i in range(1024):
        for j in range(1024):
            if (i - 511) ** 2 + (j - 511) ** 2 <= 392 ** 2:
                circle_index.append(k)
            k += 1

    with torch.no_grad():
        G.eval()
        for input, target, _, name in tqdm(test_data_loader):
            input = input.to(device)
            fake = G(input)
            # Save generated and ground-truth images side by side on disk.
            manager.save_image(fake, path=os.path.join(dir_image_save, name[0] + '_fake.png'))
            manager.save_image(target, path=os.path.join(dir_image_save, name[0] + '_real.png'))
            # # Model measurements (disabled)
            # np_fake = fake.cpu().numpy().squeeze() * 100.
            # np_real = target.cpu().numpy().squeeze() * 100.
            # np_fake_flatten, np_real_flatten = np_fake.flatten(), np_real.flatten()
            # # rearrange [-100, 100]
            # carrier_fake, carrier_real = list(), list()
            #
            # for i in circle_index:
            #     list_cc_1x1_fake.append(np_fake_flatten[i])
            #     list_cc_1x1_real.append(np_real_flatten[i])
            #     if abs(np_fake_flatten[i]) >= 10.:
            #         carrier_fake.append(abs(np_fake_flatten[i]))
            #     if abs(np_real_flatten[i]) >= 10.:
            #         carrier_real.append(abs(np_real_flatten[i]))
            #
            # TUMF_fake, TUMF_real = np.array(carrier_fake).sum(), np.array(carrier_real).sum()
            # list_TUMF_fake.append(TUMF_fake)
            # list_TUMF_real.append(TUMF_real)
            # list_R1.append((TUMF_fake - TUMF_real) / TUMF_real)
            #
            # list_cc_1x1.append(np.corrcoef(list_cc_1x1_fake, list_cc_1x1_real)[0][1])
            # list_R2.append(((np.array(list_cc_1x1_fake) - np.array(list_cc_1x1_real)) ** 2).sum() / (
            #         np.array(list_cc_1x1_real) ** 2).sum())
            #
            # # list_cc_bin_2x2.append(binning_and_cal_pixel_cc(np_fake, np_real, 2))
            # # list_cc_bin_4x4.append(binning_and_cal_pixel_cc(np_fake, np_real, 4))
            # list_cc_bin_8x8.append(binning_and_cal_pixel_cc(np_fake, np_real, 8))
            #
            # del input, target, fake, np_fake, np_real, np_fake_flatten, np_real_flatten, carrier_fake, carrier_real
            # del TUMF_fake, TUMF_real, _, name
        #
        # cc_TUMF = np.corrcoef(list_TUMF_fake, list_TUMF_real)[0][1]
        # cc_1x1 = np.mean(list_cc_1x1)
        # # cc_bin_2x2 = np.mean(list_cc_bin_2x2)
        # # cc_bin_4x4 = np.mean(list_cc_bin_4x4)
        # cc_bin_8x8 = np.mean(list_cc_bin_8x8)
        #
        # R1_mean = np.mean(list_R1)
        # R1_std = np.std(list_R1)
        #
        # R2_mean = np.mean(list_R2)
        # R2_std = np.std(list_R2)
        #
        # with open(os.path.join(dir_image_save, 'Analysis.txt'), 'wt') as analysis:
        #     analysis.write(str(ITERATION) + ', ' + str(cc_TUMF) + ', ' + str(cc_1x1) + ', ' +
        #                    # str(cc_bin_2x2) + ', ' + str(cc_bin_4x4) + ', ' +
        #                    str(cc_bin_8x8) + ', ' +
        #                    str(R1_mean) + ', ' + str(R1_std) + ', ' + str(R2_mean) + ', ' + str(R2_std) + '\n')
        #     analysis.close()
|
class Term(object):
    """A logical term: either a variable (identified by name) or a constant
    (identified by value), optionally annotated with a type.

    Construct via the Term.variable() / Term.constant() factories.
    """

    def __init__(self, **kwargs):
        self._name = kwargs.get('name', None)
        self._type = kwargs.get('type', None)
        self._value = kwargs.get('value', None)

    @property
    def name(self):
        """The variable name, or None for constants."""
        return self._name

    @property
    def type(self):
        """The type annotation, or None if untyped."""
        return self._type

    @property
    def value(self):
        """The constant value, or None for variables."""
        return self._value

    def is_variable(self):
        """True when this term carries a name."""
        return self._name is not None

    def is_typed(self):
        """True when this term carries a type annotation."""
        return self._type is not None

    def is_constant(self):
        """True when this term carries a value."""
        return self._value is not None

    @classmethod
    def variable(cls, name, type=None):
        """Build a variable term. Uses cls so subclasses construct themselves."""
        return cls(name=name, type=type)

    @classmethod
    def constant(cls, value, type=None):
        """Build a constant term. Uses cls so subclasses construct themselves."""
        return cls(value=value, type=type)

    def __str__(self):
        if self.is_variable() and self.is_typed():
            return '{0} - {1}'.format(self._name, self._type)
        if self.is_variable():
            return '{0}'.format(self._name)
        if self.is_constant() and self.is_typed():
            return '{0} - {1}'.format(self._value, self._type)
        if self.is_constant():
            return '{0}'.format(self._value)
        # Neither variable nor constant: return '' instead of None, which
        # would raise TypeError when str() is applied to the term.
        return ''
|
import time
import bpy
import mathutils
class RootMotionData(bpy.types.PropertyGroup):
    """Scene-level settings for the root-motion operators (bound to
    Scene.rm_data in register())."""
    # Names of the hip/root bones the operators read and write.
    hip = bpy.props.StringProperty(name="Hip Bone")
    root = bpy.props.StringProperty(name="Root Bone")
    # Name of the temporary reference copy created by debug_character().
    copy = bpy.props.StringProperty(name="Debug Character")
    # Keyframe sampling interval in frames used by steps().
    step = bpy.props.IntProperty(name="Step Size", default=3, min=1)
    no_rot = bpy.props.BoolProperty(name="Ignore Rotation")
    do_vert = bpy.props.BoolProperty(name="Extract Vertical Motion")
    # TODO maybe implement these someday
    #bias_trans = bpy.props.FloatProperty(name="Translation Bias", default=0.1, min=0, soft_max=0.5)
    #bias_rot = bpy.props.FloatProperty(name="Rotation Bias", default=0.7854, min=0, soft_max=3.14159, subtype='ANGLE')
class ANIM_OT_extract_root_motion(bpy.types.Operator):
    """Transfer hip bone motion to root bone"""
    bl_idname = "anim.rm_extract_root_motion"
    bl_label = "Create Root Motion"
    bl_options = {'REGISTER', 'UNDO'}

    # Armature being edited; captured in invoke().
    skel = None

    @classmethod
    def poll(cls, context):
        return valid_armature(context) is not None

    def modal(self, context, event):
        """Bake the hip's horizontal travel onto the root bone, then re-anchor
        the hip against the untouched reference copy."""
        # Reference copy preserves the original animation while we overwrite
        # curves on the working armature.
        ref = debug_character(context, self.skel)
        data = context.scene.rm_data
        frames = self.skel.animation_data.action.frame_range
        # Drop all existing f-curves that animate the hip bone.
        expr = "\"%s\"" % data.hip
        curves = self.skel.animation_data.action.fcurves
        for c in curves:
            if expr in c.data_path:
                curves.remove(c)
        root = self.skel.pose.bones[data.root]
        ref_hip = ref.pose.bones[data.hip]
        # Flattened hip direction at the start frame; used as the rotation
        # baseline below.
        ref_hip_vec = (ref_hip.head - ref_hip.tail)
        ref_hip_vec.z = 0
        ref_mtx = world_mtx(ref, ref_hip)
        for f in steps(context, frames):
            context.scene.frame_set(f)
            mtx = world_mtx(ref, ref_hip)
            # Root follows the hip's world-space displacement since frame 1.
            mtx_trans = mathutils.Matrix.Translation(mtx.translation - ref_mtx.translation)
            if not data.do_vert:
                # Keep root motion on the ground plane unless vertical
                # extraction was requested.
                mtx_trans.translation.z = 0
            root.matrix = pose_mtx(self.skel, root, mtx_trans)
            if not data.no_rot:
                # Rotate the root by the hip's horizontal turn relative to
                # the baseline direction.
                hip_vec = (ref_hip.head - ref_hip.tail)
                hip_vec.z = 0
                root.rotation_quaternion = ref_hip_vec.rotation_difference(hip_vec)
            root.scale = (1, 1, 1)
            root.keyframe_insert(data_path="location")
            root.keyframe_insert(data_path="rotation_quaternion")
        # Second pass: pin the hip to its original world transform on every
        # frame so the combined root+hip motion matches the reference.
        hip = self.skel.pose.bones[data.hip]
        for f in range(round(frames.x), round(frames.y) + 1):
            context.scene.frame_set(f)
            hip.matrix = pose_mtx(self.skel, hip, world_mtx(ref, ref_hip))
            hip.keyframe_insert(data_path="rotation_quaternion")
            hip.keyframe_insert(data_path="location")
            hip.keyframe_insert(data_path="scale")
        return {'FINISHED'}

    def invoke(self, context, event):
        """Capture the active armature, default bone names, and go modal."""
        self.skel = valid_armature(context)
        context.scene.frame_set(self.skel.animation_data.action.frame_range.x)
        data = context.scene.rm_data
        # Fall back to the first two pose bones when names are unset.
        if data.root == "":
            data.root = self.skel.pose.bones[0].name
        if data.hip == "":
            data.hip = self.skel.pose.bones[1].name
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
class ANIM_OT_integrate_root_motion(bpy.types.Operator):
    """Transfer root bone motion to hip bone"""
    bl_idname = "anim.rm_integrate_rm"
    bl_label = "Integrate Root Motion"
    bl_options = {'REGISTER', 'UNDO'}

    # Armature being edited; captured in invoke().
    skel = None

    @classmethod
    def poll(cls, context):
        return valid_armature(context) is not None

    def modal(self, context, event):
        """Fold the root's motion back into the hip, leaving the root static."""
        # Reference copy holds the combined root+hip motion to copy from.
        ref = debug_character(context, self.skel)
        data = context.scene.rm_data
        # Drop existing f-curves for both the root and the hip bone.
        root_expr = "\"%s\"" % data.root
        hip_expr = "\"%s\"" % data.hip
        curves = self.skel.animation_data.action.fcurves
        for c in curves:
            if root_expr in c.data_path or hip_expr in c.data_path:
                curves.remove(c)
        root = self.skel.pose.bones[data.root]
        # Pin the root at its current (rest) transform on the start frame.
        root.keyframe_insert(data_path="rotation_quaternion")
        root.keyframe_insert(data_path="location")
        root.keyframe_insert(data_path="scale")
        hip = self.skel.pose.bones[data.hip]
        ref_hip = ref.pose.bones[data.hip]
        for f in steps(context, self.skel.animation_data.action.frame_range):
            context.scene.frame_set(f)
            # Copy the reference hip's world transform onto the working hip;
            # with the root static, the hip now carries all the motion.
            ref_mtx = world_mtx(ref, ref_hip)
            hip.matrix = pose_mtx(self.skel, hip, ref_mtx)
            hip.keyframe_insert(data_path="rotation_quaternion")
            hip.keyframe_insert(data_path="location")
            hip.keyframe_insert(data_path="scale")
            root.keyframe_insert(data_path="rotation_quaternion")
            root.keyframe_insert(data_path="location")
            root.keyframe_insert(data_path="scale")
        return {'FINISHED'}

    def invoke(self, context, event):
        """Capture the active armature, default bone names, and go modal."""
        self.skel = valid_armature(context)
        context.scene.frame_set(self.skel.animation_data.action.frame_range.x)
        data = context.scene.rm_data
        # Fall back to the first two pose bones when names are unset.
        if data.root == "":
            data.root = self.skel.pose.bones[0].name
        if data.hip == "":
            data.hip = self.skel.pose.bones[1].name
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
class ANIM_OT_animate_in_place(bpy.types.Operator):
    """Remove root motion from action, causing it to animate in-place"""
    bl_idname = "anim.rm_anim_in_place"
    bl_label = "Animate In Place"
    bl_options = {'REGISTER', 'UNDO'}

    # Armature being edited; captured in invoke().
    skel = None

    @classmethod
    def poll(cls, context):
        return valid_armature(context) is not None

    def modal(self, context, event):
        """Delete the root bone's animation curves and pin it at the ends."""
        data = context.scene.rm_data
        # Drop all existing f-curves that animate the root bone.
        expr = "\"%s\"" % data.root
        curves = self.skel.animation_data.action.fcurves
        for c in curves:
            if expr in c.data_path:
                curves.remove(c)
        root = self.skel.pose.bones[data.root]
        frames = self.skel.animation_data.action.frame_range
        # Key only the first and last frame so the root holds one pose.
        for f in [round(frames.x), round(frames.y)]:
            context.scene.frame_set(f)
            root.keyframe_insert(data_path="rotation_quaternion")
            root.keyframe_insert(data_path="location")
            root.keyframe_insert(data_path="scale")
        return {'FINISHED'}

    def invoke(self, context, event):
        """Capture the active armature, default root name, and go modal."""
        self.skel = valid_armature(context)
        context.scene.frame_set(self.skel.animation_data.action.frame_range.x)
        data = context.scene.rm_data
        if data.root == "":
            data.root = self.skel.pose.bones[0].name
        context.window_manager.modal_handler_add(self)
        return {'RUNNING_MODAL'}
class ANIM_OT_remove_ref_character(bpy.types.Operator):
    """Remove reference character and its properties"""
    bl_idname = "anim.rm_remove_ref_char"
    bl_label = "Finalize Root Motion Operation"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        # Only available while a reference copy is recorded in the scene.
        return context.scene.rm_data.copy != ""

    def execute(self, context):
        """Delete the reference copy's action and object, and clear its name."""
        char = bpy.data.objects.get(context.scene.rm_data.copy)
        # Clear the stored name first so poll() disables the operator even
        # if the object has already disappeared.
        context.scene.rm_data.copy = ""
        if char is None:
            return {'CANCELLED'}
        anim = char.animation_data.action
        if anim != None:
            # Second positional arg presumably forces removal despite users
            # (do_unlink) -- TODO confirm against the bpy API version in use.
            bpy.data.actions.remove(anim, True)
        context.scene.objects.unlink(char)
        bpy.data.objects.remove(char, True)
        return {'FINISHED'}
class PANEL_PT_main_panel(bpy.types.Panel):
    """Tool-shelf UI panel exposing the root-motion settings and operators."""
    bl_idname = "PANEL_PT_root_motionist_main"
    bl_label = "Root Motionist"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_category = "Animation"
    bl_context = "objectmode"

    @classmethod
    def poll(cls, context):
        # Panel is shown only for a usable armature (see valid_armature()).
        return valid_armature(context) is not None

    def draw(self, context):
        """Lay out bone pickers, options, and the four operator buttons."""
        layout = self.layout
        obj = context.active_object
        # Bone selection and action picker.
        col = layout.column(align=True)
        col.prop_search(context.scene.rm_data, "root", obj.pose, "bones", text="Root")
        col.prop_search(context.scene.rm_data, "hip", obj.pose, "bones", text="Hip")
        col.prop(obj.animation_data, "action", text="Anim")
        # Operator options.
        col = layout.column(align=True)
        col.prop(context.scene.rm_data, "step")
        col.prop(context.scene.rm_data, "no_rot")
        col.prop(context.scene.rm_data, "do_vert")
        layout.separator()
        # Operator buttons.
        col = layout.column(align=True)
        row = col.row(align=True)
        row.operator("anim.rm_extract_root_motion", text="Extract")
        row.operator("anim.rm_integrate_rm", text="Integrate")
        col.operator("anim.rm_anim_in_place", text="Animate In-Place")
        layout.operator("anim.rm_remove_ref_char", text="Delete Ref Character")
def valid_armature(context):
    """Return the active object if it is an armature usable for root-motion
    editing, else None.

    Usable means: at least two pose bones (root + hip) and an assigned
    action on its animation data.
    """
    skel = context.active_object
    if skel is None or skel.type != 'ARMATURE':
        return None
    if len(skel.pose.bones) < 2:
        return None
    # Guard against objects that have no animation data at all; the original
    # unconditional attribute access raised AttributeError in that case.
    if skel.animation_data is None or skel.animation_data.action is None:
        return None
    return skel
def world_mtx(armature, bone):
    """Return *bone*'s pose-space matrix converted into world space."""
    return armature.convert_space(bone, bone.matrix, from_space='POSE', to_space='WORLD')
def pose_mtx(armature, bone, mat):
    """Return world-space matrix *mat* converted into *bone*'s pose space."""
    return armature.convert_space(bone, mat, from_space='WORLD', to_space='POSE')
def debug_character(context, original):
    """Return the reference copy of *original*, creating it if needed.

    The copy (object, mesh data and action) preserves the untouched
    animation while an operator rewrites curves on the original. Its name
    is remembered in scene.rm_data.copy so later calls reuse it.
    """
    char = bpy.data.objects.get(context.scene.rm_data.copy)
    if char is not None:
        return char
    # Deep-ish copy: duplicate the object, its data, and its action so edits
    # to the original cannot leak into the reference.
    char = original.copy()
    char.data = original.data.copy()
    char.animation_data.action = original.animation_data.action.copy()
    # Timestamp suffix keeps the name unique across repeated runs.
    char.name = "skel" + str(int(time.time()))
    context.scene.rm_data.copy = char.name
    context.scene.objects.link(char)
    return char
def steps(context, frames):
    """Frame numbers spanning *frames* at the scene's configured step size,
    always ending exactly on the final frame."""
    start, end = round(frames.x), round(frames.y)
    stride = context.scene.rm_data.step
    sampled = list(range(start, end + 1, stride))
    # Guarantee the last frame is keyed even when the stride skips past it.
    if sampled[-1] != end:
        sampled.append(end)
    return sampled
def register():
    """Register all add-on classes and attach the settings to the Scene."""
    for cls in (RootMotionData,
                ANIM_OT_extract_root_motion,
                ANIM_OT_integrate_root_motion,
                ANIM_OT_animate_in_place,
                ANIM_OT_remove_ref_character,
                PANEL_PT_main_panel):
        bpy.utils.register_class(cls)
    bpy.types.Scene.rm_data = bpy.props.PointerProperty(type=RootMotionData)
def unregister():
    """Detach the Scene settings and unregister all add-on classes."""
    del bpy.types.Scene.rm_data
    for cls in (RootMotionData,
                ANIM_OT_extract_root_motion,
                ANIM_OT_integrate_root_motion,
                ANIM_OT_animate_in_place,
                ANIM_OT_remove_ref_character,
                PANEL_PT_main_panel):
        bpy.utils.unregister_class(cls)
|
class Urls(SQLObject):
    """ORM mapping for the `urls` table; columns are reflected from the DB."""
    class sqlmeta:
        # Introspect column definitions from the existing database schema.
        fromDatabase = True
class Badurls(SQLObject):
    """ORM mapping for the `badurls` table; columns are reflected from the DB."""
    class sqlmeta:
        # Introspect column definitions from the existing database schema.
        fromDatabase = True
class Word(SQLObject):
    """ORM mapping for the `word` table; columns are reflected from the DB."""
    class sqlmeta:
        # Introspect column definitions from the existing database schema.
        fromDatabase = True
class Semanticlist(SQLObject):
    """ORM mapping for the `semanticlist` table; columns are reflected from the DB."""
    class sqlmeta:
        # Introspect column definitions from the existing database schema.
        fromDatabase = True
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
Search test
"""
import unittest
import aciSearchDb
from acitoolkit.acitoolkit import (
AppProfile, BaseContract, BGPSession, BridgeDomain, Context, Contract,
ContractSubject, Endpoint, EPG, EPGDomain, Filter, FilterEntry, L2ExtDomain,
L2Interface, L3ExtDomain, L3Interface, MonitorPolicy, OSPFInterface,
OSPFInterfacePolicy, OSPFRouter, OutsideEPG, OutsideL3, PhysDomain,
PortChannel, Subnet, Taboo, Tenant, VmmDomain
)
LIVE_TEST = False
def get_tree():
    """
    Build a small acitoolkit object tree with explicit dn attributes.

    Tenant 'tenant' contains two app profiles (two EPGs each), two bridge
    domains sharing one context, and one contract with a filter entry.
    Custom attributes value1/value2 are planted for attribute-search tests.
    :return: the Tenant root of the tree
    """
    tenant = Tenant('tenant')
    # dn values are assigned by hand so the index keys are deterministic.
    tenant.dn = '/tn-tenant'
    app1 = AppProfile('app1', tenant)
    app1.dn = app1._parent.dn + '/app-app1'
    app2 = AppProfile('app2', tenant)
    app2.dn = app2._parent.dn + '/app-app2'
    epg11 = EPG('epg11', app1)
    epg11.dn = epg11._parent.dn + '/epg-epg11'
    epg12 = EPG('epg12', app1)
    epg12.dn = epg12._parent.dn + '/epg-epg12'
    epg21 = EPG('epg21', app2)
    epg21.dn = epg21._parent.dn + '/epg-epg21'
    epg22 = EPG('epg22', app2)
    epg22.dn = epg22._parent.dn + '/epg-epg22'
    bd1 = BridgeDomain('bd1', tenant)
    bd1.dn = bd1._parent.dn + '/bd-bd1'
    bd2 = BridgeDomain('bd2', tenant)
    bd2.dn = bd2._parent.dn + '/bd-bd2'
    # Cross-link EPGs to bridge domains (one BD per app pairing).
    epg11.add_bd(bd1)
    epg12.add_bd(bd2)
    epg21.add_bd(bd1)
    epg22.add_bd(bd2)
    context = Context('ctx', tenant)
    context.dn = context._parent.dn + '/ctx-ctx'
    bd1.add_context(context)
    bd2.add_context(context)
    contract1 = Contract('contract-1', tenant)
    contract1.dn = contract1._parent.dn + '/con-contract1'
    # HTTP-style filter entry attached to the contract.
    entry1 = FilterEntry('entry1',
                         applyToFrag='no',
                         arpOpc='unspecified',
                         dFromPort='80',
                         dToPort='80',
                         etherT='ip',
                         prot='tcp',
                         sFromPort='1',
                         sToPort='65535',
                         tcpRules='unspecified',
                         parent=contract1)
    # Subjects and filters are auto-created; give them dn values too.
    subjects = contract1.get_children(ContractSubject)
    for subject in subjects:
        subject.dn = subject._parent.dn + '/subj-' + subject.name
    filters = tenant.get_children(Filter)
    for atk_filter in filters:
        atk_filter.dn = atk_filter._parent.dn + '/flt-' + atk_filter.name
    entry1.dn = entry1._parent.dn + '/flte-entry1'
    epg11.provide(contract1)
    epg11.consume(contract1)
    epg12.consume(contract1)
    # Planted custom attributes exercised by the attribute/value searches.
    epg11.value1 = 'value2'
    bd1.value2 = 'value1'
    return tenant
class Test_SearchIndexLookup(unittest.TestCase):
    """
    Checks that the search index built from the acitoolkit tree resolves
    class (#), attribute (@), value (=) and any (*) terms with the expected
    hits and per-result scores.
    """

    def setUp(self):
        # Fresh index over the canned tree for every test.
        tree = get_tree()
        self.index = aciSearchDb.SearchIndexLookup()
        self.index.add_atk_objects(tree)

    def check_single(self, search_string, score, uids, terms):
        """Assert a one-term search returns exactly *uids*, each with
        pscore *score* and first matched term *terms*."""
        results = self.index.search(search_string)
        # Sort both sides so comparison is order-independent.
        result_sorted = sorted(results[0])
        expect_sorted = sorted(uids)
        for index in range(len(uids)):
            self.assertEqual(result_sorted[index]['uid'], expect_sorted[index])
            self.assertEqual(result_sorted[index]['pscore'], score)
            self.assertEqual(result_sorted[index]['terms'][0], terms)
        # results[1] is the total hit count.
        self.assertEqual(results[1], len(uids))

    def check_multi(self, search_string, expected_result):
        """Assert a multi-term search returns *expected_result* in order,
        comparing uid and pscore per entry."""
        results = self.index.search(search_string)
        for index in range(len(expected_result)):
            res = results[0][index]
            exp = expected_result[index]
            self.assertEqual(res['uid'], exp['uid'])
            self.assertEqual(res['pscore'], exp['pscore'])
        self.assertEqual(results[1], len(expected_result))

    def test_one_term(self):
        """
        Single-term searches: class, attribute, value and wildcard lookups,
        each scoring 2 (exact) or 1 (any-match).
        :return:
        """
        self.check_single('#Tenant', 2, ['/tn-tenant'], "Tenant")
        self.check_single('#EPG', 2, ['/tn-tenant/app-app1/epg-epg11',
                                      '/tn-tenant/app-app1/epg-epg12',
                                      '/tn-tenant/app-app2/epg-epg21',
                                      '/tn-tenant/app-app2/epg-epg22'], "EPG")
        self.check_single('#AppProfile', 2, ['/tn-tenant/app-app1',
                                             '/tn-tenant/app-app2'], 'AppProfile')
        self.check_single('#Context', 2, ['/tn-tenant/ctx-ctx'], 'Context')
        self.check_single('#Contract', 2, ['/tn-tenant/con-contract1'], 'Contract')
        self.check_single('#ContractSubject', 2, ['/tn-tenant/con-contract1/subj-contract-1_Subject'], 'ContractSubject')
        self.check_single('@name', 2, ['/tn-tenant/app-app1/epg-epg11',
                                       '/tn-tenant/flt-entry1_Filter/flte-entry1',
                                       '/tn-tenant/app-app1/epg-epg12',
                                       '/tn-tenant/flt-entry1_Filter',
                                       '/tn-tenant/ctx-ctx',
                                       '/tn-tenant/con-contract1/subj-contract-1_Subject',
                                       '/tn-tenant/bd-bd2', '/tn-tenant/bd-bd1',
                                       '/tn-tenant/app-app2/epg-epg22',
                                       '/tn-tenant',
                                       '/tn-tenant/app-app2',
                                       '/tn-tenant/app-app1',
                                       '/tn-tenant/app-app2/epg-epg21',
                                       '/tn-tenant/con-contract1'], 'name')
        self.check_single('@unicast_route', 2, ['/tn-tenant/bd-bd2', '/tn-tenant/bd-bd1'], 'unicast_route')
        # Unknown attribute: no hits, zero score.
        self.check_single('@bogus', 0, [], '')
        self.check_single('=no', 2, ['/tn-tenant/flt-entry1_Filter/flte-entry1',
                                     '/tn-tenant/bd-bd2',
                                     '/tn-tenant/bd-bd1'], 'no')
        self.check_single('=value1', 2, ['/tn-tenant/bd-bd1'], 'value1')
        self.check_single('=value2', 2, ['/tn-tenant/app-app1/epg-epg11'], 'value2')
        self.check_single('@value1', 2, ['/tn-tenant/app-app1/epg-epg11'], 'value1')
        self.check_single('@value2', 2, ['/tn-tenant/bd-bd1'], 'value2')
        # '*' matches across categories and scores lower (1).
        self.check_single('*value1', 1, ['/tn-tenant/bd-bd1', '/tn-tenant/app-app1/epg-epg11'], 'value1')
        self.check_single('*Context', 1, ['/tn-tenant/ctx-ctx'], 'Context')

    def test_two_terms(self):
        """
        Two combined terms: exact pairs score 4, pairs with a wildcard 3,
        regardless of term order.
        :return:
        """
        self.check_single('#BridgeDomain=no', 4, ['/tn-tenant/bd-bd2', '/tn-tenant/bd-bd1'], "('BridgeDomain', 'no')")
        self.check_single('=no#BridgeDomain', 4, ['/tn-tenant/bd-bd2', '/tn-tenant/bd-bd1'], "('BridgeDomain', 'no')")
        self.check_single('@value2=value1', 4, ['/tn-tenant/bd-bd1'], "('value2', 'value1')")
        self.check_single('@value2*value1', 3, ['/tn-tenant/bd-bd1'], "('value2', 'value1')")
        self.check_single('=value1@value2', 4, ['/tn-tenant/bd-bd1'], "('value2', 'value1')")
        self.check_single('*value2=value1', 3, ['/tn-tenant/bd-bd1'], "('value2', 'value1')")
        self.check_single('value2=value1', 3, ['/tn-tenant/bd-bd1'], "('value2', 'value1')")

    def test_three_terms(self):
        """
        Three combined terms: all-exact triples score 8, one wildcard 6,
        regardless of term order.
        :return:
        """
        self.check_single('#AppProfile@name=app1', 8, ['/tn-tenant/app-app1'], "('AppProfile', 'name', 'app1')")
        self.check_single('#AppProfile@name=app2', 8, ['/tn-tenant/app-app2'], "('AppProfile', 'name', 'app2')")
        self.check_single('@name#AppProfile=app2', 8, ['/tn-tenant/app-app2'], "('AppProfile', 'name', 'app2')")
        self.check_single('@name=app2#AppProfile', 8, ['/tn-tenant/app-app2'], "('AppProfile', 'name', 'app2')")
        self.check_single('#AppProfile=app2@name', 8, ['/tn-tenant/app-app2'], "('AppProfile', 'name', 'app2')")
        self.check_single('=app2#AppProfile@name', 8, ['/tn-tenant/app-app2'], "('AppProfile', 'name', 'app2')")
        self.check_single('=app2@name#AppProfile', 8, ['/tn-tenant/app-app2'], "('AppProfile', 'name', 'app2')")
        self.check_single('#BridgeDomain@arp_flood=no', 8, ['/tn-tenant/bd-bd1', '/tn-tenant/bd-bd2'],
                          "('BridgeDomain', 'arp_flood', 'no')")
        self.check_single('BridgeDomain@arp_flood=no', 6, ['/tn-tenant/bd-bd1', '/tn-tenant/bd-bd2'],
                          "('BridgeDomain', 'arp_flood', 'no')")
        self.check_single('*BridgeDomain@arp_flood=no', 6, ['/tn-tenant/bd-bd1', '/tn-tenant/bd-bd2'],
                          "('BridgeDomain', 'arp_flood', 'no')")
        self.check_single('#BridgeDomain*arp_flood=no', 6, ['/tn-tenant/bd-bd1', '/tn-tenant/bd-bd2'],
                          "('BridgeDomain', 'arp_flood', 'no')")
        self.check_single('#BridgeDomain@arp_flood*no', 6, ['/tn-tenant/bd-bd1', '/tn-tenant/bd-bd2'],
                          "('BridgeDomain', 'arp_flood', 'no')")
        # todo: score of 5 for any, any, exact match not yet implemented
        # self.check_single('BridgeDomain@arp_flood*no', 5, ['/tn-tenant/bd-bd1', '/tn-tenant/bd-bd2'])

    def test_two_ind_terms(self):
        """
        Two independent (space-separated) terms: objects matching both rank
        first (pscore 4) ahead of single-term matches (pscore 2).
        :return:
        """
        self.check_multi('#BridgeDomain =no', [{'uid': '/tn-tenant/bd-bd1', 'pscore': 4},
                                               {'uid': '/tn-tenant/bd-bd2', 'pscore': 4},
                                               {'uid': '/tn-tenant/flt-entry1_Filter/flte-entry1', 'pscore': 2}])
        self.check_multi('=no #BridgeDomain', [{'uid': '/tn-tenant/bd-bd1', 'pscore': 4},
                                               {'uid': '/tn-tenant/bd-bd2', 'pscore': 4},
                                               {'uid': '/tn-tenant/flt-entry1_Filter/flte-entry1', 'pscore': 2}])
        self.check_multi('#AppProfile @unicast_route', [{'uid': '/tn-tenant/app-app1', 'pscore': 2},
                                                        {'uid': '/tn-tenant/app-app2', 'pscore': 2},
                                                        {'uid': '/tn-tenant/bd-bd1', 'pscore': 2},
                                                        {'uid': '/tn-tenant/bd-bd2', 'pscore': 2}])
        self.check_multi('@unicast_route #AppProfile ', [
            {'uid': '/tn-tenant/app-app1', 'pscore': 2},
            {'uid': '/tn-tenant/app-app2', 'pscore': 2},
            {'uid': '/tn-tenant/bd-bd1', 'pscore': 2},
            {'uid': '/tn-tenant/bd-bd2', 'pscore': 2}
        ])
class Test_SearchObjectStore(unittest.TestCase):
    """
    Checks that objects are placed into the object store correctly, are cross-referenced, and
    can be retrieved correctly.
    """

    def setUp(self):
        # Fresh store populated from the canned acitoolkit tree.
        self.tree = get_tree()
        self.store = aciSearchDb.SearchObjectStore()
        self.store.add_atk_objects(self.tree)

    def test_get_object(self):
        """Full object records: attributes, parent, properties and relations
        (context/EPG/contract cross-references) for several dns."""
        atk_dn = "/tn-tenant/bd-bd2"
        results = self.store.get_object_info(atk_dn)
        self.assertEqual(results['attributes']['dn'], '/tn-tenant/bd-bd2')
        self.assertEqual(results['parent']['dn'], '/tn-tenant')
        self.assertEqual(results['parent']['class'], 'Tenant')
        self.assertEqual(results['parent']['name'], 'tenant')
        self.assertEqual(results['properties']['dn'], '/tn-tenant/bd-bd2')
        self.assertEqual(results['properties']['class'], 'BridgeDomain')
        self.assertEqual(results['properties']['name'], 'bd2')
        # BD relations point back to its context and attached EPGs.
        self.assertEqual(results['relations']['context'][0]['dn'], '/tn-tenant/ctx-ctx')
        self.assertEqual(results['relations']['epgs'][0]['dn'], '/tn-tenant/app-app1/epg-epg12')
        results = self.store.get_object_info('/tn-tenant/app-app1/epg-epg12')
        self.assertEqual(results['parent']['dn'], '/tn-tenant/app-app1')
        self.assertEqual(results['parent']['class'], 'AppProfile')
        self.assertEqual(results['parent']['name'], 'app1')
        self.assertEqual(results['properties']['dn'], '/tn-tenant/app-app1/epg-epg12')
        self.assertEqual(results['properties']['class'], 'EPG')
        self.assertEqual(results['properties']['name'], 'epg12')
        self.assertEqual(results['relations']['bridge domain'][0]['dn'], '/tn-tenant/bd-bd2')
        self.assertEqual(results['relations']['consumes'][0]['dn'], '/tn-tenant/con-contract1')
        results = self.store.get_object_info('/tn-tenant/app-app1/epg-epg11')
        self.assertEqual(results['properties']['dn'], '/tn-tenant/app-app1/epg-epg11')
        self.assertEqual(results['properties']['class'], 'EPG')
        self.assertEqual(results['properties']['name'], 'epg11')
        # epg11 both consumes and provides the contract (see get_tree()).
        self.assertEqual(results['relations']['consumes'][0]['dn'], '/tn-tenant/con-contract1')
        self.assertEqual(results['relations']['provides'][0]['dn'], '/tn-tenant/con-contract1')
        results = self.store.get_object_info('/tn-tenant/con-contract1')
        self.assertEqual(results['properties']['dn'], '/tn-tenant/con-contract1')
        self.assertEqual(results['properties']['class'], 'Contract')
        self.assertEqual(results['properties']['name'], 'contract-1')
        self.assertEqual(results['relations']['consumed by'][0]['dn'], '/tn-tenant/app-app1/epg-epg11')
        self.assertEqual(results['relations']['consumed by'][1]['dn'], '/tn-tenant/app-app1/epg-epg12')
        self.assertEqual(results['relations']['provided by'][0]['dn'], '/tn-tenant/app-app1/epg-epg11')

    def test_get_by_uid_short(self):
        """Batch short-form lookup: dn/class/name per requested uid."""
        atk_dn = ["/tn-tenant/bd-bd2", '/tn-tenant/app-app1/epg-epg11']
        results = self.store.get_by_uids_short(atk_dn)
        self.assertEqual(results['/tn-tenant/bd-bd2']['dn'], '/tn-tenant/bd-bd2')
        self.assertEqual(results['/tn-tenant/bd-bd2']['class'], 'BridgeDomain')
        self.assertEqual(results['/tn-tenant/bd-bd2']['name'], 'bd2')
        self.assertEqual(results['/tn-tenant/app-app1/epg-epg11']['dn'], '/tn-tenant/app-app1/epg-epg11')
        self.assertEqual(results['/tn-tenant/app-app1/epg-epg11']['class'], 'EPG')
        self.assertEqual(results['/tn-tenant/app-app1/epg-epg11']['name'], 'epg11')
class TestTerm(unittest.TestCase):
    """
    Test the Term search-input parser.

    All comparisons use assertEqual instead of the original assertTrue(a == b)
    so that a failure reports the actual and expected values.
    """
    def test_parse_class(self):
        """
        Test that it can parse a class
        :return: None
        """
        terms = aciSearchDb.Term.parse_input('#class')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'class')
        self.assertEqual(terms[0].type, 'c')
        self.assertEqual(terms[0].points, 2)
        terms = aciSearchDb.Term.parse_input('other#class')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('class', 'other'))
        self.assertEqual(terms[0].type, 'ca')
        self.assertEqual(terms[0].points, 3)
        self.assertEqual(terms[1].key, ('class', 'other'))
        self.assertEqual(terms[1].type, 'cv')
        self.assertEqual(terms[1].points, 3)
        terms = aciSearchDb.Term.parse_input('#class1@other')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class1', 'other'))
        self.assertEqual(terms[0].type, 'ca')
        self.assertEqual(terms[0].points, 4)
        terms = aciSearchDb.Term.parse_input('#class=other')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'other'))
        self.assertEqual(terms[0].type, 'cv')
        self.assertEqual(terms[0].points, 4)
        terms = aciSearchDb.Term.parse_input('#1class*other')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('1class', 'other'))
        self.assertEqual(terms[0].type, 'ca')
        self.assertEqual(terms[0].points, 3)
        self.assertEqual(terms[1].key, ('1class', 'other'))
        self.assertEqual(terms[1].type, 'cv')
        self.assertEqual(terms[1].points, 3)
        terms = aciSearchDb.Term.parse_input('#cl_ass@')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('cl_ass', ''))
        self.assertEqual(terms[0].type, 'ca')
        self.assertEqual(terms[0].points, 4)
        terms = aciSearchDb.Term.parse_input('#cl-ass=')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('cl-ass', ''))
        self.assertEqual(terms[0].type, 'cv')
        self.assertEqual(terms[0].points, 4)
        terms = aciSearchDb.Term.parse_input('#cl[ass*')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('cl[ass', ''))
        self.assertEqual(terms[0].type, 'ca')
        self.assertEqual(terms[0].points, 3)
        # a second qualifier of the same kind is ignored
        terms = aciSearchDb.Term.parse_input('#class#another_class')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'class')
        self.assertEqual(terms[0].type, 'c')
        self.assertEqual(terms[0].points, 2)
        terms = aciSearchDb.Term.parse_input('#class#')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'class')
        self.assertEqual(terms[0].type, 'c')
        self.assertEqual(terms[0].points, 2)

    def test_parse_attr(self):
        """
        Test that it can parse an attribute
        :return: None
        """
        terms = aciSearchDb.Term.parse_input('@attr')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'attr')
        self.assertEqual(terms[0].type, 'a')
        terms = aciSearchDb.Term.parse_input('other@attr')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('other', 'attr'))
        self.assertEqual(terms[0].type, 'ca')
        self.assertEqual(terms[1].key, ('attr', 'other'))
        self.assertEqual(terms[1].type, 'av')
        terms = aciSearchDb.Term.parse_input('@attr1#other')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('other', 'attr1'))
        self.assertEqual(terms[0].type, 'ca')
        terms = aciSearchDb.Term.parse_input('@attr=other')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('attr', 'other'))
        self.assertEqual(terms[0].type, 'av')
        terms = aciSearchDb.Term.parse_input('@1attr*other')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('other', '1attr'))
        self.assertEqual(terms[0].type, 'ca')
        self.assertEqual(terms[1].key, ('1attr', 'other'))
        self.assertEqual(terms[1].type, 'av')
        terms = aciSearchDb.Term.parse_input('@at_tr@')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'at_tr')
        self.assertEqual(terms[0].type, 'a')
        terms = aciSearchDb.Term.parse_input('@at-tr=')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('at-tr', ''))
        self.assertEqual(terms[0].type, 'av')
        terms = aciSearchDb.Term.parse_input('@at[tr*')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('', 'at[tr'))
        self.assertEqual(terms[0].type, 'ca')
        terms = aciSearchDb.Term.parse_input('@attr@another_attr')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'attr')
        self.assertEqual(terms[0].type, 'a')
        terms = aciSearchDb.Term.parse_input('@attr@')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'attr')
        self.assertEqual(terms[0].type, 'a')

    def test_parse_value(self):
        """
        Test that it can parse a value
        :return: None
        """
        terms = aciSearchDb.Term.parse_input('=value')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'value')
        self.assertEqual(terms[0].type, 'v')
        terms = aciSearchDb.Term.parse_input('other=value')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('other', 'value'))
        self.assertEqual(terms[0].type, 'cv')
        self.assertEqual(terms[1].key, ('other', 'value'))
        self.assertEqual(terms[1].type, 'av')
        terms = aciSearchDb.Term.parse_input('=value1#other')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('other', 'value1'))
        self.assertEqual(terms[0].type, 'cv')
        terms = aciSearchDb.Term.parse_input('=value@other')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('other', 'value'))
        self.assertEqual(terms[0].type, 'av')
        terms = aciSearchDb.Term.parse_input('=1value*other')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('other', '1value'))
        self.assertEqual(terms[0].type, 'cv')
        self.assertEqual(terms[1].key, ('other', '1value'))
        self.assertEqual(terms[1].type, 'av')
        terms = aciSearchDb.Term.parse_input('=va_lue#')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('', 'va_lue'))
        self.assertEqual(terms[0].type, 'cv')
        terms = aciSearchDb.Term.parse_input('=va-lue@')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('', 'va-lue'))
        self.assertEqual(terms[0].type, 'av')
        terms = aciSearchDb.Term.parse_input('=va[lue*')
        self.assertEqual(len(terms), 2)
        self.assertEqual(terms[0].key, ('', 'va[lue'))
        self.assertEqual(terms[0].type, 'cv')
        terms = aciSearchDb.Term.parse_input('=value=another_value')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'value')
        self.assertEqual(terms[0].type, 'v')
        terms = aciSearchDb.Term.parse_input('=value=')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, 'value')
        self.assertEqual(terms[0].type, 'v')

    def test_parse_all(self):
        """
        Test that it can parse a class, attr, and value
        :return: None
        """
        terms = aciSearchDb.Term.parse_input('#class@attr=value')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 8)
        self.assertEqual(terms[0].sql,
                         "SELECT value FROM avc WHERE class = 'class' AND attribute = 'attr' AND value LIKE 'value%'")
        terms = aciSearchDb.Term.parse_input('@attr#class=value')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 8)
        self.assertEqual(terms[0].sql,
                         "SELECT value FROM avc WHERE class = 'class' AND attribute = 'attr' AND value LIKE 'value%'")
        # the last-qualified field drives the generated SQL's LIKE clause
        terms = aciSearchDb.Term.parse_input('@attr=value#class')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 8)
        self.assertEqual(terms[0].sql,
                         "SELECT class FROM avc WHERE attribute = 'attr' AND value = 'value' AND class LIKE 'class%'")
        terms = aciSearchDb.Term.parse_input('=value@attr#class')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 8)
        terms = aciSearchDb.Term.parse_input('=value#class@attr')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 8)
        terms = aciSearchDb.Term.parse_input('@attr=value#class')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 8)

    def test_parse_all_with_any(self):
        """
        Test that a '*' (any) qualifier still parses class, attr and value,
        but scores fewer points than fully-qualified input.
        :return: None
        """
        terms = aciSearchDb.Term.parse_input('*class@attr=value')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 6)
        terms = aciSearchDb.Term.parse_input('#class*attr=value')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 6)
        terms = aciSearchDb.Term.parse_input('#class@attr*value')
        self.assertEqual(len(terms), 1)
        self.assertEqual(terms[0].key, ('class', 'attr', 'value'))
        self.assertEqual(terms[0].type, 'cav')
        self.assertEqual(terms[0].points, 6)

    def test_parse_single_generic(self):
        """
        Tests that a single, unqualified term will result in c, a, and v.
        :return:
        """
        terms = aciSearchDb.Term.parse_input('search_term')
        self.assertEqual(len(terms), 3)
        self.assertEqual(terms[0].key, 'search_term')
        self.assertEqual(terms[0].type, 'c')
        self.assertEqual(terms[0].points, 1)
        self.assertEqual(terms[1].key, 'search_term')
        self.assertEqual(terms[1].type, 'a')
        self.assertEqual(terms[1].points, 1)
        self.assertEqual(terms[2].key, 'search_term')
        self.assertEqual(terms[2].type, 'v')
        self.assertEqual(terms[2].points, 1)
class TestCustomSplit(unittest.TestCase):
    """Exercise SearchIndexLookup._custom_split word/quote tokenisation."""

    def test_simple_split(self):
        """Whitespace splitting, quote grouping, and unterminated quotes."""
        split = aciSearchDb.SearchIndexLookup._custom_split
        cases = [
            ("aaa bbb", ['aaa', 'bbb']),
            ("aaa bbb ccc ddd eee", ['aaa', 'bbb', 'ccc', 'ddd', 'eee']),
            ("aaa bbb ", ['aaa', 'bbb']),
            (" aaa bbb", ['aaa', 'bbb']),
            (" aaa bbb ", ['aaa', 'bbb']),
            ('"aaa bbb" "ccc ddd" eee', ['aaa bbb', 'ccc ddd', 'eee']),
            ('"aaa bbb" ccc "ddd efg"', ['aaa bbb', 'ccc', 'ddd efg']),
            # an unterminated trailing quote still yields the final token
            ('"aaa bbb" "ccc ddd" "eee', ['aaa bbb', 'ccc ddd', 'eee']),
            ('"aaa bbb" ccc "ddd efg', ['aaa bbb', 'ccc', 'ddd efg']),
            # trailing whitespace inside an open quote is preserved
            ('"aaa bbb" ccc "ddd efg ', ['aaa bbb', 'ccc', 'ddd efg ']),
        ]
        for raw, expected in cases:
            self.assertEqual(split(raw), expected)
# Live-APIC tests are only meaningful against a reachable controller; the
# whole class is skipped unless LIVE_TEST is enabled.
@unittest.skipIf(LIVE_TEST is False, 'Not performing live APIC testing')
class TestLiveAPIC(unittest.TestCase):
    # Placeholder suite for tests that exercise a live APIC.
    def login_to_apic(self):
        """Login to the APIC
        RETURNS: Instance of class Session
        """
        # TODO: stub — no login is performed and nothing is returned yet.
        pass
# Standard unittest entry point when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
import os
import sys
import re
import pickle
from typing import Union
import numpy as np
import pandas as pd
from scipy import stats, ndimage, io
import itertools
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
import random
from sklearn.decomposition import PCA
import tifffile as tf
import math
import csv
from funcsforprajay.wrappers import plot_piping_decorator
############### GENERALLY USEFUL FUNCTIONS #############################################################################
# return the parent directory of a file:
def return_parent_dir(file_path: str):
    """
    Return the parent directory of ``file_path`` (everything before the last '/').

    Replaces the original regex-and-slice implementation with os.path.dirname,
    which is equivalent for slash-containing paths and, unlike the original,
    returns '' instead of raising IndexError for a bare filename.

    :param file_path: path to a file
    :return: parent directory path (no trailing slash)
    """
    return os.path.dirname(file_path)
def list_in_dir(dir_path: str):
    """Return the entries of ``dir_path``; the directory must exist."""
    assert os.path.exists(dir_path)
    entries = os.listdir(dir_path)
    return entries
def timer(start, end):
    """Print the elapsed time between two timestamps as HH:MM:SS.ss.

    source: https://stackoverflow.com/questions/27779677/how-to-format-elapsed-time-from-seconds-to-hours-minutes
    -seconds-and-milliseco"""
    elapsed = end - start
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds) + ' hours, mins, seconds')
# report sizes of variables
def _sizeof_fmt(num, suffix='B'):
""" by Fred Cirera, https://stackoverflow.com/a/1094933/1870254, modified"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
# report sizes of variables
def print_size_of(var):
    """Print the human-readable in-memory size of ``var``."""
    formatted = _sizeof_fmt(sys.getsizeof(var))
    print(formatted)
# report sizes of variables
def print_size_vars(namespace=None):
    """
    Print the ten largest variables of ``namespace`` (a name -> value mapping).

    BUGFIX: the original inspected ``locals()`` *inside* this function, which
    only ever contained the function's own (empty) namespace and so printed
    nothing useful. By default the caller's local namespace is now inspected;
    passing an explicit mapping (e.g. ``vars()`` or ``globals()``) is also
    supported, and the original zero-argument call form still works.
    """
    if namespace is None:
        # walk one frame up to reach the caller's locals
        namespace = sys._getframe(1).f_locals
    for name, size in sorted(((name, sys.getsizeof(value)) for name, value in namespace.items()),
                             key=lambda x: -x[1])[:10]:
        print("{:>30}: {:>8}".format(name, _sizeof_fmt(size)))
# finding paths to files with a certain extension
def path_finder(umbrella, *args, is_folder=False):
    """
    Return the path, under ``umbrella``, of the single file or folder whose
    name contains each string in ``args``.

    :param umbrella: root directory to walk recursively
    :param args: substrings to look for; one result path is returned per arg
    :param is_folder: True to match folder names, False (default) to match file names
    :return: list of full paths, ordered like ``args``
    :raises AssertionError: if a substring matches two or more entries
    :raises ValueError: if a substring matches nothing

    The original duplicated the entire walk for the folder and file cases;
    the two branches differed only in which entry list was scanned, so they
    are merged here.
    """
    found = [False] * len(args)   # guards against duplicate matches per arg
    paths = [None] * len(args)    # result slot per arg
    for root, dirs, files in os.walk(umbrella):
        # a single walk serves both modes; only the candidate list differs
        for entry in (dirs if is_folder else files):
            for i, arg in enumerate(args):
                if arg in entry:
                    assert not found[i], 'found at least two paths for {},' \
                                         'search {} to find conflicts' \
                                         .format(arg, umbrella)
                    paths[i] = os.path.join(root, entry)
                    found[i] = True
    if not is_folder:
        # preserve the debug print the original emitted in file mode only
        print(paths)
    for i, arg in enumerate(args):
        if not found[i]:
            raise ValueError('could not find path to {}'.format(arg))
    return paths
# progress bar
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd="\r"):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
        printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
    """
    percent = "{0:.{1}f}".format(100 * (iteration / float(total)), decimals)
    done_chars = int(length * iteration // total)
    bar = fill * done_chars + '-' * (length - done_chars)
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end=printEnd)
    if iteration == total:
        # bar is complete: move to a new line
        print()
# find the closest value in a list to the given input
def findClosest(arr, input):
    """
    Return (closest_value, index) of the element of ``arr`` closest to ``input``.

    Ties resolve to the lowest index, matching the original behavior.
    Uses np.argmin instead of the original where(abs == min(abs)) idiom, and
    np.asarray so any array-like (not just list) is accepted.

    :param arr: array-like of numbers to search
    :param input: target value (parameter name kept for interface compatibility,
        although it shadows the builtin)
    :return: (closest value, its index)
    """
    arr = np.asarray(arr)
    index = int(np.argmin(np.abs(arr - input)))
    return arr[index], index
# flatten list of lists
def flattenOnce(list: Union[list, tuple], asarray=False):
    """
    Flatten a nested list/tuple by one nesting level (can be run repeatedly to
    flatten deeper nestings).

    BUGFIX: the ``asarray`` flag was previously accepted but ignored (the
    intended implementation was left commented out); it now returns an
    np.ndarray when True.

    :param list: nested sequence of sequences (name kept for interface
        compatibility, although it shadows the builtin)
    :param asarray: if True, return the flattened result as an np.ndarray
    :return: flattened list, or ndarray when asarray is True
    """
    flat = [x for sub in list for x in sub]
    return np.asarray(flat) if asarray else flat
# save .pkl files from the specified pkl_path
def save_pkl(obj, pkl_path: str):
    """
    Pickle ``obj`` to ``pkl_path``, creating the parent directory if needed.

    BUGFIX: the original only called makedirs when the parent directory
    already existed — a no-op — and raised for any path whose parent did not
    yet exist, which defeated the purpose of the makedirs call. The parent is
    now created on demand.

    :param obj: any picklable object
    :param pkl_path: destination .pkl file path
    :raises NotADirectoryError: if no parent directory can be derived from pkl_path
    """
    parent_dir = os.path.dirname(pkl_path)
    if not parent_dir:
        raise NotADirectoryError(f'parent directory of {pkl_path} cannot be reached.')
    os.makedirs(parent_dir, exist_ok=True)
    with open(pkl_path, 'wb') as f:
        pickle.dump(obj, f)
    print(f"\- saved to {pkl_path} -- ")
# load .pkl files from the specified pkl_path
def load_pkl(pkl_path: str):
    """
    Load and return the object pickled at ``pkl_path``.

    Uses a context manager so the file handle is closed (the original passed
    an open file object into pickle.load and leaked the handle).

    :param pkl_path: path to a .pkl file
    :raises FileNotFoundError: if pkl_path does not exist
    """
    if not os.path.exists(pkl_path):
        raise FileNotFoundError(f"{pkl_path} not found")
    with open(pkl_path, 'rb') as f:
        return pickle.load(f)
############### STATS/DATA ANALYSIS FUNCTIONS ##########################################################################
def eq_line_2points(p1, p2):
    """
    Return slope ``m`` and intercept ``c`` of the line y = mx + c through two points.

    :param p1: point 1, (x, y) coords
    :param p2: point 2, (x, y) coords
    :return: (m, c)

    Uses the module-level numpy import instead of the original function-local
    re-imports, and passes rcond=None to lstsq to silence the numpy
    FutureWarning. The least-squares solve is kept (rather than a direct
    slope formula) so degenerate/vertical inputs behave exactly as before.
    """
    x_coords, y_coords = zip(*[p1, p2])
    A = np.vstack([x_coords, np.ones(len(x_coords))]).T
    m, c = np.linalg.lstsq(A, y_coords, rcond=None)[0]
    print(f'x: {x_coords}')
    print(f'y: {y_coords}')
    print("Line Solution is y = {m}x + {c}".format(m=m, c=c))
    return m, c
def moving_average(a, n=4):
    """Simple moving average of ``a`` with window ``n``; returns len(a)-n+1 points."""
    window_sums = np.cumsum(a)
    # turn the running total into per-window sums, then normalize
    window_sums[n:] = window_sums[n:] - window_sums[:-n]
    return window_sums[n - 1:] / n
# calculate correlation across all cells
def corrcoef_array(array):
    """
    Mean pairwise Pearson correlation across all columns of ``array``.

    :param array: 2-D data; observations in rows, variables in columns
    :return: (mean PCC, DataFrame of per-pair PCC and p-value)
    """
    df = pd.DataFrame(array)
    pair_stats = {}
    for first, second in itertools.combinations(df.columns.tolist(), 2):
        pair_key = str(first) + '__' + str(second)
        pair_stats[pair_key] = stats.pearsonr(df.loc[:, first], df.loc[:, second])
    result = pd.DataFrame.from_dict(pair_stats, orient='index')
    result.columns = ['PCC', 'p-value']
    corr = result['PCC'].mean()
    print('Correlation coefficient: %.2f' % corr)
    return corr, result
def points_in_circle_np(radius, x0=0, y0=0):
    """Yield the integer (x, y) grid points lying within ``radius`` of (x0, y0)."""
    candidate_x = np.arange(x0 - radius - 1, x0 + radius + 1, dtype=int)
    candidate_y = np.arange(y0 - radius - 1, y0 + radius + 1, dtype=int)
    radius_sq = radius ** 2
    # explicit scan in row-major order, matching the original np.where ordering
    for cx in candidate_x:
        for cy in candidate_y:
            if (cx - x0) ** 2 + (cy - y0) ** 2 <= radius_sq:
                yield cx, cy
# calculate distance between 2 points on a cartesian plane
def calc_distance_2points(p1: tuple, p2: tuple):
    """
    Straight-line (Euclidean) distance between two points on a 2D cartesian plane.

    :param p1: (x, y) of point 1
    :param p2: (x, y) of point 2
    :return: distance as a float
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return math.hypot(dx, dy)
def lin_regression(x: list, y: list):
    """Fit y = mx + b to the data and evaluate the fit over range(min(x), max(x))."""
    fit = np.poly1d(np.polyfit(x, y, 1))
    sample_points = range(np.min(x), np.max(x))
    return fit(sample_points)
# retrieve x, y points from csv
def xycsv(csvpath):
    """
    Read the 'xcoords'/'ycoords' columns of a csv and return them sorted by y.

    Assumes the traced line is monotonic.
    :param csvpath: path to a csv whose header row names xcoords and ycoords
    :return: (xline, yline) integer arrays, ordered by ascending y
    """
    xline, yline = [], []
    with open(csvpath) as csv_file:
        for row in csv.DictReader(csv_file, fieldnames=None, dialect='excel'):
            xline.append(int(float(row['xcoords'])))
            yline.append(int(float(row['ycoords'])))
    order = np.argsort(yline)
    return np.array(xline)[order], np.array(yline)[order]
# read matlab array
def load_matlab_array(path):
    """
    Load a matlab output file.

    :param path: path to the matlab output file ending in .mat
    :return: dict mapping variable names to arrays
    """
    matlab_contents = io.loadmat(path)
    return matlab_contents
# read csv
def read_csv(csvpath):
    """
    Read ``csvpath`` (first row = header) and return its rows as a list of dicts.

    BUGFIX: the original returned the DictReader object itself, whose
    underlying file was already closed by the ``with`` block — iterating the
    result raised "I/O operation on closed file". The rows are now
    materialized while the file is still open.

    :param csvpath: path to a csv file with a header row
    :return: list of dicts, one per data row
    """
    with open(csvpath) as csv_file:
        reader = csv.DictReader(csv_file, fieldnames=None, dialect='excel')
        return list(reader)
# find percentile of a value within an array
def find_percentile(d, threshold):
    """Percentage of entries of ``d`` whose absolute value is below ``threshold``."""
    below_threshold = np.abs(d) < threshold
    return np.sum(below_threshold) / float(len(d)) * 100
# random func for rotating images and calculating the image intensity along one axis of the image
def rotate_img_avg(input_img, angle):
    """
    Rotate ``input_img`` by ``angle`` degrees (+ve = counter-clockwise),
    expanding the output canvas to fit the rotated frame.

    NOTE(review): the original docstring also promised a 1 x n average across
    non-zero values along the x axis, but no such quantity is computed or
    returned — only the rotated image is produced.

    :param input_img: ndarray comprising the image
    :param angle: rotation angle in degrees
    :return: ndarray, the rotated image
    """
    rotated = ndimage.rotate(input_img, angle, reshape=True)
    return rotated
def _avg_nonzero_rows(img):
    """Per-row mean of the strictly-positive pixels of ``img`` (0 for rows with none)."""
    avg = np.zeros([img.shape[0], 1])
    for i in range(len(img)):
        positives = img[i][img[i] > 0]
        avg[i] = positives.mean() if len(positives) > 0 else 0
    return avg


# PCA decomposition(/compression) of an image
def pca_decomp_image(input_img, components: int = 3, plot_quant: bool = False):
    """
    PCA based decomposition/compression of an image, with optional quantification
    of the original and reconstructed images averaged along the x axis.

    BUGFIX: the original reassigned ``img_compressed`` to its transpose while
    quantifying, so the function returned a TRANSPOSED image whenever
    plot_quant=True (and the untransposed image otherwise). The return value
    is now orientation-stable; quantification uses a separate transposed view.

    :param input_img: ndarray; input image
    :param components: int; # of principle components to use for the PCA decomposition (compression) of the input_img
    :param plot_quant: bool; plot quantification of the average along x-axis of the image
    :return: ndarray; compressed image in the same orientation as input_img
    """
    print("Extracting the top %d eigendimensions from image" % components)
    pca = PCA(components)
    img_transformed = pca.fit_transform(input_img)
    img_compressed = pca.inverse_transform(img_transformed)
    if plot_quant:
        # quantify the input image
        fig = plt.figure(figsize=(15, 5))
        ax1, ax2, ax3 = fig.subplots(1, 3)
        ax1.imshow(input_img, cmap='gray')
        ax2.imshow(img_compressed, cmap='gray')
        img_t = input_img.T
        avg = _avg_nonzero_rows(img_t)
        ax3.plot(avg)
        ax3.set_xlim(20, len(img_t) - 20)
        ax3.set_title('average plot quantification of the input img', wrap=True)
        plt.show()
        # quantify the PC reconstructed image (on a transposed view, not the
        # array that gets returned)
        fig = plt.figure(figsize=(15, 5))
        ax1, ax2, ax3 = fig.subplots(1, 3)
        ax1.imshow(input_img, cmap='gray')
        ax2.imshow(img_compressed, cmap='gray')
        compressed_t = img_compressed.T
        avg = _avg_nonzero_rows(compressed_t)
        ax3.plot(avg)
        ax3.set_xlim(20, len(compressed_t.T) - 20)
        ax3.title.set_text('average plot quantification of the PCA compressed img - %s dimensions' % components)
        plt.show()
    return img_compressed
# grouped average / smoothing of a 1dim array (basically the same as grouped average on imageJ)
def smoothen_signal(signal, w):
    """Grouped average / smoothing of a 1-d array (like grouped average on imageJ):
    boxcar convolution of width ``w`` in 'valid' mode."""
    kernel = np.ones(w)
    return np.convolve(signal, kernel, 'valid') / w
############### PLOTTING FUNCTIONS #####################################################################################
# general plotting function for making plots quickly (without having to write out a bunch of lines of code)
# custom colorbar for heatmaps
from matplotlib.colors import LinearSegmentedColormap
def make_colormap(seq):
    """Return a LinearSegmentedColormap built from ``seq``.

    seq: a sequence of floats and RGB-tuples. The floats should be increasing
    and in the interval (0,1).
    """
    # pad with sentinel colours and the 0/1 stops so every float has a
    # neighbour colour on each side
    padded = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {'red': [], 'green': [], 'blue': []}
    for idx, item in enumerate(padded):
        if not isinstance(item, float):
            continue
        # anchor: colour left of the stop, colour right of the stop
        r1, g1, b1 = padded[idx - 1]
        r2, g2, b2 = padded[idx + 1]
        cdict['red'].append([item, r1, r2])
        cdict['green'].append([item, g1, g2])
        cdict['blue'].append([item, b1, b2])
    return LinearSegmentedColormap('CustomMap', cdict)
# generate an array of random line_colors
def _get_random_color(pastel_factor=0.5):
return [(x + pastel_factor) / (1.0 + pastel_factor) for x in [random.uniform(0, 1.0) for i in [1, 2, 3]]]
def _color_distance(c1, c2):
return sum([abs(x[0] - x[1]) for x in zip(c1, c2)])
def _generate_new_color(existing_colors, pastel_factor=0.5):
    """
    Draw up to 100 random candidate colours and keep the one whose nearest
    neighbour in ``existing_colors`` is farthest away. With an empty
    ``existing_colors`` the first draw is returned immediately.
    """
    best_color = None
    max_distance = None
    for _ in range(0, 100):
        candidate = _get_random_color(pastel_factor=pastel_factor)
        if not existing_colors:
            # nothing to contrast against: first draw wins
            return candidate
        nearest = min([_color_distance(candidate, existing) for existing in existing_colors])
        if not max_distance or nearest > max_distance:
            max_distance = nearest
            best_color = candidate
    return best_color
def make_random_color_array(n_colors):
    """
    Generate a list of random, mutually-distinct pastel line_colors.

    :param n_colors: # of line_colors required
    :return: list of line_colors in RGB
    """
    palette = []
    for _ in range(0, n_colors):
        palette.append(_generate_new_color(palette, pastel_factor=0.2))
    return palette
@plot_piping_decorator(verbose=False)
def make_general_scatter(x_list: list, y_data: list, fig=None, ax=None,
                         **kwargs):  ## TODO remove the double plotting, just give option to plot all individual as stamps or together!
    """
    General function for quick, simple plotting of data lists as scatters.
    NOTE: THIS FUNC MAKES TWO SEPARATE PLOTS if given >1 dataset to plot.

    :param x_list: list of x_points for plots, must match one to one to y_data
    :param y_data: list of y_data for plots, must match one to one to x_list
    :param fig: figure handle (normally injected by plot_piping_decorator)
    :param ax: axes handle for the combined plot (injected by the decorator)
    :param kwargs: (optional)
        line_colors: list, line_colors to use to plot >1 data sets
        edgecolors: marker edge colors (defaults to line_colors)
        alpha / s / lw: marker transparency, size and line width
        ax_y_labels: list, y_labels to use to plot >1 data sets
        ax_x_labels: list, x_labels to use to plot >1 data sets
        y_label: str, y_label to use to plot the combined main plot
        x_label: str, x_label to use to plot the combined main plot
        legend_labels: list[str], legend_labels to use to plot the combined main plot
        ax_titles: list of ax_titles to use to plot >1 data sets
        x_lim / y_lim: tuple, axis limits of the combined plot
        supress_print: set to False to log progress per dataset
    :return: None
    """
    assert len(y_data) == len(x_list), 'y_data length does not match x_list length'
    num_plots = len(x_list)
    # marker face colors: random palette unless provided
    if 'line_colors' not in kwargs:
        colors = make_random_color_array(num_plots)
    else:
        assert type(kwargs['line_colors']) is list and len(kwargs['line_colors']) == num_plots, \
            'provide line_colors argument in list form matching number of traces to plot'
        colors = kwargs['line_colors']
    edgecolors = kwargs.get('edgecolors', colors)
    # set plotting properties
    alpha = kwargs.get('alpha', 0.8)
    size = kwargs.get('s', 50)
    lw = kwargs.get('lw', 0)
    # check integrity of function call arguments
    # BUGFIX: these checks previously asserted on kwargs['y_labels'] /
    # kwargs['x_labels'], raising KeyError when only ax_y_labels /
    # ax_x_labels were supplied
    if 'ax_y_labels' in kwargs and type(kwargs['ax_y_labels']) is list:
        assert len(kwargs['ax_y_labels']) == num_plots
    if 'ax_x_labels' in kwargs and type(kwargs['ax_x_labels']) is list:
        assert len(kwargs['ax_x_labels']) == num_plots
    if 'ax_titles' in kwargs and type(kwargs['ax_titles']) is list:
        assert len(kwargs['ax_titles']) == num_plots
    if 'legend_labels' in kwargs and type(kwargs['legend_labels']) is list:
        assert len(kwargs['legend_labels']) == num_plots, \
            'legend_labels len does not match number of plots to make (len of x_list)'
        label = kwargs['legend_labels']
    else:
        # BUGFIX: was `label = ['']`, which raised IndexError at label[i]
        # for every dataset past the first
        label = [''] * num_plots
    if num_plots > 1:
        # grid of per-dataset "stamp" plots, 4 per row
        ncols = 4
        nrows = num_plots // ncols
        if num_plots % ncols > 0:
            nrows += 1
        fig2, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=[(ncols * 4), (nrows * 3)])
        counter = 0
        axs[0, 0].set_xlabel(kwargs['ax_x_labels'][0]) if 'ax_x_labels' in kwargs else None
        axs[0, 0].set_ylabel(kwargs['ax_y_labels'][0]) if 'ax_y_labels' in kwargs else None
    for i in range(num_plots):
        # equivalent to the original `'supress_print' in kwargs and
        # kwargs['supress_print'] != True` (no print when the key is absent)
        if kwargs.get('supress_print', True) != True:
            print(f"plotting plot # {i + 1} out of {num_plots}, {len(x_list[i])} points")
        # combined plot: every dataset on the single main axis
        ax.scatter(x=x_list[i], y=y_data[i], facecolors=colors[i], edgecolors=edgecolors[i], alpha=alpha, lw=lw,
                   s=size, label=label[i])
        if num_plots > 1:
            # make plot for individual key/experiment trial
            a = counter // ncols
            b = counter % ncols
            ax2 = axs[a, b]
            ax2.scatter(x=x_list[i], y=y_data[i], facecolors=colors[i], edgecolors=edgecolors[i], alpha=alpha, lw=lw,
                        s=size, label=label[i])
            ax2.set_xlim(-50, 50)
            ax2.set_title(f"{kwargs['ax_titles'][i]}") if 'ax_titles' in kwargs else None
            counter += 1
        else:
            ax.set_title(f"{kwargs['ax_titles'][i]}") if 'ax_titles' in kwargs else None
    # decorate the combined main plot
    ax.set_xlim(kwargs['x_lim'][0], kwargs['x_lim'][1]) if 'x_lim' in kwargs else None
    ax.set_ylim(kwargs['y_lim'][0], kwargs['y_lim'][1]) if 'y_lim' in kwargs else None
    ax.set_xlabel(kwargs['x_label']) if 'x_label' in kwargs else None
    ax.set_ylabel(kwargs['y_label']) if 'y_label' in kwargs else None
    ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") if 'legend_labels' in kwargs else None
    fig.tight_layout(pad=1.8)
    if num_plots > 1:
        fig2.suptitle(f"all plots individual")
        fig2.tight_layout(pad=1.8)
        fig2.show()
# @print_start_end_plot
@plot_piping_decorator()
def make_general_plot(data_arr, x_range=None, twin_x: bool = False, plot_avg: bool = True, plot_std: bool = True,
                      **kwargs):
    """
    General function for quick, simple plotting of arbritary data arrays.
    :param data_arr: list of data-traces to plot, or np.ndarray containing data-traces
    :param x_range: list of x-ranges to plot, or np.ndarray containing x-ranges
    :param twin_x: iff two traces, option to plot on same axis
    :param plot_avg: if more than two traces, whether to plot average of the data traces
    :param plot_std: if more than two traces, whether to plot std of the data traces, if false will plot individual data traces in random color
    :param kwargs: (optional)
        line_colors: list, line_colors to use to plot >1 data traces
        y_labels: list, y_labels to use to plot >1 data traces
        x_labels: list, x_labels to use to plot >1 data traces
        ax_titles: list of ax_titles to use to plot >1 data traces
        title: str, one title for one ax plotting
        y_label: str, one y label for one ax plotting
        x_label: str, one x label for one ax plotting
        fontsize: float, fontsizes within the plot of text labels
        v_span: tuple, vertical span fill - will be same for each axis
        suptitle: str, used for suptitle of fig
    :return None
    """
    # fig/ax are injected into kwargs by plot_piping_decorator
    f, axs = kwargs['fig'], [kwargs['ax']]
    # prepare for plotting over multiple axes if called for
    # NOTE(review): axs is built as a one-element list just above, so this
    # branch can never be taken — num_axes always starts at 1
    if len(axs) > 1:
        num_axes = len(axs)
    else:
        num_axes = 1
        # axs = np.array([axs])
    # create data arrays in the correct format for plotting
    if type(data_arr) is list:
        num_traces = len(data_arr)
    elif type(data_arr) is np.ndarray:
        num_traces = data_arr.shape[0]
    else:
        raise Exception('data_arr must be of type list of np.ndarray')
    # check if plotting multi-traces on 1 axis (but not twinx style!):
    if num_traces > num_axes and num_axes == 1:
        # overlaid traces are drawn translucent so the average stands out
        alpha = 0.3
    else:
        alpha = 1
        plot_avg = False  # turn off plotting of average trace
        plot_std = False  # turn off plotting of std trace from data
    # add twin x if called for:
    if num_traces == 2 and twin_x is True:
        ax = axs[0]
        ax2 = ax.twinx()  # second trace gets its own y-axis on the same plot
        axs = np.array([ax, ax2])
        num_axes = 2
    print(f'\nPlotting {num_traces} data traces across {num_axes} axes') if not twin_x else print(
        f'\nPlotting {num_traces} data traces across 1 axes (with twin_x)')
    # create x_range to use for plotting
    if x_range is not None:
        if type(x_range) is list:
            x_range = np.asarray(x_range)
        # NOTE(review): the assert "message" is a print() call that returns
        # None — it prints eagerly on every call and the AssertionError
        # itself carries no message
        assert x_range.shape == data_arr.shape, print(
            '|- AssertionError: mismatch between data to plot and x_range provided for this data')
    else:
        # default x ranges: 0..len-1 per trace
        x_range = np.empty_like(data_arr)
        for i in range(num_traces):
            x_range[i] = range(len(data_arr[i]))
    # make random line_colors for plotting
    if 'line_colors' not in kwargs.keys():
        colors = make_random_color_array(num_traces) if num_traces > 1 else ['black']
    else:
        assert type(kwargs['line_colors']) is list, print(
            '|- AssertionError: provide line_colors argument in list form')
        assert len(kwargs['line_colors']) == num_traces, print(
            '|- AssertionError: provide enough line_colors as number of traces to plot')
        colors = kwargs['line_colors']
    # check integrity of function call arguments
    if 'y_labels' in kwargs.keys() and len(kwargs['y_labels']) > 1: assert len(kwargs['y_labels']) == num_traces
    if 'x_labels' in kwargs.keys() and len(kwargs['x_labels']) > 1: assert len(kwargs['x_labels']) == num_traces
    if 'ax_titles' in kwargs.keys(): assert len(kwargs['ax_titles']) == num_traces
    # shrink or enlarge the fontsize option:
    fontsize = kwargs['fontsize'] if 'fontsize' in kwargs.keys() else 10
    # make the plot using each provided data trace
    ax_counter = 0
    if 'v_span' in kwargs.keys() and type(kwargs['v_span']) is tuple:
        # vertical highlight band drawn behind the traces
        axs[ax_counter].axvspan(kwargs['v_span'][0], kwargs['v_span'][1], color='indianred', zorder=1)
    if plot_std is False or num_traces == 1:  # only plot individual lines if plot_std is inactive
        print(f'\- plotting {num_traces} individual traces on {num_axes} axes')
        for i in range(num_traces):
            axs[ax_counter].plot(x_range[i], data_arr[i], color=colors[i], alpha=alpha)
            if num_axes > 1:
                # per-axis labelling; advance to the next axis for the next trace
                axs[ax_counter].set_xlabel(kwargs['ax_titles'][i],
                                           fontsize=fontsize) if 'ax_titles' in kwargs.keys() else None
                axs[ax_counter].set_xlabel(kwargs['x_labels'][i],
                                           fontsize=fontsize) if 'x_labels' in kwargs.keys() else None
                axs[ax_counter].set_ylabel(kwargs['y_labels'][i],
                                           fontsize=fontsize) if 'y_labels' in kwargs.keys() else None
                ax_counter += 1
    if num_axes == 1 and twin_x is False and num_traces > 1:
        if plot_avg:
            # mean trace drawn on top (zorder above all individual traces)
            print(f'\- plotting average trace of {data_arr.shape[0]} traces on 1 axis')
            axs[ax_counter].plot(x_range[0], np.mean(data_arr, axis=0), color='black', alpha=1,
                                 zorder=data_arr.shape[0] + 1)
        if plot_std:
            # +/- 1 standard-deviation band drawn behind the traces
            print(f'\- plotting std trace of {data_arr.shape[0]} traces on 1 axis')
            std_low = np.mean(data_arr, axis=0) - np.std(data_arr, axis=0)
            std_high = np.mean(data_arr, axis=0) + np.std(data_arr, axis=0)
            axs[ax_counter].fill_between(x_range[0], std_low, std_high, color='gray', alpha=0.5, zorder=0)
    # final decoration of the last-used axis
    axs[ax_counter].set_title(kwargs['title'], fontsize=fontsize * 1.1, wrap=True) if 'title' in kwargs.keys() else \
        axs[ax_counter].set_title(f"{num_traces} traces")
    axs[ax_counter].set_ylabel(kwargs['y_label'], fontsize=fontsize) if 'y_label' in kwargs.keys() else None
    axs[ax_counter].set_xlabel(kwargs['x_label'], fontsize=fontsize) if 'x_label' in kwargs.keys() else None
    axs[ax_counter].set_ylabel(kwargs['y_labels'], fontsize=fontsize) if 'y_labels' in kwargs.keys() else None
    axs[ax_counter].set_xlabel(kwargs['x_labels'], fontsize=fontsize) if 'x_labels' in kwargs.keys() else None
    return None
### plot the location of provided coordinates
@plot_piping_decorator(figsize=(5, 5), verbose=False)
def plot_coordinates(coords: list, frame_x: int, frame_y: int, background: np.ndarray = None, fig=None, ax=None,
                     **kwargs):
    """
    Plot the locations of the provided (x, y) coordinates on a background image.

    :param coords: list containing (x, y) coordinates to plot
    :param frame_x: width (pixels) of the default black background, used when background is None
    :param frame_y: height (pixels) of the default black background, used when background is None
    :param background: np.array on which to plot coordinates, default is black background (optional)
    :param fig: matplotlib figure (supplied by plot_piping_decorator)
    :param ax: matplotlib axes (supplied by plot_piping_decorator)
    :param kwargs: optional keys: 'edgecolors', 'facecolors', 'fontsize', 'title'
    """
    # fall back to an all-black frame when no background image is given;
    # the original duplicated the imshow call in both branches
    if background is None:
        background = np.zeros((frame_x, frame_y), dtype='uint16')
    ax.imshow(background, cmap='gray')
    # marker styling (defaults preserved from the original implementation)
    edgecolors = kwargs['edgecolors'] if 'edgecolors' in kwargs else 'yellowgreen'
    # set facecolors of the plotted coordinates
    facecolors = kwargs['facecolors'] if 'facecolors' in kwargs else 'none'
    # shrink or enlarge the fontsize option:
    fontsize = kwargs['fontsize'] if 'fontsize' in kwargs else 10
    for (x, y) in coords:
        ax.scatter(x=x, y=y, edgecolors=edgecolors, facecolors=facecolors, linewidths=2.0)
    ax.set_title(kwargs['title'], fontsize=fontsize * 1.1, wrap=True) if 'title' in kwargs else ax.set_title(
        f"{len(coords)} coordinates")
    ax.margins(0)
    fig.tight_layout()
# plot a 2d histogram density plot
@plot_piping_decorator(figsize=(5, 5), verbose=False)
def plot_hist2d(data: np.array, fig=None, ax=None, **kwargs):
    """
    Plot a 2d histogram (density plot) of the given data.

    :param data: data array to be plotted, shape must be (n, 2)
    :param fig: matplotlib figure (supplied by plot_piping_decorator)
    :param ax: matplotlib axes (supplied by plot_piping_decorator)
    :param kwargs: optional keys: 'cmap', 'bins' (two-element [Nx, Ny]), 'fontsize',
                   'title', 'x_label', 'y_label', 'x_lim', 'y_lim'
    """
    # check data structure:
    assert data.shape[1] == 2 and data.ndim == 2, "data np.array shape must be (n, 2)"
    # set colormap for the 2d density plot
    cmap = kwargs['cmap'] if 'cmap' in kwargs else 'inferno'
    # bins per axis; guard against a non-sized 'bins' value (e.g. a bare int),
    # which crashed the original len() check with a TypeError
    bins = kwargs.get('bins')
    if not (hasattr(bins, '__len__') and len(bins) == 2):
        bins = [100, 100]
    print(f"|- plotting with: {bins} (Nx, Ny) bins [.1]")
    # shrink or enlarge the fontsize option:
    fontsize = kwargs['fontsize'] if 'fontsize' in kwargs else 10
    ax.hist2d(data[:, 0], data[:, 1], bins=bins, cmap=cmap)
    ax.set_title(kwargs['title'], fontsize=fontsize * 1.1, wrap=True) if 'title' in kwargs else ax.set_title(
        f"2d density plot, {bins} bins")
    ax.set_ylabel(kwargs['y_label'], fontsize=fontsize) if 'y_label' in kwargs else None
    ax.set_xlabel(kwargs['x_label'], fontsize=fontsize) if 'x_label' in kwargs else None
    ax.set_ylim(kwargs['y_lim'][0], kwargs['y_lim'][1]) if 'y_lim' in kwargs else None
    ax.set_xlim(kwargs['x_lim'][0], kwargs['x_lim'][1]) if 'x_lim' in kwargs else None
    ax.margins(0)
    fig.tight_layout()
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.
    from: https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib/49601444

    :param color: matplotlib named color, hex string, or RGB tuple
    :param amount: amount < 1 lightens, amount > 1 darkens
    :return: (r, g, b) tuple of the adjusted color

    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # not a named color (hex string or RGB tuple): use it directly.
        # The original bare `except:` swallowed every exception.
        c = color
    # adjust lightness in HLS space
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
# # plotting function for plotting a bar graph with the individual data points shown as well
# def plot_bar_with_points(data, title='', x_tick_labels=[], legend_labels: list = [], points: bool = True,
# bar: bool = True, colors: list = ['black'], ylims=None, xlims=True, text_list=None,
# x_label=None, y_label=None, alpha=0.2, savepath=None, expand_size_x=1, expand_size_y=1,
# shrink_text: float = 1, show_legend=False,
# paired=False, title_pad=20, **kwargs):
# """
# all purpose function for plotting a bar graph of multiple categories with the option of individual datapoints shown
# as well. The individual datapoints are drawn by adding a scatter plot with the datapoints randomly jittered around the central
# x location of the bar graph. The individual points can also be paired in which case they will be centered. The bar can also be turned off.
#
# :param data: list; provide data from each category as a list and then group all into one list
# :param title: str; title of the graph
# :param x_tick_labels: labels to use for categories on x axis
# :param legend_labels:
# :param points: bool; if True plot individual data points for each category in data using scatter function
# :param bar: bool, if True plot the bar, if False plot only the mean line
# :param colors: line_colors (by category) to use for each x group
# :param ylims: tuple; y axis limits
# :param xlims: the x axis is used to position the bars, so use this to move the position of the bars left and right
# :param x_label: x axis label
# :param y_label: y axis label
# :param text_list: list of text to add to each category of data on the plot
# :param text_shift: float; number between 0.5 to 1 used to adjust precise positioning of the text in text_list
# :param alpha: transparency of the individual points when plotted in the scatter
# :param savepath: .svg file path; if given, the plot will be saved to the provided file path
# :param expand_size_x: factor to use for expanding figure size
# :param expand_size_y: factor to use for expanding figure size
# :param paired: bool, if True then draw lines between data points of the same index location in each respective list in the data
# :return: matplotlib plot
# """
#
# # collect some info about data to plot
# w = 0.3 # mean bar width
# xrange_ls = list(range(len(data)))
# y = data
# if len(colors) != len(xrange_ls):
# colors = colors * len(xrange_ls)
#
# # initialize plot
# if 'fig' in kwargs.keys():
# f = kwargs['fig']
# ax = kwargs['ax']
# else:
# f, ax = plt.subplots(figsize=((5 * len(xrange_ls) / 2) * expand_size_x, 3 * expand_size_y))
#
# if paired:
# assert len(xrange_ls) > 1
#
# # start making plot
# if not bar:
# for i in xrange_ls:
# ## plot the mean line
# ax.plot(np.linspace(xrange_ls[i] * w * 2.5 - w / 2, xrange_ls[i] * w * 2.5 + w / 2, 3), [np.mean(y[i])] * 3,
# color='black')
# lw = 0,
# edgecolor = None
# # since no bar being shown on plot (lw = 0 from above) then use it to plot the error bars
# ax.bar([x * w * 2.5 for x in xrange_ls],
# height=[np.mean(yi) for yi in y],
# yerr=[np.std(yi, ddof=1) for yi in y], # error bars
# capsize=4.5, # error bar cap width in points
# width=w, # bar width
# linewidth=lw, # width of the bar edges
# edgecolor=edgecolor,
# color=(0, 0, 0, 0), # face edgecolor transparent
# zorder=2
# )
# elif bar:
# if 'edgecolor' not in kwargs.keys():
# edgecolor = 'black',
# lw = 1
# else:
# edgecolor = kwargs['edgecolor'],
# lw = 1
# # plot bar graph
# ax.errorbar([x * w * 2.5 for x in xrange_ls], [np.mean(yi) for yi in y], fmt='none',
# yerr=np.asarray([np.asarray([0, np.std(yi, ddof=1)]) for yi in y]).T, ecolor='gray',
# capsize=5, zorder=0)
# ax.bar([x * w * 2.5 for x in xrange_ls],
# height=[np.mean(yi) for yi in y],
# # yerr=np.asarray([np.asarray([0, np.std(yi, ddof=1)]) for yi in y]).T, # error bars
# capsize=4.5, # error bar cap width in points
# width=w, # bar width
# linewidth=lw, # width of the bar edges
# edgecolor=edgecolor,
# color=(0, 0, 0, 0), # face edgecolor transparent
# zorder=2
# )
# else:
# AttributeError('something wrong happened with the bar bool parameter...')
#
# ax.set_xticks([x * w * 2.5 for x in xrange_ls])
# if len(xrange_ls) > 1:
# ax.set_xticklabels(x_tick_labels, fontsize=10 * shrink_text, rotation=45)
# else:
# ax.set_xticklabels(x_tick_labels, fontsize=10 * shrink_text)
#
# if xlims:
# ax.set_xlim([(xrange_ls[0] * w * 2) - w * 1.20, (xrange_ls[-1] * w * 2.5) + w * 1.20])
# elif len(xrange_ls) == 1: # set the x_lims for single bar case so that the bar isn't autoscaled
# xlims_ = [-1, 1]
# ax.set_xlim(xlims_)
#
# if len(legend_labels) == 0:
# if len(x_tick_labels) == 0:
# x_tick_labels = [None] * len(xrange_ls)
# legend_labels = x_tick_labels
#
# if points:
# if not paired:
# for i in xrange_ls:
# # distribute scatter randomly across whole width of bar
# ax.scatter(xrange_ls[i] * w * 2.5 + np.random.random(len(y[i])) * w - w / 2, y[i], color=colors[i],
# alpha=alpha, label=legend_labels[i])
#
# else: # connect lines to the paired scatter points in the list
# if len(xrange_ls) > 0:
# for i in xrange_ls:
# # plot points # dont scatter location of points if plotting paired lines
# ax.scatter([xrange_ls[i] * w * 2.5] * len(y[i]), y[i], color=colors[i], alpha=0.5,
# label=legend_labels[i], zorder=3)
# for i in xrange_ls[:-1]:
# for point_idx in range(len(y[i])): # draw the lines connecting pairs of data
# ax.plot([xrange_ls[i] * w * 2.5 + 0.058, xrange_ls[i + 1] * w * 2.5 - 0.048],
# [y[i][point_idx], y[i + 1][point_idx]], color='black', zorder=2, alpha=alpha)
#
# # for point_idx in range(len(y[i])): # slight design difference, with straight line going straight through the scatter points
# # ax.plot([x * w * 2.5 for x in x],
# # [y[i][point_idx] for i in x], color='black', zorder=0, alpha=alpha)
#
# else:
# AttributeError('cannot do paired scatter plotting with only one data category')
#
# if ylims:
# ax.set_ylim(ylims)
# elif len(xrange_ls) == 1: # set the y_lims for single bar case so that the bar isn't autoscaled
# ylims = [0, 2 * max(data[0])]
# ax.set_ylim(ylims)
#
# # Hide the right and top spines
# ax.spines['right'].set_visible(False)
# ax.spines['top'].set_visible(False)
#
# ax.tick_params(axis='both', which='both', length=5)
#
# # Only show ticks on the left and bottom spines
# ax.yaxis.set_ticks_position('left')
# ax.xaxis.set_ticks_position('bottom')
#
# ax.set_xlabel(x_label, fontsize=10 * shrink_text)
# ax.set_ylabel(y_label, fontsize=10 * shrink_text)
# if savepath:
# plt.savefig(savepath)
#
# # add text to the figure if given:
# if text_list:
# assert len(xrange_ls) == len(text_list), 'please provide text_list of same len() as data'
# if 'text_shift' in kwargs.keys():
# text_shift = kwargs['text_shift']
# else:
# text_shift = 0.8
# if 'text_y_pos' in kwargs.keys():
# text_y_pos = kwargs['text_y_pos']
# else:
# text_y_pos = max([np.percentile(y[i], 95) for i in xrange_ls])
# for i in xrange_ls:
# ax.text(xrange_ls[i] * w * 2.5 - text_shift * w / 2, text_y_pos, text_list[i]),
#
# if len(legend_labels) > 1:
# if show_legend:
# ax.legend(bbox_to_anchor=(1.01, 0.90), fontsize=8 * shrink_text)
#
# # add title
# if 'fig' not in kwargs.keys():
# ax.set_title(title, horizontalalignment='center', pad=title_pad,
# fontsize=11 * shrink_text, wrap=True)
# else:
# ax.set_title((title), horizontalalignment='center', pad=title_pad,
# fontsize=11 * shrink_text, wrap=True)
#
# if 'show' in kwargs.keys():
# if kwargs['show'] is True:
# # Tweak spacing to prevent clipping of ylabel
# f.tight_layout(pad=1.4)
# f.show()
# else:
# return f, ax
# else:
# # Tweak spacing to prevent clipping of ylabel
# f.tight_layout(pad=1.4)
# f.show()
# histogram density plot with gaussian best fit line
def plot_hist_density(data: list, mean_line: bool = False, colors: list = None, fill_color: list = None,
                      legend_labels: list = None, num_bins=10, best_fit_line='gaussian', **kwargs):
    """
    Plot a histogram density plot, with an optional best-fit curve per dataset.

    :param data: list; nested list containing the data; if only one array to plot then provide array enclosed inside list (e.g. [array])
    :param mean_line: if True, draw a dashed vertical line at each dataset's mean
    :param colors: line colors (one per dataset) for the best-fit curves
    :param fill_color: fill colors (one per dataset)
    :param legend_labels: one label per dataset (optional)
    :param num_bins: number of histogram bins
    :param best_fit_line: 'gaussian' or 'powerlaw' fit overlaid on the histogram
    :param kwargs: requires 'fig' and 'ax'; optional 'alpha', 'x_label', 'y_label',
                   'show_legend', 'x_lim', 'shrink_text', 'title'
    :return: None
    """
    fig = kwargs['fig']
    ax = kwargs['ax']

    if colors is None:
        colors = ['black'] * len(data)
    if fill_color is None:
        # default a fill color for every dataset; the original only handled the
        # single-dataset case and crashed with len(None) otherwise
        fill_color = ['steelblue'] * len(data)
    assert len(data) == len(colors), 'please provide a line color for each dataset'
    assert len(data) == len(fill_color), 'please provide a fill color for each dataset'

    # the original compared `legend_labels is [None]`, which is always False
    # (identity vs. equality); a None default also avoids a mutable default arg
    if legend_labels is None:
        legend_labels = [None] * len(data)
    else:
        assert len(legend_labels) == len(data), 'please provide a legend label for all your data to be plotted!'

    # set the transparency for the fill of the plot
    if 'alpha' in kwargs and (type(kwargs['alpha']) is float or kwargs['alpha'] == 1):
        alpha = kwargs['alpha']
    else:
        alpha = 0.3

    # make the primary histogram density plot
    zorder = 2
    for i in range(len(data)):
        # the histogram of the data
        bin_heights, bins, patches = ax.hist(data[i], num_bins, density=1, alpha=0.4, color=fill_color[i],
                                             label=legend_labels[i])  # histogram hidden currently

        # add a 'best fit' line
        if best_fit_line == 'powerlaw':
            from scipy.optimize import curve_fit

            def func_powerlaw(x, m, c, c0):
                return c0 + x ** m * c

            target_func = func_powerlaw
            X = np.linspace(bins[0], bins[-1], num_bins)
            y = bin_heights
            popt, pcov = curve_fit(target_func, X, y, maxfev=1000000)
            ax.plot(X, target_func(X, *popt), linewidth=2, c=colors[i], zorder=zorder + i)
            ax.fill_between(X, target_func(X, *popt), color=fill_color[i], zorder=zorder + i, alpha=alpha)
            print(bins)
            print('m, c, c0: \n\t', popt)
            title = 'Histogram density: powerlaw fit'
        elif best_fit_line == 'gaussian':
            # fitting a gaussian
            mu = np.mean(data[i])  # mean of distribution
            sigma = np.std(data[i])  # standard deviation of distribution
            x = np.linspace(bins[0], bins[-1], num_bins * 5)
            popt = ((1 / (np.sqrt(2 * np.pi) * sigma)) *
                    np.exp(-0.5 * (1 / sigma * (x - mu)) ** 2))
            ax.plot(x, popt, linewidth=2, c=colors[i], zorder=zorder + i)
            ax.fill_between(x, popt, color=fill_color[i], zorder=zorder + i, alpha=alpha)
            title = (r': $\mu=%s$, $\sigma=%s$' % (round(mu, 2), round(sigma, 2)))
        else:
            title = ''

        if mean_line:
            ax.axvline(x=np.nanmean(data[i]), c=fill_color[i], linewidth=2, zorder=0, linestyle='dashed')

    if 'x_label' in kwargs and kwargs['x_label'] is not None:
        ax.set_xlabel(kwargs['x_label'])

    if 'y_label' in kwargs and kwargs['y_label'] is not None:
        ax.set_ylabel(kwargs['y_label'])
    elif 'y_label' in kwargs and kwargs['y_label'] is None:
        pass
    else:
        ax.set_ylabel('Probability density')

    if 'show_legend' in kwargs and kwargs['show_legend'] is True:
        ax.legend()

    # set x limits
    if 'x_lim' in kwargs:
        ax.set_xlim(kwargs['x_lim'])

    # setting shrinking factor for font size for title
    shrink_text = kwargs['shrink_text'] if 'shrink_text' in kwargs else 1

    # add title; for a single dataset append the fit summary (e.g. mu/sigma)
    if 'title' in kwargs and kwargs['title'] is not None:
        if len(data) == 1:
            ax.set_title(kwargs['title'] + title, wrap=True,
                         fontsize=12 * shrink_text)
        else:
            ax.set_title(kwargs['title'], wrap=True, fontsize=12 * shrink_text)
    else:
        if len(data) == 1:
            ax.set_title(title)
        else:
            ax.set_title('Histogram density plot')
# if 'show' in kwargs.keys():
# if kwargs['show'] is True:
# # Tweak spacing to prevent clipping of ylabel
# fig.tight_layout()
# fig.show()
# else:
# pass
# else:
# # Tweak spacing to prevent clipping of ylabel
# fig.tight_layout()
# fig.show()
#
# if 'fig' in kwargs.keys():
# return fig, ax
# imshow gray plot for a single frame tiff
def plot_single_tiff(tiff_path: str, title: str = None, frame_num: int = 0):
    """
    Display one frame of a tiff file (read via tifffile) as a grayscale image.

    :param tiff_path: path to the tiff file
    :param title: give a string to use as title (optional)
    :param frame_num: index of the frame to read and display
    :return: the frame that was plotted, as a numpy array
    """
    frame = tf.imread(tiff_path, key=frame_num)
    plt.imshow(frame, cmap='gray')
    # use the caller-supplied title when given, otherwise show the frame index
    suptitle = title if title is not None else 'frame num: %s' % frame_num
    plt.suptitle(suptitle)
    plt.show()
    return frame
############### CALCIUM IMAGING RELATED STUFF ##########################################################################
# paq2py by Llyod Russel
def paq_read(file_path=None, plot=False):
"""
Read PAQ file (from PackIO) into python
Lloyd Russell 2015
Parameters
==========
file_path : str, optional
full path to file to read in. if none is supplied a load file dialog
is opened, buggy on mac osx - Tk/matplotlib. Default: None.
plot : bool, optional
plot the data after reading? Default: False.
Returns
=======
data : ndarray
the data as a m-by-n array where m is the number of channels and n is
the number of datapoints
chan_names : list of str
the names of the channels provided in PackIO
hw_chans : list of str
the hardware lines corresponding to each channel
units : list of str
the units of measurement for each channel
rate : int
the acquisition sample rate, in Hz
"""
# file load gui
if file_path is None:
import Tkinter
import tkFileDialog
root = Tkinter.Tk()
root.withdraw()
file_path = tkFileDialog.askopenfilename()
root.destroy()
# open file
fid = open(file_path, 'rb')
# get sample rate
rate = int(np.fromfile(fid, dtype='>f', count=1))
# get number of channels
num_chans = int(np.fromfile(fid, dtype='>f', count=1))
# get channel names
chan_names = []
for i in range(num_chans):
num_chars = int(np.fromfile(fid, dtype='>f', count=1))
chan_name = ''
for j in range(num_chars):
chan_name = chan_name + chr(np.fromfile(fid, dtype='>f', count=1))
chan_names.append(chan_name)
# get channel hardware lines
hw_chans = []
for i in range(num_chans):
num_chars = int(np.fromfile(fid, dtype='>f', count=1))
hw_chan = ''
for j in range(num_chars):
hw_chan = hw_chan + chr(np.fromfile(fid, dtype='>f', count=1))
hw_chans.append(hw_chan)
# get acquisition units
units = []
for i in range(num_chans):
num_chars = int(np.fromfile(fid, dtype='>f', count=1))
unit = ''
for j in range(num_chars):
unit = unit + chr(np.fromfile(fid, dtype='>f', count=1))
units.append(unit)
# get data
temp_data = np.fromfile(fid, dtype='>f', count=-1)
num_datapoints = int(len(temp_data) / num_chans)
data = np.reshape(temp_data, [num_datapoints, num_chans]).transpose()
# close file
fid.close()
# plot
if plot:
# import matplotlib
# matplotlib.use('QT4Agg')
import matplotlib.pylab as plt
f, axes = plt.subplots(num_chans, 1, sharex=True, figsize=(10, num_chans), frameon=False)
for idx, ax in enumerate(axes):
ax.plot(data[idx])
ax.set_xlim([0, num_datapoints - 1])
ax.set_ylim([data[idx].min() - 1, data[idx].max() + 1])
# ax.set_ylabel(units[idx])
ax.set_title(chan_names[idx])
plt.tight_layout()
plt.show()
return {"data": data,
"chan_names": chan_names,
"hw_chans": hw_chans,
"units": units,
"rate": rate,
"num_datapoints": num_datapoints}
# useful for returning indexes when a
def threshold_detect(signal, threshold):
    '''lloyd russell'''
    # boolean mask of samples above threshold
    above = signal > threshold
    # keep only rising edges: clear samples whose predecessor was also above
    above[1:][above[:-1] & above[1:]] = False
    return np.where(above)[0]
# normalize dFF for 1dim array
def dff(flu, baseline=None):
    """delta F over F ratio (not % dFF )

    :param flu: fluorescence trace(s); 1d array of one trace, or 2d array of
                shape (n_traces, n_frames)
    :param baseline: explicit baseline to normalize against (optional); when
                     None, each trace's own mean is used as the baseline
    :return: array of the same shape as flu
    """
    if baseline is not None:
        flu_dff = (flu - baseline) / baseline
    else:
        # keepdims lets the per-trace mean broadcast against (n, t) input;
        # the original np.mean(flu, 1) produced shape (n,), which fails to
        # broadcast for 2d input and raises for the documented 1d input
        flu_mean = np.mean(flu, axis=-1, keepdims=True)
        flu_dff = (flu - flu_mean) / flu_mean
    return flu_dff
# simple ZProfile function for any sized square in the frame (equivalent to ZProfile function in Fiji)
def ZProfile(movie, area_center_coords: tuple = None, area_size: int = -1, plot_trace: bool = True,
             plot_image: bool = True, plot_frame: int = 1, vasc_image: np.array = None, **kwargs):
    """
    from Sarah Armstrong

    Plot a z-profile of a movie, averaged over space inside a square area.

    :param movie: can be np.array of the TIFF stack or a tiff path from which it is read in
    :param area_center_coords: coordinates of pixel at center of box (x,y); required whenever area_size != -1
    :param area_size: int, length and width of the square in pixels; -1 uses the whole FOV
    :param plot_trace: if True, plot the spatially averaged trace
    :param plot_image: if True, plot the reference frame with the sampled square drawn on it
    :param plot_frame: which movie frame to take as a reference to plot the area boundaries on
    :param vasc_image: optionally include a vasculature image tif of the correct dimensions to plot the coordinates on.
    :param kwargs: optional 'figsize' and 'title' for the trace plot
    :return: 1d np.array, spatial mean of the boxed area for every frame
    """
    if type(movie) is str:
        movie = tf.imread(movie)
    print('plotting zprofile for TIFF of shape: ', movie.shape)

    # assume 15fps for 1024x1024 movies and 30fps imaging for 512x512 movies
    if movie.shape[1] == 1024:
        img_fps = 15
    elif movie.shape[1] == 512:
        img_fps = 30
    else:
        img_fps = None  # unknown resolution: trace x-axis will be in frames, not seconds

    assert area_size <= movie.shape[1] and area_size <= movie.shape[2], "area_size must be smaller than the image"
    if area_size == -1:  # this parameter used to plot whole FOV area
        area_size = movie.shape[1]
        area_center_coords = (movie.shape[1] / 2, movie.shape[2] / 2)
    assert area_size % 2 == 0, "pls give an even area size"

    # box boundaries, half the area size on each side of the center pixel
    x = area_center_coords[0]
    y = area_center_coords[1]
    x1 = int(x - 1 / 2 * area_size)
    x2 = int(x + 1 / 2 * area_size)
    y1 = int(y - 1 / 2 * area_size)
    y2 = int(y + 1 / 2 * area_size)

    # spatial mean of the cropped box, one value per movie frame
    smol_movie = movie[:, y1:y2, x1:x2]
    smol_mean = np.nanmean(smol_movie, axis=(1, 2))
    print('|- Output shape for z profile: ', smol_mean.shape)

    if plot_image:
        f, ax1 = plt.subplots()
        ref_frame = movie[plot_frame, :, :]
        if vasc_image is not None:
            assert vasc_image.shape == movie.shape[1:], 'vasculature image has incompatible dimensions'
            ax1.imshow(vasc_image, cmap="binary_r")
        else:
            ax1.imshow(ref_frame, cmap="binary_r")
        # draw the sampled square on the reference image
        rect1 = patches.Rectangle(
            (x1, y1), area_size, area_size, linewidth=1.5, edgecolor='r', facecolor="none")
        ax1.add_patch(rect1)
        ax1.set_title("Z-profile area")
        plt.show()

    if plot_trace:
        if 'figsize' in kwargs:
            figsize = kwargs['figsize']
        else:
            figsize = [10, 4]
        fig, ax2 = plt.subplots(figsize=figsize)
        # plot against seconds when the frame rate could be inferred, else frames
        if img_fps is not None:
            ax2.plot(np.arange(smol_mean.shape[0]) / img_fps, smol_mean, linewidth=0.5, color='black')
            ax2.set_xlabel('Time (sec)')
        else:
            ax2.plot(smol_mean, linewidth=0.5, color='black')
            ax2.set_xlabel('frames')
        ax2.set_ylabel('Flu (a.u.)')
        if 'title' in kwargs:
            ax2.set_title(kwargs['title'])
        plt.show()

    return smol_mean
def SaveDownsampledTiff(tiff_path: str = None, stack: np.array = None, group_by: int = 4, save_as: str = None,
                        plot_zprofile: bool = True):
    """
    Create and save a downsampled version of the original tiff file. Original tiff file can be given as a numpy array stack
    or a str path to the tiff.

    :param tiff_path: path to the tiff to downsample
    :param stack: numpy array stack of the tiff file already read in
    :param group_by: specified interval for grouped averaging of the TIFF
    :param save_as: path to save the downsampled tiff to, if none provided it will save to the same directory as the provided tiff_path
    :param plot_zprofile: if True, plot the zaxis profile using the full TIFF stack provided.
    :return: numpy array containing the downsampled TIFF stack
    """
    print('downsampling of tiff stack...')

    if save_as is None:
        assert tiff_path is not None, "please provide a save path to save_as"
        save_as = tiff_path[:-4] + '_downsampled.tif'

    if stack is None:
        # open tiff file
        print('|- working on... %s' % tiff_path)
        stack = tf.imread(tiff_path)

    resolution = stack.shape[1]

    # plot zprofile of full TIFF stack
    if plot_zprofile:
        ZProfile(movie=stack, plot_image=True, title=tiff_path)

    # rescale each frame to the 8-bit value range (the holding array keeps the
    # source dtype here; the grouped average below is what gets cast to uint8)
    stack8 = np.full_like(stack, fill_value=0)
    for frame in np.arange(stack.shape[0]):
        stack8[frame] = convert_to_8bit(stack[frame], 0, 255)

    # grouped average by specified interval
    num_frames = stack8.shape[0] // group_by
    avgd_stack = np.empty((num_frames, resolution, resolution), dtype='uint8')
    frame_count = np.arange(0, stack8.shape[0], group_by)
    for i in np.arange(num_frames):
        frame = frame_count[i]
        avgd_stack[i] = np.mean(stack8[frame:frame + group_by], axis=0)
    avgd_stack = avgd_stack.astype(np.uint8)

    # NOTE(review): the original intended to bin >512-resolution stacks down to
    # 512x512 here, but that code was commented out and both branches of the
    # resolution check assigned the same value — the stack is written at its
    # native resolution (dead branch removed).
    final_stack = avgd_stack

    # write output
    print("\nsaving %s to... %s" % (final_stack.shape, save_as))
    tf.imwrite(save_as, final_stack, photometric='minisblack')

    return final_stack
def subselect_tiff(tiff_path: str = None, tiff_stack: np.array = None, select_frames: tuple = (0, 0),
                   save_as: str = None):
    """
    Crop a tiff stack to the given frame range, optionally writing the result to disk.

    :param tiff_path: path to a tiff to read in (used only when tiff_stack is not given)
    :param tiff_stack: stack already loaded as a numpy array (optional)
    :param select_frames: (start, end) indices used to slice the stack
    :param save_as: if provided, write the cropped stack to this path
    :return: the cropped stack
    """
    if tiff_stack is None:
        # open tiff file
        print('running subselecting tiffs')
        print('|- working on... %s' % tiff_path)
        tiff_stack = tf.imread(tiff_path)

    start, end = select_frames
    stack_cropped = tiff_stack[start:end]

    if save_as is not None:
        tf.imwrite(save_as, stack_cropped, photometric='minisblack')

    return stack_cropped
def make_tiff_stack(sorted_paths: list, save_as: str):
    """
    read in a bunch of tiffs and stack them together, and save the output as the save_as

    :param sorted_paths: list of string paths for tiffs to stack
    :param save_as: .tif file path to where the tif should be saved
    :return: the stacked data as a numpy array
    """
    num_tiffs = len(sorted_paths)
    print('working on tifs to stack: ', num_tiffs)

    # read every source tiff first; collecting arrays in a list and concatenating
    # once avoids the O(n^2) np.append-per-file of the original
    arrays = []
    for i, tif_path in enumerate(sorted_paths):
        with tf.TiffFile(tif_path, multifile=True) as input_tif:
            data = input_tif.asarray()
        msg = ' -- Writing tiff: ' + str(i + 1) + ' out of ' + str(num_tiffs) + f' to {save_as}'
        print(msg, end='\r')
        arrays.append(data)
    data_arr = np.concatenate(arrays, axis=0) if arrays else None

    # single write of the full stack; the original wrote every tiff incrementally
    # via the deprecated TiffWriter.save and then overwrote the same file with
    # this full-array write anyway
    tf.imwrite(save_as, data_arr, bigtiff=True)
    return data_arr
def convert_to_8bit(img, target_type_min=0, target_type_max=255):
    """
    Linearly rescale an image into the given target range and cast to uint8.

    :param img: input image array
    :param target_type_min: lower bound of the output range (default 0)
    :param target_type_max: upper bound of the output range (default 255)
    :return: uint8 numpy array with values mapped into [target_type_min, target_type_max]
    """
    imin = img.min()
    imax = img.max()
    if imax == imin:
        # constant image: the original divided by zero here; map every pixel
        # to the bottom of the target range instead
        return np.full_like(img, target_type_min, dtype=np.uint8)
    # linear map: imin -> target_type_min, imax -> target_type_max
    a = (target_type_max - target_type_min) / (imax - imin)
    b = target_type_max - a * imax
    new_img = (a * img + b).astype(np.uint8)
    return new_img
#######
#### UTILS
def dataplot_frame_options():
    """Apply the shared matplotlib rcParams and seaborn styling used for data plots."""
    import matplotlib as mpl
    # global rcParams: hide top/right spines, enlarge fonts, tighten subplot spacing
    mpl.rcParams.update({
        'axes.spines.top': False,
        'axes.spines.right': False,
        'legend.fontsize': 'x-large',
        'axes.labelsize': 'x-large',
        'axes.titlesize': 'x-large',
        'xtick.labelsize': 'x-large',
        'ytick.labelsize': 'x-large',
        'legend.frameon': False,
        'figure.subplot.wspace': .01,
        'figure.subplot.hspace': .01,
    })
    # NOTE(review): sns.set() applies seaborn's default theme and may override
    # some of the rcParams set just above — confirm this call order is intentional
    sns.set()
    sns.set_style('white')
def lineplot_frame_options(fig, ax, x_label='distance to target (um)', y_label='influence of photostim'):
    """
    Apply the standard line-plot frame styling to an axes.

    :param fig: matplotlib figure (unused; kept for interface compatibility)
    :param ax: matplotlib axes to style
    :param x_label: x axis label; default preserves the previously hard-coded label
    :param y_label: y axis label; default preserves the previously hard-coded label
    """
    sns.set()
    sns.set_style('white')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.margins(0)
    # fixed: the original accepted x_label/y_label but ignored them, always using
    # the hard-coded strings (those strings are now the parameter defaults)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
"""This is an experimental implementation of cc_shared_library.
We may change the implementation at any moment or even delete this file. Do not
rely on this. It requires bazel >1.2 and passing the flag
--experimental_cc_shared_library
"""
load("//cc:find_cc_toolchain.bzl", "find_cc_toolchain")
# TODO(#5200): Add export_define to library_to_link and cc_library
# Add this as a tag to any target that can be linked by more than one
# cc_shared_library because it doesn't have static initializers or anything
# else that may cause issues when being linked more than once. This should be
# used sparingly after making sure it's safe to use.
# Tag marking a target as safe to link into more than one cc_shared_library.
LINKABLE_MORE_THAN_ONCE = "LINKABLE_MORE_THAN_ONCE"

# Prefix for the tag that lists which targets may export a library.
_EXPORTED_BY_TAG_BEGINNING = "exported_by="

def exported_by(labels):
    """Builds the 'exported_by=' tag value from a list of label strings."""
    return _EXPORTED_BY_TAG_BEGINNING + ",".join([label for label in labels])
# Per-target node of the dependency graph gathered by an aspect; used to decide
# which libraries get linked statically vs. dynamically.
GraphNodeInfo = provider(
    fields = {
        "children": "Other GraphNodeInfo from dependencies of this target",
        "exported_by": "Labels of targets that can export the library of this node",
        "label": "Label of the target visited",
        "linkable_more_than_once": "Linkable into more than a single cc_shared_library",
    },
)

# Provider exposed by a cc_shared_library so dependent shared libraries and
# binaries can reason about its exports and statically linked contents.
CcSharedLibraryInfo = provider(
    fields = {
        "dynamic_deps": "All shared libraries depended on transitively",
        "exports": "cc_libraries that are linked statically and exported",
        "link_once_static_libs": "All libraries linked statically into this library that should " +
                                 "only be linked once, e.g. because they have static " +
                                 "initializers. If we try to link them more than once, " +
                                 "we will throw an error",
        "linker_input": "the resulting linker input artifact for the shared library",
        "preloaded_deps": "cc_libraries needed by this cc_shared_library that should" +
                          " be linked the binary. If this is set, this cc_shared_library has to " +
                          " be a direct dependency of the cc_binary",
    },
)
def _separate_static_and_dynamic_link_libraries(
        direct_children,
        can_be_linked_dynamically,
        preloaded_deps_direct_labels):
    """Walks the GraphNodeInfo tree and classifies each node's label.

    Returns a tuple (link_statically_labels, link_dynamically_labels):
    link_dynamically_labels maps labels found in can_be_linked_dynamically to
    True; link_statically_labels maps every other non-preloaded label to its
    linkable_more_than_once flag.
    """
    node = None
    all_children = list(direct_children)
    link_statically_labels = {}
    link_dynamically_labels = {}
    seen_labels = {}

    # Horrible I know. Perhaps Starlark team gives me a way to prune a tree.
    # (Starlark has no while loop, so iterate a large bounded range and break.)
    for i in range(1, 2147483647):
        if len(all_children) == 0:
            break
        node = all_children.pop(0)
        node_label = str(node.label)

        # Prune already-visited labels: re-classifying a label produces the same
        # result, so skipping its subtree preserves the output while avoiding the
        # original's re-walk of shared subtrees (exponential on diamond graphs).
        if node_label in seen_labels:
            continue
        seen_labels[node_label] = True

        if node_label in can_be_linked_dynamically:
            link_dynamically_labels[node_label] = True
        elif node_label not in preloaded_deps_direct_labels:
            link_statically_labels[node_label] = node.linkable_more_than_once
        all_children.extend(node.children)
    return (link_statically_labels, link_dynamically_labels)
def _create_linker_context(ctx, linker_inputs):
    """Wraps the given linker inputs in a CcLinkingContext, in topological order."""
    return cc_common.create_linking_context(
        linker_inputs = depset(linker_inputs, order = "topological"),
    )
def _merge_cc_shared_library_infos(ctx):
    """Merges the CcSharedLibraryInfos of all direct dynamic_deps.

    Returns a depset of (exports, linker_input, link_once_static_libs) tuples:
    one direct entry per dynamic_dep, plus all of their transitive dynamic deps.
    Fails if any dynamic_dep declares preloaded_deps, since such libraries must
    be direct dependencies of the cc_binary.
    """
    dynamic_deps = []
    transitive_dynamic_deps = []
    for dep in ctx.attr.dynamic_deps:
        if dep[CcSharedLibraryInfo].preloaded_deps != None:
            # fixed: the original applied .format() to only the last string
            # fragment of the concatenation, so the {} placeholder was never
            # substituted into the error message
            fail(("{} can only be a direct dependency of a " +
                  "cc_binary because it has " +
                  "preloaded_deps").format(str(dep.label)))
        dynamic_dep_entry = (
            dep[CcSharedLibraryInfo].exports,
            dep[CcSharedLibraryInfo].linker_input,
            dep[CcSharedLibraryInfo].link_once_static_libs,
        )
        dynamic_deps.append(dynamic_dep_entry)
        transitive_dynamic_deps.append(dep[CcSharedLibraryInfo].dynamic_deps)

    return depset(direct = dynamic_deps, transitive = transitive_dynamic_deps)
def _build_exports_map_from_only_dynamic_deps(merged_shared_library_infos):
    """Maps each exported label to the linker_input of the shared library exporting it.

    Fails when two shared libraries in the dependencies export the same label.
    """
    exports_map = {}
    for entry in merged_shared_library_infos.to_list():
        linker_input = entry[1]
        for export in entry[0]:
            if export in exports_map:
                fail("Two shared libraries in dependencies export the same symbols. Both " +
                     exports_map[export].libraries[0].dynamic_library.short_path +
                     " and " + linker_input.dynamic_library.short_path +
                     " export " + export)
            exports_map[export] = linker_input
    return exports_map
def _build_link_once_static_libs_map(merged_shared_library_infos):
    """Maps each link-once static library to the shared library that already links it.

    Fails when two shared libraries in the dependencies both link the same
    link-once static library.
    """
    link_once_static_libs_map = {}
    for entry in merged_shared_library_infos.to_list():
        linker_input = entry[1]
        for static_lib in entry[2]:
            if static_lib in link_once_static_libs_map:
                fail("Two shared libraries in dependencies link the same " +
                     " library statically. Both " + link_once_static_libs_map[static_lib] +
                     " and " + str(linker_input.owner) +
                     " link statically" + static_lib)
            link_once_static_libs_map[static_lib] = str(linker_input.owner)
    return link_once_static_libs_map
def _wrap_static_library_with_alwayslink(ctx, feature_configuration, cc_toolchain, linker_input):
    """Rebuilds linker_input with every static library marked alwayslink = True,
    so exported libraries are linked whole-archive into the shared library."""
    new_libraries_to_link = []
    for old_library_to_link in linker_input.libraries:
        # TODO(#5200): This will lose the object files from a library to link.
        # Not too bad for the prototype but as soon as the library_to_link
        # constructor has object parameters this should be changed.
        new_library_to_link = cc_common.create_library_to_link(
            actions = ctx.actions,
            feature_configuration = feature_configuration,
            cc_toolchain = cc_toolchain,
            static_library = old_library_to_link.static_library,
            pic_static_library = old_library_to_link.pic_static_library,
            alwayslink = True,
        )
        new_libraries_to_link.append(new_library_to_link)

    # keep the owner, user link flags and additional inputs; only the
    # libraries themselves are replaced
    return cc_common.create_linker_input(
        owner = linker_input.owner,
        libraries = depset(direct = new_libraries_to_link),
        user_link_flags = depset(direct = linker_input.user_link_flags),
        additional_inputs = depset(direct = linker_input.additional_inputs),
    )
def _check_if_target_under_path(value, pattern):
    """Returns True when `value` matches `pattern` (a concrete label, a
    package pattern __pkg__, or a recursive pattern __subpackages__)."""
    # different workspaces never match
    if value.workspace_name != pattern.workspace_name:
        return False
    pattern_name = pattern.name
    same_package = pattern.package == value.package
    if pattern_name == "__pkg__":
        # __pkg__ matches every target in exactly that package
        return same_package
    if pattern_name == "__subpackages__":
        # __subpackages__ matches the package and everything below it
        return _same_package_or_above(pattern, value)
    # otherwise the pattern is a concrete label: exact package + name match
    return same_package and pattern_name == value.name
def _filter_inputs(
        ctx,
        feature_configuration,
        cc_toolchain,
        transitive_exports,
        preloaded_deps_direct_labels,
        link_once_static_libs_map):
    """Decides, for every transitive dependency, how it will be linked.

    Returns a tuple of:
      * linker_inputs: what this shared library links — either a dependency's
        own linker_input (static linking) or the linker_input of the shared
        library in dynamic_deps that already exports it (dynamic linking).
      * link_once_static_libs: owners linked statically here that must not be
        linked into any other shared library.
    Fails when a dependency is already linked statically into another shared
    library without being exported, or cannot be linked either way.
    """
    linker_inputs = []
    link_once_static_libs = []
    graph_structure_aspect_nodes = []
    dependency_linker_inputs = []
    direct_exports = {}
    for export in ctx.attr.exports:
        direct_exports[str(export.label)] = True
        dependency_linker_inputs.extend(export[CcInfo].linking_context.linker_inputs.to_list())
        graph_structure_aspect_nodes.append(export[GraphNodeInfo])

    # A dependency can be linked dynamically when some cc_shared_library in
    # dynamic_deps already exports it.
    can_be_linked_dynamically = {}
    for linker_input in dependency_linker_inputs:
        owner = str(linker_input.owner)
        if owner in transitive_exports:
            can_be_linked_dynamically[owner] = True

    (link_statically_labels, link_dynamically_labels) = _separate_static_and_dynamic_link_libraries(
        graph_structure_aspect_nodes,
        can_be_linked_dynamically,
        preloaded_deps_direct_labels,
    )

    owners_seen = {}
    for linker_input in dependency_linker_inputs:
        owner = str(linker_input.owner)
        if owner in owners_seen:
            # The same owner can reach us through several exports.
            continue
        owners_seen[owner] = True
        if owner in link_dynamically_labels:
            # Reuse the linker_input of the shared library exporting this owner.
            linker_inputs.append(transitive_exports[owner])
        elif owner in link_statically_labels:
            if owner in link_once_static_libs_map:
                fail(owner + " is already linked statically in " +
                     link_once_static_libs_map[owner] + " but not exported")
            if owner in direct_exports:
                # Exported libraries must keep all their symbols: wrap their
                # static libraries with alwayslink.
                wrapped_library = _wrap_static_library_with_alwayslink(
                    ctx,
                    feature_configuration,
                    cc_toolchain,
                    linker_input,
                )
                # link_statically_labels[owner] is truthy when the target may
                # be linked more than once — presumably the
                # linkable_more_than_once tag; TODO confirm upstream.
                if not link_statically_labels[owner]:
                    link_once_static_libs.append(owner)
                linker_inputs.append(wrapped_library)
            else:
                # Non-exported static dependencies must be claimed by an
                # entry in static_deps. (Removed an unused `owner_label`
                # local that duplicated linker_input.owner.)
                can_be_linked_statically = False
                for static_dep_path in ctx.attr.static_deps:
                    static_dep_path_label = ctx.label.relative(static_dep_path)
                    if _check_if_target_under_path(linker_input.owner, static_dep_path_label):
                        can_be_linked_statically = True
                        break
                if can_be_linked_statically:
                    if not link_statically_labels[owner]:
                        link_once_static_libs.append(owner)
                    linker_inputs.append(linker_input)
                else:
                    fail("We can't link " +
                         str(owner) + " either statically or dynamically")
    return (linker_inputs, link_once_static_libs)
def _same_package_or_above(label_a, label_b):
    """True when label_b's package equals label_a's package or is below it."""
    if label_a.workspace_name != label_b.workspace_name:
        return False
    path_a = label_a.package.split("/")
    path_b = label_b.package.split("/")
    if len(path_b) < len(path_a):
        return False
    # zip truncates to path_a's length, so only the shared prefix is compared.
    return all([seg_a == seg_b for seg_a, seg_b in zip(path_a, path_b)])
def _cc_shared_library_impl(ctx):
    """Implementation of the cc_shared_library rule.

    Links the exported libraries (plus static dependencies allowed by
    static_deps) into one dynamic library, reusing libraries already
    exported by the cc_shared_library targets listed in dynamic_deps.
    """
    cc_common.check_experimental_cc_shared_library()
    cc_toolchain = find_cc_toolchain(ctx)
    feature_configuration = cc_common.configure_features(
        ctx = ctx,
        cc_toolchain = cc_toolchain,
        requested_features = ctx.features,
        unsupported_features = ctx.disabled_features,
    )

    merged_cc_shared_library_info = _merge_cc_shared_library_infos(ctx)
    exports_map = _build_exports_map_from_only_dynamic_deps(merged_cc_shared_library_info)
    for export in ctx.attr.exports:
        # Every export must be new (not already exported by a dynamic_dep).
        if str(export.label) in exports_map:
            fail("Trying to export a library already exported by a different shared library: " +
                 str(export.label))
        # A target may be exported if it is in this package/subpackage...
        can_be_exported = _same_package_or_above(ctx.label, export.label)
        if not can_be_exported:
            # ...or if its exported_by tag names this rule (or an ancestor
            # package pattern covering it).
            for exported_by in export[GraphNodeInfo].exported_by:
                exported_by_label = Label(exported_by)
                if _check_if_target_under_path(ctx.label, exported_by_label):
                    can_be_exported = True
                    break
        if not can_be_exported:
            fail(str(export.label) + " cannot be exported from " + str(ctx.label) +
                 " because it's not in the same package/subpackage or the library " +
                 "to be exported doesn't have this cc_shared_library in the exported_by tag.")

    # Merge the CcInfos of preloaded_deps and remember their labels so they
    # are excluded from the linking decisions in _filter_inputs.
    preloaded_deps_direct_labels = {}
    preloaded_dep_merged_cc_info = None
    if len(ctx.attr.preloaded_deps) != 0:
        preloaded_deps_cc_infos = []
        for preloaded_dep in ctx.attr.preloaded_deps:
            preloaded_deps_direct_labels[str(preloaded_dep.label)] = True
            preloaded_deps_cc_infos.append(preloaded_dep[CcInfo])
        preloaded_dep_merged_cc_info = cc_common.merge_cc_infos(cc_infos = preloaded_deps_cc_infos)

    link_once_static_libs_map = _build_link_once_static_libs_map(merged_cc_shared_library_info)
    (linker_inputs, link_once_static_libs) = _filter_inputs(
        ctx,
        feature_configuration,
        cc_toolchain,
        exports_map,
        preloaded_deps_direct_labels,
        link_once_static_libs_map,
    )

    linking_context = _create_linker_context(ctx, linker_inputs)

    # Expand $(location ...) references against additional_linker_inputs.
    user_link_flags = []
    for user_link_flag in ctx.attr.user_link_flags:
        user_link_flags.append(ctx.expand_location(user_link_flag, targets = ctx.attr.additional_linker_inputs))

    linking_outputs = cc_common.link(
        actions = ctx.actions,
        feature_configuration = feature_configuration,
        cc_toolchain = cc_toolchain,
        linking_contexts = [linking_context],
        user_link_flags = user_link_flags,
        additional_inputs = ctx.files.additional_linker_inputs,
        name = ctx.label.name,
        output_type = "dynamic_library",
    )

    # Runfiles: the produced .so plus the data runfiles of every dynamic_dep.
    runfiles = ctx.runfiles(
        files = [linking_outputs.library_to_link.resolved_symlink_dynamic_library],
    )
    for dep in ctx.attr.dynamic_deps:
        runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)

    exports = []
    for export in ctx.attr.exports:
        exports.append(str(export.label))

    return [
        DefaultInfo(
            files = depset([linking_outputs.library_to_link.resolved_symlink_dynamic_library]),
            runfiles = runfiles,
        ),
        CcSharedLibraryInfo(
            dynamic_deps = merged_cc_shared_library_info,
            exports = exports,
            link_once_static_libs = link_once_static_libs,
            linker_input = cc_common.create_linker_input(
                owner = ctx.label,
                libraries = depset([linking_outputs.library_to_link]),
            ),
            preloaded_deps = preloaded_dep_merged_cc_info,
        ),
    ]
def _graph_structure_aspect_impl(target, ctx):
    """Builds a GraphNodeInfo describing target's deps and relevant tags."""
    children = []
    if hasattr(ctx.rule.attr, "deps"):
        for dep in ctx.rule.attr.deps:
            if GraphNodeInfo in dep:
                children.append(dep[GraphNodeInfo])

    exported_by = []
    linkable_more_than_once = False
    if hasattr(ctx.rule.attr, "tags"):
        for tag in ctx.rule.attr.tags:
            if tag.startswith(_EXPORTED_BY_TAG_BEGINNING) and len(tag) > len(_EXPORTED_BY_TAG_BEGINNING):
                # BUG FIX: this loop variable used to be named `target`,
                # shadowing the aspect's `target` parameter.
                for exported_by_target in tag[len(_EXPORTED_BY_TAG_BEGINNING):].split(","):
                    # Only absolute labels allowed. Targets in same package
                    # or subpackage can be exported anyway.
                    if not exported_by_target.startswith("//") and not exported_by_target.startswith("@"):
                        fail("Labels in exported_by of " + str(exported_by_target) +
                             " must be absolute.")
                    Label(exported_by_target)  # Checking syntax is ok.
                    exported_by.append(exported_by_target)
            elif tag == LINKABLE_MORE_THAN_ONCE:
                linkable_more_than_once = True

    return [GraphNodeInfo(
        label = ctx.label,
        children = children,
        exported_by = exported_by,
        linkable_more_than_once = linkable_more_than_once,
    )]
# Aspect that walks every attribute edge to record the dependency graph shape
# (children, exported_by tags, linkable_more_than_once) that the
# cc_shared_library implementation needs to decide how each dep is linked.
graph_structure_aspect = aspect(
    attr_aspects = ["*"],
    implementation = _graph_structure_aspect_impl,
)

# Experimental rule that links one dynamic library from `exports` (and the
# static dependencies matched by `static_deps`), reusing libraries already
# exported by the cc_shared_library targets in `dynamic_deps`.
cc_shared_library = rule(
    implementation = _cc_shared_library_impl,
    attrs = {
        "additional_linker_inputs": attr.label_list(allow_files = True),
        "dynamic_deps": attr.label_list(providers = [CcSharedLibraryInfo]),
        "exports": attr.label_list(providers = [CcInfo], aspects = [graph_structure_aspect]),
        "preloaded_deps": attr.label_list(providers = [CcInfo]),
        "static_deps": attr.string_list(),
        "user_link_flags": attr.string_list(),
        "_cc_toolchain": attr.label(default = "@bazel_tools//tools/cpp:current_cc_toolchain"),
    },
    toolchains = ["@rules_cc//cc:toolchain_type"],  # copybara-use-repo-external-label
    fragments = ["cpp"],
)

# Exposed for tests only; production code must not use the private helper.
for_testing_dont_use_check_if_target_under_path = _check_if_target_under_path
|
import numpy as np
from numpy.linalg import norm
import h5py as hp
import subprocess
import os
class WrapperCelestlab:
    """Wrapper to run CelestLab (Scilab toolbox) scripts from Python."""

    def __init__(self, scilab_path=None, celestlab_loader="loader_celestlab.sce"):
        """Record the Scilab install location and the CelestLab loader script.

        Parameters
        ----------
        scilab_path : str
            Directory containing the Scilab executables (required).
        celestlab_loader : str
            Path of the CelestLab loader script handed to Scilab.

        Raises
        ------
        RuntimeError
            If ``scilab_path`` is None or is not a directory.
        """
        self.celestlab_loader = celestlab_loader
        # BUG FIX: check for None before os.path.isdir — isdir(None) raises
        # TypeError, so the explicit "must be provided" error never fired.
        if scilab_path is None:
            raise RuntimeError("The scilab executable need to be provided")
        if not os.path.isdir(scilab_path):
            raise RuntimeError("The `scilab_path` argument is not a directory")
        self.scilab_path = scilab_path
        if os.name == "nt":
            # Windows: use the console binary to launch Scilab without a window.
            self.scilab_exec = self.scilab_path + "/WScilex-cli.exe"
        elif os.name == "posix":
            # Linux: use the CLI binary. (Removed a dead assignment that set
            # "/scilab -nw" and was immediately overwritten.)
            self.scilab_exec = self.scilab_path + "/scilab-cli"
        self.celestlab_exec = "Celestlab_loader=" + self.celestlab_loader + " " + self.scilab_exec
        print(self.celestlab_exec)

    def get_default_params(self):
        """Return a dict with the default orbit/simulation parameters."""
        params = {
            "year": 2020,
            "month": 6,
            "day": 15,
            "hour": 12,
            "minutes": 30,
            "outputFileName": "results.h5",
            "sma": 7200.0e3,  # semi major axis
            "ecc": 1.0e-3,  # eccentricity
            "inc": 98 * np.pi / 180,  # inclination
            "pom": np.pi / 2,  # Argument of perigee
            "mlh": 10.5,  # MLTAN(hours)(mean local time of ascending node))
            "anm": 0,  # Mean anomaly
            "number_days": 1,  # the total number of days simulated
            "timestep": 3 / 86400,  # The time step of the simulations
        }
        return params

    def write_paramerter_file(self, other_params):
        """Write the parameter file read by the CelestLab script.

        CelestLab reads the values positionally, so the write order below is
        significant and must match the .sce script. (The method name keeps
        its historical misspelling for backward compatibility.)

        Raises RuntimeError for any key not present in the defaults.
        """
        params = self.get_default_params()
        for k, v in other_params.items():
            if k not in params:
                raise RuntimeError(
                    f"Parameter {k} is not understood. \n Available parameters are {params.keys()}"
                )
            params[k] = v
        filename = "./parameters.txt"
        # NOTE: "pom" is intentionally absent here — presumably the script
        # derives it; confirm against the .sce side before adding it.
        parameter_key_list = [
            "year",
            "month",
            "day",
            "hour",
            "minutes",
            "sma",
            "ecc",
            "inc",
            "mlh",
            "anm",
            "number_days",
            "timestep",
            "outputFileName",
        ]
        with open(filename, "w") as f:
            for k in parameter_key_list:
                f.write(f"{params[k]}\n")

    def launch_celestlab(self, scriptname="crocus_power.sce"):
        """Launch the CelestLab script through Scilab with subprocess."""
        command = self.scilab_exec + " -f " + scriptname
        my_env = os.environ.copy()
        # Disable libGL hardware acceleration used by Scilab.
        my_env["LIBGL_ALWAYS_SOFTWARE"] = "1"
        my_env["Celestlab_loader"] = self.celestlab_loader
        p = subprocess.Popen(command.split(), env=my_env)
        p.wait()

    def read_celestlab_results(self, filename="./results.h5", dataset_keys=["Sun_dir", "pos_ecf", "interv"]):
        """Read the HDF5 results file dumped by the CelestLab script.

        Returns the transposed datasets named in ``dataset_keys`` followed by
        the (flattened) ``cjd`` time vector.
        """
        data = []
        with hp.File(filename, "r") as f:
            data = [f[k][()].T for k in dataset_keys]
            cjd = f["cjd"][()]
            cjd = cjd[:, 0]
        return *data, cjd
|
'''
researcher.py: representation of a researcher record
Authors
-------
Michael Hucka <mhucka@caltech.edu> -- Caltech Library
Copyright
---------
Copyright (c) 2019 by the California Institute of Technology. This code is
open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
from .author import Author
from .data_helpers import objattr, set_objattr, new_object
from .debug import log
from .exceptions import *
from .organization import Organization
from .person import Person
class Researcher(Person):
    """A Person specialized with researcher data: affiliations and a role.

    Can be built either from raw record data (like Person) or by upconverting
    an existing Author object obtained from a publication search.
    """

    # Attributes this class adds on top of what Person already tracks.
    _new_attributes = ['affiliations', 'role']
    _attributes = _new_attributes + Person._attributes

    def __init__(self, data, creator = None, dimensions_obj = None):
        if isinstance(data, Author):
            # We're given an author object, probably obtained from a pub search,
            # and we want to fill it out to create a Researcher object.
            if __debug__: log('converting Author {} to Researcher', id(data))
            dimensions = objattr(data, '_dimensions', None)
            super().__init__(data._orig_data, data, dimensions)
        else:
            # This is a standard initialization, not a case of upconverting.
            super().__init__(data, creator, dimensions_obj)

    def _lazy_expand(self, data):
        # Be careful not to invoke "self.x" b/c it causes infinite recursion.
        if __debug__: log('expanding attributes on {} using {}', id(self), data)
        super()._lazy_expand(data)
        # When researcher data comes from a grant, there may be a 'role' field.
        set_objattr(self, 'role', data.get('role', ''), overwrite = True)
        # Look up the bound method via objattr (not self.x) for the same
        # recursion reason as above.
        set_affiliations = objattr(self, '_set_affiliations')
        set_affiliations(data)

    def _fill_record(self, data):
        # Be careful not to invoke "self.x" b/c it causes infinite recursion.
        if __debug__: log('filling object {} using {}', id(self), data)
        set_affiliations = objattr(self, '_set_affiliations')
        set_affiliations(data)

    def _set_affiliations(self, data, field_name = 'research_orgs'):
        # Merge organizations from data[field_name] into self.affiliations,
        # deduplicating on organization id. Two input shapes are handled:
        # a list of id strings, or a list of dicts with org field/value data.
        if field_name not in data or len(data[field_name]) == 0:
            return
        affiliations = objattr(self, 'affiliations', [])
        dimensions = objattr(self, '_dimensions', None)
        if isinstance(data[field_name][0], str):
            # Case 1: it's a list of grid id's.
            for org_id in data[field_name]:
                for existing_org in affiliations:
                    if org_id == existing_org.id:
                        # Nothing more to do, b/c all we have is the id.
                        break
                else: # This 'else' is for the inner 'for' loop, not the 'if' stmt.
                    affiliations.append(new_object(Organization, {'id': org_id}, dimensions, self))
        else:
            # Case 2: it's a list of dict's containing org field/value data.
            for org_data in data[field_name]:
                for existing_org in affiliations:
                    if org_data['id'] == existing_org.id:
                        # Known org: fill in fields we didn't have before.
                        existing_org._set_attributes(org_data, overwrite = False)
                        break
                else: # This 'else' is for the inner 'for' loop, not the 'if' stmt.
                    affiliations.append(new_object(Organization, org_data, dimensions, self))
        set_objattr(self, 'affiliations', affiliations, overwrite = True)
|
from pyramid import threadlocal
from abc import abstractproperty
from sqlalchemy import and_, or_, asc, orm, event
from sqlalchemy_utils import get_hybrid_properties
from sqlalchemy.ext.declarative import declared_attr
import json
from ..core import Base
from ..GenericObjets import BusinessRules
from .frontmodules import FrontModules, ModuleForms, ModuleGrids
from ..utils.parseValue import parser, formatValue
class ConfiguredDbObjectMapped(object):
    """Mixin that builds front-end form/grid/filter descriptions.

    The layout is read from configuration tables (FrontModules, ModuleForms,
    ModuleGrids); subclasses declare which module rows describe them via the
    two abstract properties below.
    """

    @abstractproperty
    def moduleFormName(self):
        """Name of the FrontModules row holding this object's form config."""
        pass

    @abstractproperty
    def moduleGridName(self):
        """Name of the FrontModules row holding this object's grid config."""
        pass

    def __init__(self):
        self.session = threadlocal.get_current_request().dbsession

    def getConf(self, moduleName=None):
        """Return the FrontModules row for moduleName (defaults to the form module)."""
        if not moduleName:
            moduleName = self.moduleFormName
        return self.session.query(FrontModules).filter(
            FrontModules.Name == moduleName).first()

    def getForm(self, displayMode='edit', type_=None, moduleName=None, isGrid=False):
        """Build the form description: field schema, fieldsets and defaults."""
        schema = {}
        if not self.ID:
            # New object: force creation mode. (Removed the unused
            # `Editable` local that was computed from displayMode.)
            displayMode = 'create'
        fields = self.session.query(ModuleForms).filter(
            and_(ModuleForms.Module_ID == self.getConf(moduleName).ID,
                 ModuleForms.FormRender > 0))
        if type_:
            # '== None' is intentional: SQLAlchemy renders it as IS NULL.
            fields = fields.filter(or_(ModuleForms.TypeObj == type_,
                                       ModuleForms.TypeObj == None))
        # BUG FIX: the ordered list used to be discarded (bare
        # `fields.order_by(...).all()`), so fields were iterated unordered;
        # keep the result, as getGrid()/getFilters() already do.
        fields = fields.order_by(ModuleForms.FormOrder).all()
        for field in fields:
            # CurModuleForms = list(
            #     filter(lambda x: field.Name == x.key, self.__table__.columns))
            # TODO: restore the real column check above; [1] accepts all.
            CurModuleForms = [1]
            if (len(CurModuleForms) > 0):
                schema[field.Name] = field.GetDTOFromConf(
                    displayMode, isGrid=isGrid)
        form = {'schema': schema,
                'fieldsets': self.sortFieldsets(fields),
                'grid': isGrid,
                'recursive_level': 0
                }
        form = self.getDefaultValue(form)
        # recursive_level is only bookkeeping for getDefaultValue.
        del form['recursive_level']
        return form

    def sortFieldsets(self, fields):
        ''' return ordered FieldSets according to configuration '''
        sortedFieldsets = []
        Legends = sorted([(obj.Legend, obj.FormOrder, obj.Name)
                          for obj in fields if obj.FormOrder is not None],
                         key=lambda x: x[1])
        Unique_Legends = list()
        # Get distinct Fieldset legends in correct order.
        for x in Legends:
            if x[0] not in Unique_Legends:
                Unique_Legends.append(x[0])
        for curLegend in Unique_Legends:
            sortedFieldsets.append({'fields': [], 'legend': curLegend})
        # Assign each field to its legend's fieldset, keeping field order.
        for curProp in Legends:
            curIndex = Unique_Legends.index(curProp[0])
            sortedFieldsets[curIndex]['fields'].append(curProp[2])
        return sortedFieldsets

    def getGrid(self, type_=None, moduleName=None):
        """Return the grid column definitions, ordered by GridOrder."""
        gridFields = self.session.query(ModuleGrids).filter(
            and_(ModuleGrids.Module_ID == self.getConf(moduleName).ID,
                 ModuleGrids.GridRender > 0))
        if type_:
            gridFields = gridFields.filter(or_(ModuleGrids.TypeObj == type_,
                                               ModuleGrids.TypeObj == None))
        gridFields = gridFields.order_by(asc(ModuleGrids.GridOrder)).all()
        return [curConf.GenerateColumn() for curConf in gridFields]

    def getFilters(self, type_=None, moduleName=None):
        ''' Function to call : return Name and Type of Filters to display in front end
        according to configuration in table ModuleGrids'''
        filters = []
        filterFields = self.session.query(ModuleGrids).filter(
            ModuleGrids.Module_ID == self.getConf(moduleName).ID)
        if type_:
            filterFields = filterFields.filter(or_(ModuleGrids.TypeObj == type_,
                                                   ModuleGrids.TypeObj == None))
        filterFields = filterFields.order_by(asc(ModuleGrids.FilterOrder)).all()
        for curConf in filterFields:
            # Searchable columns always get a filter; otherwise only named
            # queries whose filter rendering is enabled.
            if curConf.IsSearchable:
                filters.append(curConf.GenerateFilter())
            elif curConf.QueryName is not None and curConf.FilterRender != 0:
                filters.append(curConf.GenerateFilter())
        return filters

    def getDefaultValue(self, form):
        """Collect non-null defaultValue entries from the schema.

        Recurses into subschemas; at the top level the collected values are
        stored under form['schema']['defaultValues'], while recursive calls
        return the plain dict of defaults.
        """
        defaultValues = {}
        recursive_level = form['recursive_level']
        for key, value in form['schema'].items():
            if 'defaultValue' in value and value['defaultValue'] is not None:
                defaultValues[key] = value['defaultValue']
            if 'subschema' in value:
                temp = {'schema': value['subschema'], 'defaultValues': {
                }, 'recursive_level': recursive_level + 1}
                subData = self.getDefaultValue(temp)
                form['schema'][key]['subschema']['defaultValues'] = subData
        if recursive_level < 1:
            form['schema']['defaultValues'] = defaultValues
        else:
            form = defaultValues
        return form
class DbObject(object):
    """Base mixin for DB-mapped objects.

    Adds dynamic (non-column) properties, object flattening, and business
    rules fired around SQLAlchemy insert/update/delete events.
    """

    def __init__(self):
        self.__constraintFunctionList__ = []
        # Dynamic (non-column) properties, keyed by property name.
        self.__properties__ = {}
        self.session = threadlocal.get_current_request().dbsession

    @orm.reconstructor
    def init_on_load(self):
        ''' init_on_load is called on the fetch of object '''
        self.__init__()

    def getProperty(self, nameProp):
        """Return a static attribute when it exists, else the dynamic property."""
        if hasattr(self, nameProp):
            return getattr(self, nameProp)
        else:
            return self.__properties__[nameProp]

    def setProperty(self, propertyName, value, startDate=None):
        ''' Set object properties (static and dynamic) '''
        # BUG FIX: updateFromJSON calls setProperty with a third startDate
        # argument, which used to raise TypeError. The parameter is accepted
        # (and unused here) so existing subclass overrides keep working.
        if hasattr(self, propertyName):
            if propertyName in self.__table__.c:
                # Column values go through the project parser first.
                value = parser(value)
            setattr(self, propertyName, value)
        self.__properties__[propertyName] = value

    def updateFromJSON(self, data, startDate=None):
        ''' Function to call : update properties of new
        or existing object with JSON/dict of value'''
        for curProp in data:
            # Skip the primary key and the '-1' placeholder value.
            if (curProp.lower() != 'id' and data[curProp] != '-1'):
                # Whitespace-only strings are stored as None.
                if (isinstance(data[curProp], str)
                        and len(data[curProp].split()) == 0):
                    data[curProp] = None
                self.setProperty(curProp, data[curProp], startDate)

    def formatData(self, data):
        return

    @classmethod
    def getBuisnessRules(cls):
        # NOTE(review): dbConfig is presumably a module-level global supplied
        # elsewhere -- confirm it is initialized before first use.
        return dbConfig['dbSession'].query(BusinessRules
                                           ).filter_by(target=cls.__tablename__
                                                       ).all()

    @declared_attr
    def loadBusinessRules(cls):
        # Populate the per-event rule table once the schema is created.
        @event.listens_for(Base.metadata, 'after_create')
        def afterConfigured(target, connection, **kwargs):
            cls.__constraintRules__ = {'before_update': [],
                                       'after_update': [],
                                       'before_insert': [],
                                       'after_insert': [],
                                       'before_delete': [],
                                       'after_delete': []
                                       }
            rules = cls.getBuisnessRules()
            # Rewritten from a side-effecting list comprehension (whose
            # result and the trailing `in` test were dead) to a plain loop.
            if rules:
                for rule in rules:
                    if rule.actionType in cls.__constraintRules__:
                        cls.__constraintRules__[rule.actionType].append(rule)

    @declared_attr
    def onEvent(cls):
        # Wire SQLAlchemy mapper events to the business-rule dispatcher.
        # (Removed an unused `events` list local.)
        @event.listens_for(cls, 'before_update')
        def before_update(mapper, connection, target):
            cls.executeBusinessRules(target, 'before_update')

        @event.listens_for(cls, 'after_update')
        def after_update(mapper, connection, target):
            cls.executeBusinessRules(target, 'after_update')

        @event.listens_for(cls, 'before_insert')
        def before_insert(mapper, connection, target):
            cls.executeBusinessRules(target, 'before_insert')

        @event.listens_for(cls, 'after_insert')
        def after_insert(mapper, connection, target):
            cls.executeBusinessRules(target, 'after_insert')

        # @event.listens_for(cls, 'before_delete')
        # def before_delete(mapper, connection, target):
        #     cls.executeBusinessRules(target, 'before_delete')
        @event.listens_for(cls, 'after_delete')
        def after_delete(mapper, connection, target):
            cls.executeBusinessRules(target, 'after_delete')

    @classmethod
    def executeBusinessRules(cls, target, event):
        """Run every rule registered for `event`, honoring type restrictions."""
        if cls.__constraintRules__[event]:
            entityDTO = target.getFlatObject()
            for rule in cls.__constraintRules__[event]:
                if (not rule.targetTypes
                        or (hasattr(target, 'GetType') and target.GetType().ID in rule.targetTypes)):
                    rule.execute(entityDTO)

    def afterUpdate(self):
        return

    def beforeDelete(self):
        return

    def afterDelete(self):
        return

    def getFlatObject(self, schema=None):
        ''' return flat object with static properties and last existing value of dyn props '''
        data = {}
        hybrid_properties = list(get_hybrid_properties(self.__class__).keys())
        # Walk the three property sources index-by-index (the interleaving is
        # kept: for a key present in several sources, the later write wins);
        # IndexError just means that source is exhausted.
        max_iter = max(len(self.__table__.columns), len(
            self.__properties__), len(hybrid_properties))
        for i in range(max_iter):
            # Get static Properties (narrowed from a bare `except:`).
            try:
                curStatProp = list(self.__table__.columns)[i]
                data[curStatProp.key] = self.getProperty(curStatProp.key)
            except Exception:
                pass
            # Get dynamic Properties
            try:
                curDynPropName = list(self.__properties__)[i]
                data[curDynPropName] = self.getProperty(curDynPropName)
            except Exception:
                pass
            # Get hybrid Properties
            try:
                PropName = hybrid_properties[i]
                data[PropName] = self.getProperty(PropName)
            except Exception:
                pass
        if schema:
            data = formatValue(data, schema)
        return data
|
from utils import cache
from rq import Worker, Queue, Connection
# Queue names this worker consumes, highest priority first.
listen = ['high', 'default', 'low']

if __name__ == '__main__':
    # Bind RQ to the shared redis connection and process jobs until stopped.
    with Connection(cache):
        Worker([Queue(name) for name in listen]).work()
|
import os
import datetime
import json
import magic
import shutil
from pathlib import Path
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, kmlgen, is_platform_windows
def get_instagramDevices(files_found, report_folder, seeker, wrap_text):
    """Parse Instagram archive devices.json files and report device logins.

    For each devices.json found, extracts device ID, last-login timestamp and
    user agent, then writes an HTML report, a TSV export and timeline entries.
    """
    for file_found in files_found:
        file_found = str(file_found)
        filename = os.path.basename(file_found)
        if not filename.startswith('devices.json'):
            continue

        data_list = []
        with open(file_found, "rb") as fp:
            deserialized = json.load(fp)
        devices = deserialized['devices_devices']
        for x in devices:
            string_map = x['string_map_data']
            deviceid = string_map.get('Device ID', {}).get('value', '')
            # BUG FIX: the default used to be '' and the comparison below
            # ('' > 0) raised TypeError when the timestamp was missing.
            timestamp = string_map.get('Last Login', {}).get('timestamp', 0)
            if timestamp > 0:
                timestamp = datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')
            useragent = string_map.get('User Agent', {}).get('value', '')
            data_list.append((timestamp, deviceid, useragent))

        if data_list:
            report = ArtifactHtmlReport('Instagram Archive - Devices')
            report.start_artifact_report(report_folder, 'Instagram Archive - Devices')
            report.add_script()
            data_headers = ('Last Login Timestamp', 'Device ID', 'User Agent')
            report.write_artifact_data_table(data_headers, data_list, file_found)
            report.end_artifact_report()

            tsvname = 'Instagram Archive - Devices'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = 'Instagram Archive - Devices'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc('No Instagram Archive - Devices data available')
|
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from tero import setup
from tero.setup import stageFile
class logrotateSetup(setup.SetupTemplate):
    """Stages logrotate configuration for docker logs, plus a helper script
    that force-rotates when a pre-filtered error stream goes quiet."""

    # Files written verbatim by run(): destination directory, file name and
    # the full file content for each template. The template bodies are shell
    # and logrotate configuration — do not reformat them.
    logrotate_templates = {
        'logrotate_docker_log_template': {
            'filepath': '/etc/logrotate.d/',
            'filename': 'docker',
            'template': """/var/log/docker.log {
create 0600 root root
daily
rotate 70
missingok
notifempty
sharedscripts
postrotate
set -x;
INSTANCE_ID=`wget -v -O - http://instance-data/latest/meta-data/instance-id | sed -e s/i-/-/`
LOGS=$1
ROTATEDFILE=`ls -t /var/log/docker.log* | head -n3 | grep -v '\.log\$' | grep -v '\.gz\$' | head -n1`
TIMESTAMP=`stat -c %Y $ROTATEDFILE`
mv -nv $ROTATEDFILE ${ROTATEDFILE/\.log*/.log-$TIMESTAMP}
/bin/gzip -v9 ${ROTATEDFILE/\.log*/.log-$TIMESTAMP}
/usr/local/bin/dcopylogs --quiet --location s3://djaoapp-logs/docker --logsuffix=$INSTANCE_ID $LOGS
endscript
lastaction
/bin/sh -c 'syslog-ng-ctl reopen 2>/dev/null || kill -HUP `pgrep syslog-ng 2>/dev/null` 2>/dev/null || true'
endscript
}
"""
        },
        'logrotatehook_500_err_sh_template': {
            'filepath': '/usr/local/bin/',
            'filename': 'logrotatehook-500error.sh',
            'template': """#!/bin/sh
#
# Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# When passed stdin (from a filtered set of logs) this script will rotate logs after 30 seconds of no data on the stream unless no data is sent.
tt=`stat -c %Y -`;while :;do [ $((`date +%s` - `stat -c %Y -`)) -le 30 ] && nn=""; [ -z $nn ] && [ $tt != `stat -c %Y -` ] && [ $((`date +%s` - `stat -c %Y -`)) -ge 30 ] && nn="1" && /sbin/logrotate -vf /etc/logrotate.d/docker;sleep 1;done >>/var/log/logrotatehook-500error-sh.log 2>&1
"""
        }
    }

    def __init__(self, name, files, **kwargs):
        super(logrotateSetup, self).__init__(name, files, **kwargs)
        # This setup manages no daemons of its own.
        self.daemons = []

    def run(self, context):
        """Stage every template file onto the target; returns completion status."""
        complete = super(logrotateSetup, self).run(context)
        if not complete:
            # As long as the default setup cannot find all prerequisite
            # executable, libraries, etc. we cannot update configuration
            # files here.
            return complete

        # Install logrotate config file for docker.log
        for templ in self.logrotate_templates:
            _, asset_path = stageFile(os.path.join(
                self.logrotate_templates[templ]['filepath'], self.logrotate_templates[templ]['filename']), context)
            with open(asset_path, 'w') as asset_file:
                asset_file.write(self.logrotate_templates[templ]['template'])
        # The hook script must be executable.
        setup.postinst.shellCommand(['chmod', '0755', '/usr/local/bin/logrotatehook-500error.sh'])
        return complete
|
from celery import Celery
from celery.schedules import crontab
import utils
from main import rdb
task = Celery('tasks',
broker='redis://localhost:6379/0',
backend='redis://localhost:6379/0')
task.conf.timezone = 'UTC'
'''
@task.task
def function():
...
'''
@task.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register periodic jobs once the Celery app is configured."""
    # Run clean_key_pairs every 10 seconds.
    sender.add_periodic_task(10.0, clean_key_pairs.s(), name='clean every 10s')
@task.task
def clean_key_pairs():
    """Delegate key-pair cleanup to utils, using the shared redis handle."""
    utils.clean_key_pairs(rdb)
|
import pyotp
import base64
from datetime import datetime
from django.core.mail import EmailMessage
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.response import Response
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework import permissions
from auth_api.serializers import UserSerializer, GenerateOTPSerializer, \
VerifyOTPSerializer, LoginTokenSerializer
from .utils import Util
# TOTP interval in seconds (passed to pyotp.TOTP below).
EXPIRY_TIME = 1000


class GenerateKey:
    """Build a per-user OTP secret from the email, today's date and a fixed salt."""

    @staticmethod
    def return_value(email):
        return f"{email}{datetime.now().date()}abcd"
class RegisterUserView(generics.CreateAPIView):
    """Create new user in the system"""
    # BUG FIX: the docstring above used to sit after permission_classes,
    # where it was a no-op string statement rather than the class docstring.

    # Registration must be reachable without credentials.
    permission_classes = (permissions.AllowAny,)
    serializer_class = UserSerializer
class GenerateOTPView(APIView):
    """Generate a time-based OTP and email it to a not-yet-active user."""

    serializer_class = GenerateOTPSerializer

    def post(self, request):
        """Create an OTP for `email` and mail it.

        Returns 404 for an unknown user, 200 if the account is already
        active, 201 with the OTP otherwise.
        """
        email = request.data.get('email')
        try:
            user = get_user_model().objects.get(email=email)
        except ObjectDoesNotExist:
            return Response({"message": "User does not exist"}, status=404)

        if user.is_active:
            return Response({"message": "Your account is active already, proceed to login"}, status=200)

        # Persist the key material so VerifyOTPView can rebuild the same TOTP.
        keygen = GenerateKey().return_value(email)
        user.otp_key = keygen
        user.save()

        # NOTE(review): base64.b32encode returns bytes; recent pyotp versions
        # expect a str secret — confirm the installed pyotp accepts bytes.
        key = base64.b32encode(keygen.encode())
        OTP = pyotp.TOTP(key, interval=EXPIRY_TIME)
        token = OTP.now()

        # Fixed the missing space after "Hi" in the greeting.
        email_body = f'Hi {user.email}\n Please copy the code below to verify your email \n {token}'
        data = {'email_body': email_body, 'to_email': [user.email], 'email_subject': 'Verify your email'}
        Util.send_email(data)
        return Response({"OTP": OTP.now()}, status=201)
class VerifyOTPView(APIView):
    """Verify a previously generated OTP and activate the account."""

    serializer_class = VerifyOTPSerializer

    def post(self, request):
        """Check `otp_code` for `email`; activate the user when it matches."""
        email = request.data.get('email')
        otp_code = request.data.get('otp_code')

        try:
            user = get_user_model().objects.get(email=email)
        except ObjectDoesNotExist:
            return Response({"message": "User does not exist"}, status=404)

        # Rebuild the TOTP from the key stored at generation time.
        encoded_key = base64.b32encode(user.otp_key.encode())
        totp = pyotp.TOTP(encoded_key, interval=EXPIRY_TIME)
        if not totp.verify(otp_code):
            return Response({"message": "OTP is wrong/expired"}, status=400)

        user.is_active = True
        user.save()
        return Response({"message": "Registration completed successfully, proceed to login"}, status=200)
class LoginTokenView(ObtainAuthToken):
    """Create a new auth token for user"""
    serializer_class = LoginTokenSerializer
    # ObtainAuthToken does not pick up DEFAULT_RENDERER_CLASSES on its own,
    # so apply the project-wide renderers explicitly.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
|
import taichi as ti
@ti.kernel
def from_torch_template(expr: ti.template(), torch_tensor: ti.ext_arr()):
    # Element-wise copy from the external torch tensor into the taichi field.
    for i in expr:
        expr[i] = torch_tensor[i]
@ti.kernel
def to_torch_template(expr: ti.template(), torch_tensor: ti.ext_arr()):
    # Element-wise copy from the taichi field into the external torch tensor.
    for i in expr:
        torch_tensor[i] = expr[i]
def from_torch(expr, torch_tensor):
    """Copy torch_tensor's values into the taichi field `expr`.

    The copy wrapper is built lazily on first use and cached on the field
    object itself (in its `from_torch_` slot).
    """
    cached = expr.from_torch_
    if not cached:
        def _copy_in(tensor):
            from_torch_template(expr, tensor.contiguous())
        expr.from_torch_ = cached = _copy_in
    cached(torch_tensor)
def to_torch(expr, torch_tensor):
    """Copy the taichi field `expr` into torch_tensor.

    The copy wrapper is built lazily on first use and cached on the field
    object itself (in its `to_torch_` slot).
    """
    cached = expr.to_torch_
    if not cached:
        def _copy_out(tensor):
            to_torch_template(expr, tensor.contiguous())
        expr.to_torch_ = cached = _copy_out
    cached(torch_tensor)
|
import mido
# Open a virtual MIDI input port and print every incoming message until
# interrupted (iterating the port blocks waiting for messages).
with mido.open_input('New Port', virtual=True) as inport:
    for message in inport:
        print(message)
import os
import subprocess
import sys

# Packages Andromeda needs at runtime.
modules = ["youtube-dl", "pafy"]

# Fixed typo: "ever" -> "every".
print("""
This file will simply install every pip package needed to use Andromeda
""")
input("Press Enter to Continue: ")

for module in modules:
    # Run pip through the current interpreter via subprocess rather than
    # os.system: no shell interpretation, and the install targets the same
    # Python environment this script runs in.
    subprocess.run([sys.executable, "-m", "pip", "install", module], check=False)

input("Press Enter to Exit: ")
|
from cloudsigma.version import __version__
from cloudsigma import bulk
from cloudsigma import conf
from cloudsigma import errors
from cloudsigma import generic
from cloudsigma import resource
from cloudsigma import scenarios
from cloudsigma import metadata
|
import random
import shutil
import piexif
from matplotlib import pyplot as plt
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
INPUT_SIZE = 128

# Load the VGG16 convolutional base only (no fully connected head).
vgg16 = VGG16(include_top = False, weights = 'imagenet', input_shape = (INPUT_SIZE, INPUT_SIZE, 3))

# Freeze convolution/max-pool layers so only the new head is trained.
for layer in vgg16.layers:
    layer.trainable = False

# Add a fully connected layer (manual way vs .add() function) - this is the
# only layer we are training: one sigmoid unit for binary classification.
input_ = vgg16.input
output_ = vgg16(input_)
last_layer = Flatten(name = 'flatten')(output_)
last_layer = Dense(1, activation = 'sigmoid')(last_layer)
# BUG FIX: Keras expects the plural keyword arguments `inputs`/`outputs`;
# the old singular spellings were removed and raise a TypeError on Keras 2.x.
model = Model(inputs = input_, outputs = last_layer)

# Hyperparameters.
BATCH_SIZE = 16
STEPS_PER_EPOCH = 200
EPOCHS = 3

model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

training_data_generator = ImageDataGenerator(rescale = 1./255)
testing_data_generator = ImageDataGenerator(rescale = 1./255)
training_set = training_data_generator.flow_from_directory('PetImages/Train/',
                                                           target_size = (INPUT_SIZE, INPUT_SIZE),
                                                           batch_size = BATCH_SIZE,
                                                           class_mode = 'binary')
test_set = testing_data_generator.flow_from_directory('PetImages/Test/',
                                                      target_size = (INPUT_SIZE, INPUT_SIZE),
                                                      batch_size = BATCH_SIZE,
                                                      class_mode = 'binary')

model.fit_generator(training_set, steps_per_epoch = STEPS_PER_EPOCH, epochs = EPOCHS, verbose = 1)
# score = model.evaluate_generator(test_set, len(test_set))
# print(f'\n')
# for idx, metric in enumerate(model.metrics_names):
#    print("{}: {}".format(metric, score[idx]))

# Result Analysis
# • Strongly right predictions: The model predicted these images correctly, and the output value is > 0.8 or < 0.2
# • Strongly wrong predictions: The model predicted these images wrongly, and the output value is > 0.8 or < 0.2
# • Weakly wrong predictions: The model predicted these images wrongly, and the output value is between 0.4 and 0.6
strongly_wrong_idx = []
strongly_right_idx = []
weakly_wrong_idx = []

# Re-create the test generator with batch_size=1 so index i maps to one image.
test_set = testing_data_generator.flow_from_directory('PetImages/Test/',
                                                      target_size = (INPUT_SIZE,INPUT_SIZE),
                                                      batch_size = 1,
                                                      class_mode = 'binary')

for i in range(len(test_set)):
    # PERF FIX: fetch the (image, label) pair once per index instead of
    # calling __getitem__ twice (each call runs the image pipeline again).
    img, label = test_set[i]
    pred_prob = model.predict(img)[0][0]
    pred_label = int(pred_prob > 0.5)
    actual_label = int(label[0])
    if pred_label != actual_label and (pred_prob > 0.8 or pred_prob < 0.2):
        strongly_wrong_idx.append(i)
    elif pred_label != actual_label and (0.4 < pred_prob < 0.6):
        weakly_wrong_idx.append(i)
    elif pred_label == actual_label and (pred_prob > 0.8 or pred_prob < 0.2):
        strongly_right_idx.append(i)
    # stop once we have enough images to plot
    if len(strongly_wrong_idx) >= 9 and len(strongly_right_idx) >= 9 and len(weakly_wrong_idx) >= 9:
        break
def plot_on_grid(test_set, idx_to_plot, img_size=INPUT_SIZE):
    """Show nine randomly sampled images from `idx_to_plot` on a 3x3 grid."""
    fig, axes = plt.subplots(3, 3, figsize=(20, 10))
    for n, idx in enumerate(random.sample(idx_to_plot, 9)):
        row, col = divmod(n, 3)
        image = test_set[idx][0].reshape(img_size, img_size, 3)
        axes[row, col].imshow(image)
        axes[row, col].axis('off')
    plt.show()
# Visualise examples the model got confidently right and confidently wrong.
plot_on_grid(test_set, strongly_right_idx)
plot_on_grid(test_set, strongly_wrong_idx)
|
import os

def _run_commands_from(path):
    """Execute every line of `path` as a shell command, in file order."""
    # `with` guarantees the file is closed even if os.system raises.
    with open(path, 'r') as commands:
        for piece in commands:
            os.system(piece)

_run_commands_from('FauvPieces.txt')
_run_commands_from('IvTremPieces.txt')
# coding:utf-8
# 训练模型的代码
from time import *
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tools.my_dataset import MyDataset
from tools.iresnet import get_model
from tools.margin_softmax import ArcFace
from tools.partial_fc import PartialFC
def lr_func(epoch):
    """Step learning-rate multiplier.

    The warmup branch is effectively disabled (threshold -1 is never
    exceeded for epoch >= 0); otherwise the rate decays by 10x after each
    of the milestone epochs 11, 17 and 22.
    """
    if epoch < -1:  # warmup ramp; unreachable for non-negative epochs
        return ((epoch + 1) / (4 + 1)) ** 2
    milestones_passed = sum(1 for m in [11, 17, 22] if m - 1 <= epoch)
    return 0.1 ** milestones_passed
def train_function(image_path_txt, class_num, save_backbone_model_name, save_weight_name_path, save_weight_mom_name_path):
    """Train an ArcFace iresnet18 backbone until one epoch has zero errors.

    Parameters
    ----------
    image_path_txt : str
        Text file listing the training images/labels consumed by MyDataset.
    class_num : int
        Number of identity classes.
    save_backbone_model_name : str
        Destination path for the trained backbone state_dict.
    save_weight_name_path, save_weight_mom_name_path : str
        Destination paths for the PartialFC softmax weight / momentum tensors.
    """
    batch_size = 32
    train_transformer = transforms.Compose([
        transforms.Resize([89, 109]),
        transforms.ToTensor(),
    ])
    train_dataset = MyDataset(txt_path=image_path_txt, transform=train_transformer, target_transform=None)
    train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    backbone = get_model("r18", dropout=0, fp16=False, num_features=512)
    backbone.train()
    if torch.cuda.is_available():
        backbone = backbone.cuda()
    margin_softmax = ArcFace()
    # Single-process configuration: rank 0, world size 1, all classes sampled.
    module_partial_fc = PartialFC(
        rank=0, local_rank=0, world_size=1, resume=0,
        batch_size=batch_size, margin_softmax=margin_softmax, num_classes=class_num,
        sample_rate=1, embedding_size=512, prefix="./")
    opt_backbone = torch.optim.Adam(
        params=backbone.parameters(),
        lr=0.0001)
    opt_pfc = torch.optim.SGD(
        params=[{'params': module_partial_fc.parameters()}],
        lr=0.0001,
        momentum=0.9, weight_decay=5e-4)
    scheduler_backbone = torch.optim.lr_scheduler.StepLR(opt_backbone, step_size=50, gamma=0.1)
    scheduler_pfc = torch.optim.lr_scheduler.StepLR(opt_pfc, step_size=50, gamma=0.1)
    start = time()
    for epoch in range(20000):
        error_num = 0
        for batch_id, (image, label) in enumerate(train_dataloader):
            if torch.cuda.is_available():
                image = Variable(image.cuda())
                label = Variable(label.cuda())
            else:
                image = Variable(image)
                label = Variable(label)
            # L2-normalised embeddings, as required by the ArcFace margin.
            features = F.normalize(backbone(image))
            # PartialFC computes the classification loss internally and returns
            # the gradient w.r.t. the features, which is backpropagated manually.
            x_grad, loss_v, output = module_partial_fc.forward_backward(label, features, opt_pfc)
            features.backward(x_grad)
            opt_backbone.step()
            opt_pfc.step()
            module_partial_fc.update()
            opt_backbone.zero_grad()
            opt_pfc.zero_grad()
            pred = output.argmax(dim=1)
            correct = torch.eq(label, pred).sum().item()
            # for index in range(0, len(pred)):
            #     if pred[index].item() != label[index].item():
            #         print("pred: {}\t label: {}".format(pred[index], label[index]))
            error_num += len(label) - correct
        scheduler_backbone.step()
        scheduler_pfc.step()
        print("epoch: {}\t error_num: {}\t".format(epoch, error_num))
        # Stop and checkpoint the first time the training set is fit perfectly.
        if error_num == 0:
            end = time()
            torch.save(backbone.state_dict(), save_backbone_model_name)
            module_partial_fc.save_params(save_weight_name_path, save_weight_mom_name_path)
            print("epoch:{}, time:{}, save_backbone_model_name:{}".format(epoch, end - start, save_backbone_model_name))
            break
if __name__ == '__main__':
    # Alternative training lists; only the glint set is active.
    # image_path_txt = "./texts/celeba_2048_0630_1.txt"
    image_path_txt = "./texts/glint_2048_0929_1.txt"
    # image_path_txt = "./texts/ijbc_2048_0929_1.txt"
    dataset_name = "glint"
    class_num = 2048
    train_date = "0212"
    index = 1
    # Output artefacts: backbone weights plus the partial-fc softmax weight
    # and momentum tensors, all keyed by dataset/class-count/date/run-index.
    save_backbone_model_name = "./arcface_models/{}/{}_{}_{}_iresnet18_{}.pkl".format(dataset_name, dataset_name, class_num, train_date, index)
    save_weight_name_path = "./arcface_models/{}/{}_{}_{}_softmax_weight_{}.pt".format(dataset_name, dataset_name, class_num, train_date, index)
    save_weight_mom_name_path = "./arcface_models/{}/{}_{}_{}_softmax_weight_mom_{}.pt".format(dataset_name, dataset_name, class_num, train_date, index)
    print(image_path_txt, dataset_name, class_num, train_date, index)
    train_function(image_path_txt, class_num, save_backbone_model_name, save_weight_name_path, save_weight_mom_name_path)
|
from django.urls import reverse
# Event types whose stored user identity may be scrubbed.
ANONYMIZABLE_EVENTS = (
    "added_participant",
    "changed_owner",
    "owner_left",
    "removed_owner",
    "participant_left",
    "removed_participant",
)

def anonymize_event(user, event):
    """Replace the user identity in `event`'s context with an anonymous
    placeholder (null id, index URL), keeping the username for display.

    Raises ValueError for event types that are not anonymizable.
    """
    if event.event_type not in ANONYMIZABLE_EVENTS:
        # BUG FIX: error message typo "ananymized" -> "anonymized".
        raise ValueError('event of type "%s" can\'t be anonymized' % event.event_type)
    event.event_context = {
        "user": {"id": None, "username": user.username, "url": reverse("misago:index")}
    }
    event.save(update_fields=["event_context"])
def anonymize_post_last_likes(user, post):
    """Strip `user`'s id from the post's cached "last likes" entries."""
    anonymized = [
        {"id": None, "username": user.username} if like["id"] == user.id else like
        for like in post.last_likes
    ]
    # Only hit the database when something actually changed.
    if anonymized != post.last_likes:
        post.last_likes = anonymized
        post.save(update_fields=["last_likes"])
|
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class UserFiles(models.Model):
    """A file submitted by a user, with submit and upvote counters."""

    class Meta:
        verbose_name = "用户文件"
        verbose_name_plural = "用户文件"
        ordering = ['-last_submit_time']

    # 0 = active, 1 = deleted (soft-delete flag).
    is_ective_choices = (
        (0, '活动'),
        (1, '删除')
    )

    def info(self):
        """Return a plain-dict summary of this record for serialisation."""
        result = {'id': self.id,
                  'name': self.user.last_name,
                  'pname': self.project_name,
                  'time': self.last_submit_time,
                  'url': self.file_path,
                  'upcount': self.up_count}
        return result

    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE
    )
    is_ective = models.IntegerField(
        choices=is_ective_choices,
        default=0
    )
    project_name = models.CharField(
        max_length=155,
    )
    file_path = models.CharField(
        max_length=155,
        verbose_name='文件',
    )
    last_submit_time = models.DateTimeField(
        default=timezone.now
    )
    submit_count = models.IntegerField(
        default=1
    )
    up_count = models.IntegerField(
        default=0
    )

    def save(self, *args, auto_now=True, **kwargs):
        """Persist the record, refreshing last_submit_time unless auto_now=False.

        BUG FIX: the previous signature `save(self, auto_now=True)` dropped
        Django's standard save arguments (using, update_fields, ...), breaking
        any caller passing them. It also stored a naive `datetime.now()` while
        the field default is the timezone-aware `timezone.now`.
        """
        if auto_now:
            self.last_submit_time = timezone.now()
        super().save(*args, **kwargs)
class UserUp(models.Model):
    """Records one user upvoting one UserFiles entry."""

    class Meta:
        verbose_name = "UserUp"
        verbose_name_plural = "UserUps"

    # The file project that received the upvote.
    project_id = models.ForeignKey(
        UserFiles,
        on_delete=models.CASCADE
    )
    # The user who gave the upvote.
    up_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE
    )
    # Automatically refreshed on every save.
    up_time = models.DateTimeField(
        auto_now=True
    )
|
import logging
import os
# prints log to stdout and also saves to specified log file
# Record layout shared by all handlers: timestamp, logger name, level, message.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def directory_maker(path):  # makes directory if path does not exist
    """Create the directory portion of `path` (and parents) if missing.

    Uses exist_ok=True so concurrent callers cannot race between the
    existence check and the creation; a bare filename (empty dirname) is a
    no-op instead of crashing as the previous version did.
    """
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
def get_logger(module_path, file_name = 'execution.log'):
    """Return a logger named after the module, writing to <dir>/Logs/<file_name>.

    Parameters
    ----------
    module_path : str
        '/'-separated path of the calling module (e.g. __file__).
    file_name : str, optional
        Log file name inside the Logs directory, by default 'execution.log'.
    """
    module_path = module_path.split('/')
    name = module_path[-1]
    current_directory = '/'.join(module_path[:-1])
    directory = current_directory+'/Logs/'
    directory_maker(directory)
    # BUG FIX: honour the `file_name` parameter (previously hard-coded to
    # 'execution.log') and print the actual log path (the old message was
    # missing the '/Logs/' segment).
    log_path = directory + file_name
    print(log_path)
    logging.basicConfig(filename = log_path, format = LOG_FORMAT, level=logging.DEBUG)
    logger = logging.getLogger(name)
    return logger
# Module self-test: emit one record through the configured logger.
logger = get_logger(__file__)
logger.info("This is information.")
# logger.error("This is Error!")
# logger.debug("This is debug...")
|
# coding=utf-8
import socket

# target_host = "www.baidu.com"
target_host = "127.0.0.1"
target_port = 80

# Build a socket object (AF_INET: standard IPv4 addresses/hostnames,
# SOCK_STREAM: TCP client).
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Connect to the target server.
client.connect((target_host, target_port))

# Send a minimal HTTP request. BUG FIX for Python 3: sockets require bytes,
# not str, and print is a function (the old `print response` is a syntax
# error on Python 3).
client.send(b"GET / HTTP/1.1\r\nHost:baidu.com\r\n\r\n")

# Receive up to 4096 bytes of the response.
response = client.recv(4096)
print(response)
client.close()
|
"""
This module provides wrapper functions for plot with matplotlib.
By using with other modules in "matdat" and "matpos", you can separate actions of plot
from data and layouting.
Functions
---------
set_tick_params
set_labels
set_grid
set_xlim
set_ylim
line
scatter
vlines
hlines
box
factor_box
factor_violin
velocity
band
text
"""
from .action import *
from .axes_style import *
from .scatter import scatter
from .line import line
from .vhlines import vlines
from .band import xband, yband
from .velocity import velocity
from .box import box, factor_box
from .violin import factor_violin
from .text import text
from .hist import hist
from .bar import bar, factor_bar
from .cycler import *
|
"""Auto-generated file, do not edit by hand. MY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Short-code/emergency number patterns for Malaysia (MY). This assignment is
# regenerated by the metadata build (see the module docstring), so change the
# generator inputs rather than these literals.
PHONE_METADATA_MY = PhoneMetadata(id='MY', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='[1369]\\d{2,4}', possible_length=(3, 4, 5)),
    toll_free=PhoneNumberDesc(national_number_pattern='112|999', example_number='112', possible_length=(3,)),
    emergency=PhoneNumberDesc(national_number_pattern='112|999', example_number='112', possible_length=(3,)),
    short_code=PhoneNumberDesc(national_number_pattern='1(?:0(?:[01348]|[569]\\d)|1(?:[02]|1[128]|311)|2(?:0[125]|[13-6]|2\\d{0,2})|3(?:09\\d|[1-35-79]\\d\\d?)|5(?:[12]\\d|454|5\\d\\d?|77|888|999?)|7(?:[136-9]\\d|[45]\\d\\d?)|8(?:18?|2|8[18])|9(?:[0-4]\\d|68|71|9[0679]))|3[23679]\\d{3}|66628|99[1-469]|1(?:3[5-7]|9[124])', example_number='100', possible_length=(3, 4, 5)),
    standard_rate=PhoneNumberDesc(national_number_pattern='666\\d\\d', example_number='66600', possible_length=(5,)),
    sms_services=PhoneNumberDesc(national_number_pattern='(?:3[23679]\\d|666)\\d\\d', example_number='32000', possible_length=(5,)),
    short_data=True)
|
# -*- coding: utf-8 -*-
import os
import sima
from sima.ROI import ROIList
import numpy as np
def extract(fpath):
    """Extract ROI signals from a motion-corrected sima dataset.

    Expects `<name>_mc.sima` and `<name>_RoiSet.zip` next to `fpath`; saves
    the raw signal array to `<name>_extractedsignals.npy` in the same folder.
    """
    fdir, tail = os.path.split(fpath)
    fname = os.path.splitext(tail)[0]
    sima_mc_path = os.path.join(fdir, fname + '_mc.sima')
    if not os.path.exists(sima_mc_path):
        raise Exception('Data not motion corrected yet; can\'t extract ROI data')
    # Load ImageJ ROIs as sima polygon objects (list).
    rois = ROIList.load(os.path.join(fdir, fname + '_RoiSet.zip'), fmt='ImageJ')
    # Reload the motion-corrected dataset and attach the ROIs to it.
    dataset = sima.ImagingDataset.load(os.path.join(fdir, fname + '_mc.sima'))
    dataset.add_ROIs(rois, 'from_ImageJ')
    print('Extracting roi signals from %s' % fdir)
    signals = dataset.extract(rois)
    # Turn the raw signals list into an np array before saving.
    extracted_signals = np.asarray(signals['raw'])
    np.save(os.path.join(fdir, fname + '_extractedsignals.npy'), extracted_signals)
    print('Done with extracting roi signals')
import jax.numpy as jnp
from jax import grad, hessian, jit, random
from jax.flatten_util import ravel_pytree
from jax.experimental.optimizers import l2_norm
from jax.nn import softmax
from jax.nn import log_softmax, logsumexp
# Seed the JAX PRNG so parameter initialisation is reproducible.
key = random.PRNGKey(0)
def sigmoid(x):
    """Logistic function computed via the numerically stable tanh identity."""
    return (jnp.tanh(x / 2) + 1) * 0.5
# Computes the raw linear scores (logits) for each class; `loss` below turns
# them into log-probabilities via logsumexp. (The previous comment claiming
# this returns a probability was inaccurate.)
def predict(W, b, inputs):
    return jnp.dot(inputs, W) + b
# Build a toy dataset: four samples with three features each.
inputs = jnp.array([[0.52, 1.12, 0.77],
                    [0.88, -1.08, 0.15],
                    [0.52, 0.06, -1.30],
                    [0.74, -2.49, 1.39]])
# Integer class labels, one per row of `inputs`; one-hot encoded below.
targets = jnp.array([1, 1, 2, 3])
def _one_hot(x, k, dtype=jnp.float32):
"""Create a one-hot encoding of x of size k."""
return jnp.array(x[:, None] == jnp.arange(k), dtype)
# NOTE(review): label value 3 falls outside arange(3), so its one-hot row is
# all zeros — the labels were likely meant to be 0-2; confirm intent.
targets = _one_hot(targets, 3)
def loss(W, b):
    """Mean softmax cross-entropy over the toy dataset plus a small L2 penalty."""
    logits = predict(W, b, inputs)
    log_probs = logits - logsumexp(logits, axis=1, keepdims=True)
    data_loss = -jnp.mean(jnp.sum(log_probs * targets, axis=1))
    # Weight-decay regularisation on both parameter groups.
    return data_loss + 0.001 * (l2_norm(W) + l2_norm(b))
# Initialize random model coefficients
key, W_key, b_key = random.split(key, 3)
W = random.normal(W_key, (3,3))
b = random.normal(b_key, (3,))
f = lambda W: predict(W, b, inputs)
print("loss:",loss(W, b))
# Plain gradient descent with a fixed step size of 0.005.
for i in range(1000):
    W_grad, b_grad = grad(loss, (0, 1))(W, b)
    W = W -0.005*W_grad
    b = b - 0.005 * b_grad
print("loss:",loss(W, b))
# Flatten the Hessian pytree into a 9x9 matrix and inspect its spectrum.
H = hessian(loss)(W, b)
h, _ = ravel_pytree(H)
eigen_vals = jnp.linalg.eigvals(h.reshape(9,9)).real
eigen_vals = sorted(eigen_vals, reverse=True)
print(eigen_vals) # should be all positive for convex function
#outputs: [0.2406996, 0.16962789, 0.13137847, 0.07919562, 0.037625454, 0.02834966, 0.00042202187, 0.00042201488, 0.00037239227]
"""
This module provides functionality for plotting in jupyter (http://jupyter.org/) notebooks
based on dygraphs (http://dygraphs.com/) and pandas (https://pandas.pydata.org/).
"""
import json
import uuid
import pandas
from IPython.display import HTML
def dygraphplot(*dataframeandoptions):
    """
    Plots the given dataframes in a jupyter notebook cell.

    Each positional argument is a dict with the pandas.DataFrame under key
    'df' and an optional dygraphs config dict under key 'opt'. The first
    column of the data frame contains the x-axis data, while the remaining
    columns contain the series data; all columns except the first must be
    parseable to numeric. Raises Exception on a non-numeric series column.
    """
    html = """
    <link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/dygraph/2.1.0/dygraph.min.css">
    """
    for dfandoptions in dataframeandoptions:
        df = dfandoptions['df']
        options = dfandoptions.get('opt', {'legend': 'always'})
        # Check all but the first columns. According to dygraphs spec, these
        # columns must contain numeric values.
        for col in df.columns.values[1:]:
            try:
                pandas.to_numeric(df[col])
            except Exception as err:
                # BUG FIX: the bare `except:` swallowed the original error
                # (and even KeyboardInterrupt); chain it for debuggability.
                raise Exception("Dataframe contains non-numeric column: {}".format(col)) from err
        html = html+"""
        <div id="{0}"></div>
        <script type="text/javascript">
          requirejs.config({{
              paths: {{
                  "Dygraph": ["//cdnjs.cloudflare.com/ajax/libs/dygraph/2.1.0/dygraph.min"]
              }}
          }});
          require(['Dygraph'], function(Dygraph){{
              new Dygraph(document.getElementById("{0}"), "{1}", {2})
          }})
        </script>
        """.format(
            uuid.uuid4(),
            df.to_csv(index=False).replace("\n", "\\n\"+\""),
            json.dumps(options)
        )
    return HTML(html)
|
# Numpy and Maths
import numpy
import math
# Tensorflow
import tensorflow
def delete_nans_in_data(array):
    """Return `array` with every row that contains a NaN removed.

    Replaces the previous O(n^2) delete-inside-a-loop implementation (which
    also mutated indices while iterating) with a single boolean mask; row
    order is preserved.
    """
    array = numpy.asarray(array)
    # A row is bad when any of its values is NaN (trailing dims flattened).
    bad_rows = numpy.isnan(array).reshape(len(array), -1).any(axis=1)
    return array[~bad_rows]
def init_data(array):
    """Build aligned (open, close) arrays from raw data with NaN rows removed.

    The "close" targets are the next timestep's first six columns, so the
    first close row and the last open row are dropped to align the pair.
    """
    cleaned = delete_nans_in_data(array)
    close_data = numpy.delete(cleaned[:, [0, 1, 2, 3, 4, 5]], 0, 0)
    open_data = numpy.delete(cleaned, len(cleaned) - 1, 0)
    return (open_data, close_data)
def normalize_data(x_array, y_array, scaler):
    """Fit `scaler` on each array in turn and return the scaled pair."""
    return (scaler.fit_transform(x_array), scaler.fit_transform(y_array))
def denormalize_data(x_array, y_array, scaler):
    """Undo a previous scaling of both arrays via the scaler's inverse."""
    return (scaler.inverse_transform(x_array), scaler.inverse_transform(y_array))
def split_data(open_data: numpy.array, close_data: numpy.array):
    """Chronological 90/10 split of the open/close arrays into train/test."""
    cut_open = int(len(open_data) * 0.9)
    cut_close = int(len(close_data) * 0.9)
    x_train, x_test = open_data[:cut_open], open_data[cut_open:]
    y_train, y_test = close_data[:cut_close], close_data[cut_close:]
    return x_train, y_train, x_test, y_test
def shuffle_data(x_train: numpy.array, y_train: numpy.array):
    """Shuffle paired samples together, keeping x/y rows in correspondence."""
    paired = tensorflow.data.Dataset.from_tensor_slices((x_train, y_train))
    paired = paired.shuffle(len(y_train), reshuffle_each_iteration=True)
    xs, ys = zip(*paired.as_numpy_iterator())
    return numpy.array(xs), numpy.array(ys)
#! python3
"""
pisat.core.logger.sensor_controller
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A controller of multiple Sensor and Adapter classes.
This class integrates Sensor and Adapter classes and
control as two types of classes make cooperation.
This means that SensorController operates some kind of
transaction about retrieving multiple data with
sets of Sensors and Adapters.
This class is a ComponentGroup.
In most cases, a user should use the class to be wrapped
in DataLogger class beacause SensorController have no
inner container for holding data logged like LogQueue.
[info]
pisat.core.logger.DataLogger
pisat.core.logger.LogQueue
"""
from typing import Dict, Generic, Set, Type, TypeVar, Optional
from pisat.base.component_group import ComponentGroup
from pisat.model.linked_datamodel import LinkedDataModelBase
from pisat.sensor.sensor_base import SensorBase
LinkedModel = TypeVar("LinkedModel")
class SensorGroup(ComponentGroup, SensorBase, Generic[LinkedModel]):
    """Component group that reads several sensors into one linked data model.

    A controller of multiple Sensor classes: the group reads every
    registered sensor and synchronises the results into a single
    LinkedDataModel instance, operating as a small transaction over the
    whole set of sensors.

    This class is a ComponentGroup. In most cases a user should wrap it in
    a DataLogger, because SensorGroup has no inner container for holding
    logged data such as LogQueue.

    See Also
    --------
    pisat.core.logger.DataLogger : Wrapper class of this class.
    pisat.core.logger.LogQueue : Container class of logged data.
    """

    def __init__(self,
                 modelclass: Type[LinkedModel],
                 *sensors: SensorBase,
                 name: Optional[str] = None):
        """
        Parameters
        ----------
        modelclass : Type[LinkedModel]
            LinkedDataModel subclass the group synchronises readings into.
        *sensors : SensorBase
            Sensors initially registered in the group.
        name : Optional[str], optional
            name of this Component, by default None
        """
        ComponentGroup.__init__(self, name=name)
        if not issubclass(modelclass, LinkedDataModelBase):
            raise TypeError(
                "'modelclass' must be a subclass of LinkedDataModelBase."
            )
        self._sensors: Set[SensorBase] = set()
        self._modelclass = modelclass
        self.append(*sensors)

    def __len__(self):
        return len(self._sensors)

    def append(self, *sensors: SensorBase):
        """Append sensors inside.

        Parameters
        ----------
        *sensors : Tuple[SensorBase, ...]
            Sensors to be included inside.

        Raises
        ------
        NotImplementedError
            Raised if given sensors are not instances of SensorBase.
        """
        # BUG FIX: validate before mutating any state; the previous version
        # appended to the component group first and could raise mid-loop,
        # leaving the group half-updated.
        for sensor in sensors:
            if not isinstance(sensor, SensorBase):
                raise NotImplementedError(
                    "Components of 'sensors' must be SensorBase."
                )
        super().append(*sensors)
        for sensor in sensors:
            self._sensors.add(sensor)

    def remove(self, sensor: SensorBase):
        """Remove given sensor from the group.

        Parameters
        ----------
        sensor : SensorBase
            Sensor to be removed.

        Raises
        ------
        ValueError
            Raised if the given sensor is not included in the sensor group.
        """
        try:
            self._sensors.remove(sensor)
        except KeyError:
            raise ValueError("The SensorGroup doesn't have the sensor.")

    @property
    def model(self):
        # The LinkedDataModel subclass readings are synchronised into.
        return self._modelclass

    def read(self) -> LinkedModel:
        """Read all sensors and synchronise the results into one model.

        Returns
        -------
        LinkedDataModelBase
            A data model which has retrieved data from the sensors.
        """
        if self._modelclass is None:
            raise AttributeError(
                "No model has been set now."
            )
        model = self._modelclass(self.name)
        data = [sensor.read() for sensor in self._sensors]
        model.sync(*data)
        return model

    def get_sensors(self) -> Dict[str, SensorBase]:
        """Return the registered sensors keyed by their names.

        Returns
        -------
        Dict[str, SensorBase]
            Mapping from sensor name to sensor object.
        """
        return {sensor.name: sensor for sensor in self._sensors}
|
import socket
import time
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
from CondCore.CondDB.CondDB_cfi import *
from Configuration.AlCa.autoCond import autoCond
# Command-line tunables for this cmsRun configuration. Each register call
# declares name, default, multiplicity, type and help text.
options = VarParsing.VarParsing()
options.register('connectionString',
                 'frontier://FrontierProd/CMS_CONDITIONS', #default value
                 VarParsing.VarParsing.multiplicity.singleton,
                 VarParsing.VarParsing.varType.string,
                 "GlobalTag Connection string")
options.register('globalTag',
                 autoCond['run2_data'], #default value
                 VarParsing.VarParsing.multiplicity.singleton,
                 VarParsing.VarParsing.varType.string,
                 "GlobalTag")
options.register( 'runNumber'
                , 1 #default value
                , VarParsing.VarParsing.multiplicity.singleton
                , VarParsing.VarParsing.varType.int
                , "Run number to be uploaded."
                  )
options.register( 'destinationConnection'
                , 'sqlite_file:EcalADCToGeVConstant_EDAnalyzer_test.db' #default value
                , VarParsing.VarParsing.multiplicity.singleton
                , VarParsing.VarParsing.varType.string
                , "Connection string to the DB where payloads will be possibly written."
                  )
options.register( 'tag'
                , 'EcalADCToGeVConstant_EDAnalyzer_test'
                , VarParsing.VarParsing.multiplicity.singleton
                , VarParsing.VarParsing.varType.string
                , "Tag written in destinationConnection and finally appended onto the tag in connectionString."
                  )
options.register( 'currentThreshold'
                , 18000.
                , VarParsing.VarParsing.multiplicity.singleton
                , VarParsing.VarParsing.varType.float
                , "The threshold on the magnet current for considering a switch of the magnetic field."
                  )
options.register( 'messageLevel'
                , 0 #default value
                , VarParsing.VarParsing.multiplicity.singleton
                , VarParsing.VarParsing.varType.int
                , "Message level; default to 0."
                  )
options.parseArguments()
# Database connections: read conditions from `connectionString`, write the
# produced payload to `destinationConnection`.
CondDBConnection = CondDB.clone( connect = cms.string( options.connectionString ) )
CondDBConnection.DBParameters.messageLevel = cms.untracked.int32( options.messageLevel )
DestConnection = CondDB.clone( connect = cms.string( options.destinationConnection ) )
DestConnection.DBParameters.messageLevel = cms.untracked.int32( options.messageLevel )

process = cms.Process( "EcalADCToGeVConstantWriter" )

process.MessageLogger = cms.Service( "MessageLogger"
                                   , destinations = cms.untracked.vstring( 'cout' )
                                   , cout = cms.untracked.PSet( threshold = cms.untracked.string( 'INFO' ) )
                                   )

if options.messageLevel == 3:
    #enable LogDebug output: remember the USER_CXXFLAGS="-DEDM_ML_DEBUG" compilation flag!
    process.MessageLogger.cout = cms.untracked.PSet( threshold = cms.untracked.string( 'DEBUG' ) )
    process.MessageLogger.debugModules = cms.untracked.vstring( '*' )

process.source = cms.Source( "EmptySource",
                             firstRun = cms.untracked.uint32( options.runNumber ),
                             # BUG FIX: the builtin `long` does not exist on Python 3;
                             # `int` is unbounded there and behaves the same on Python 2.
                             firstTime = cms.untracked.uint64( ( int( time.time() ) - 24 * 3600 ) << 32 ), #24 hours ago in nanoseconds
                             numberEventsInRun = cms.untracked.uint32( 1 ),
                             numberEventsInLuminosityBlock = cms.untracked.uint32( 1 )
                             )

process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32( 1 ) )

process.GlobalTag = cms.ESSource( "PoolDBESSource",
                                  CondDBConnection,
                                  globaltag = cms.string( options.globalTag ),
                                  toGet = cms.VPSet()
                                  )
# Override the ADC-to-GeV records for the 0T and 3.8T field configurations.
process.GlobalTag.toGet.append( cms.PSet( record = cms.string( "EcalADCToGeVConstantRcd" ),
                                          label = cms.untracked.string( "0T" ),
                                          tag = cms.string( "EcalADCToGeVConstant_0T_test0" ),
                                          connect = cms.string( "frontier://FrontierPrep/CMS_CONDITIONS" ),
                                          )
                                )
process.GlobalTag.toGet.append( cms.PSet( record = cms.string( "EcalADCToGeVConstantRcd" ),
                                          label = cms.untracked.string( "38T" ),
                                          tag = cms.string( "EcalADCToGeVConstant_3.8T_test0" ),
                                          connect = cms.string( "frontier://FrontierPrep/CMS_CONDITIONS" ),
                                          )
                                )

process.PoolDBOutputService = cms.Service( "PoolDBOutputService"
                                         , DestConnection
                                         , timetype = cms.untracked.string( 'runnumber' )
                                         , toPut = cms.VPSet( cms.PSet( record = cms.string( 'EcalADCToGeVConstantRcd' )
                                                                      , tag = cms.string( options.tag )
                                                                      )
                                                            )
                                         )

process.ecalADCToGeVConstantBTransition = cms.EDAnalyzer( "EcalADCToGeVConstantBTransitionAnalyzer"
                                                        , currentThreshold = cms.untracked.double( options.currentThreshold )
                                                        )
process.p = cms.Path( process.ecalADCToGeVConstantBTransition )
|
import config
import torch
import flask
import time
from flask import Flask
from flask import request
from model import BERTBaseUncased
import functools
app = Flask(__name__)

# Populated in __main__ before the server starts serving requests.
MODEL = None
# Inference device; assumes a CUDA-capable GPU is available -- TODO confirm.
DEVICE = "cuda"
# Prediction cache; not read or written by the code in this module.
PREDICTION_DICT = dict()
def sentence_prediction(sentence):
    """Tokenize `sentence` and return the model's positive-class probability.

    Uses the globally loaded MODEL and the tokenizer/max length from config;
    pads the encoded sequence to MAX_LEN before inference.
    """
    tokenizer = config.TOKENIZER
    max_len = config.MAX_LEN
    review = str(sentence)
    # Collapse whitespace runs into single spaces.
    review = " ".join(review.split())

    inputs = tokenizer.encode_plus(
        review, None, add_special_tokens=True, max_length=max_len
    )

    ids = inputs["input_ids"]
    mask = inputs["attention_mask"]
    token_type_ids = inputs["token_type_ids"]

    # Pad every sequence up to max_len.
    padding_length = max_len - len(ids)
    ids = ids + ([0] * padding_length)
    mask = mask + ([0] * padding_length)
    token_type_ids = token_type_ids + ([0] * padding_length)

    # Add a batch dimension and move the tensors to the inference device.
    ids = torch.tensor(ids, dtype=torch.long).unsqueeze(0)
    mask = torch.tensor(mask, dtype=torch.long).unsqueeze(0)
    token_type_ids = torch.tensor(token_type_ids, dtype=torch.long).unsqueeze(0)

    ids = ids.to(DEVICE, dtype=torch.long)
    token_type_ids = token_type_ids.to(DEVICE, dtype=torch.long)
    mask = mask.to(DEVICE, dtype=torch.long)

    # Inference only: skip autograd bookkeeping to save memory and time.
    with torch.no_grad():
        outputs = MODEL(ids=ids, mask=mask, token_type_ids=token_type_ids)

    outputs = torch.sigmoid(outputs).cpu().detach().numpy()
    return outputs[0][0]
@app.route("/predict")
def predict():
    """HTTP endpoint: score ?sentence=... and report both class probabilities."""
    sentence = request.args.get("sentence")
    start_time = time.time()
    positive_prediction = sentence_prediction(sentence)
    negative_prediction = 1 - positive_prediction
    response = {
        "response": {
            "positive": str(positive_prediction),
            "negative": str(negative_prediction),
            "sentence": str(sentence),
            "time_taken": str(time.time() - start_time),
        }
    }
    return flask.jsonify(response)
if __name__ == "__main__":
    MODEL = BERTBaseUncased()
    # Load fine-tuned weights and switch to inference mode before serving.
    MODEL.load_state_dict(torch.load(config.MODEL_PATH))
    MODEL.to(DEVICE)
    MODEL.eval()
    app.run()
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] run_control={"frozen": false, "read_only": false}
# [](https://neutronimaging.pages.ornl.gov/tutorial/notebooks/bragg_edge_normalization_and_profile_extractor/#activate-search)
# <img src='__docs/__all/notebook_rules.png' />
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select Your IPTS
# + run_control={"frozen": false, "read_only": false}
from __code import system
from __code.bragg_edge.bragg_edge_normalization import BraggEdge
from __code.bragg_edge.bragg_edge import Interface
system.System.select_working_dir(facility='SNS', instrument='SNAP')
from __code.__all import custom_style
custom_style.style()
from plotly.offline import plot, init_notebook_mode, iplot
init_notebook_mode()
# -
# ## Prepare UI engine
# + run_control={"frozen": false, "read_only": false}
# Start the Qt event loop so the interactive ROI windows below can open.
# %gui qt
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Select Raw Data Input Folder
#
# Data and time spectra files will be loaded
# + run_control={"frozen": false, "read_only": false}
o_bragg = BraggEdge(working_dir=system.System.get_working_dir())
o_bragg.select_working_folder()
# -
# # Select Open Beam Input Folder
o_bragg.select_ob_folder()
# # Normalization
# + [markdown] run_control={"frozen": false, "read_only": false}
# ### Select how many random files to use to select sample position
# + run_control={"frozen": false, "read_only": false}
o_bragg.how_many_data_to_use_to_select_sample_roi()
# + [markdown] run_control={"frozen": false, "read_only": false}
# ### Select background region
# -
# In order to improve the normalization you have the option to select a region in your images that **you know for sure is away from the sample**. The algorithm will use that **background** region to match it with the same region of the open beam (OB) images.
# + run_control={"frozen": false, "read_only": false}
o_interface = Interface(data=o_bragg.get_image_to_use_for_display())
o_interface.show()
# -
# ## Perform normalization
# Scale the sample images against the OB images using the background ROI(s) chosen above.
o_bragg.normalization(list_rois=o_interface.roi_selected)
# ## Export normalized data
o_bragg.export_normalized_data()
# # Calculate Bragg edge profile
# + [markdown] run_control={"frozen": false, "read_only": false}
# ## Define Experiment Setup
# + run_control={"frozen": false, "read_only": false}
o_bragg.exp_setup()
# -
# ## Define the position of your sample
o_interface_sample = Interface(data=o_bragg.final_image)
o_interface_sample.show()
# ## Calculate signal of sample region
# o_bragg.calculate_counts_vs_file_index_of_regions_selected(list_roi=o_interface.list_roi)
o_bragg.calculate_counts_vs_file_index_of_regions_selected(list_roi=o_interface_sample.roi_selected)
o_bragg.load_time_spectra()
# + [markdown] run_control={"frozen": false, "read_only": false}
# # Display Bragg Edges vs Signal
# -
# Run the next cell **only if** you want to display the signal Counts vs lambda
# + run_control={"frozen": false, "read_only": false}
o_bragg.plot()
# -
# # Export ASCII Data
o_bragg.select_output_data_folder()
|
"""nturesell URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# NOTE: `from . import settings` used to be imported here and was immediately
# shadowed by the `django.conf` import below (a latent source of confusion);
# project settings must always be accessed through `django.conf.settings`.
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls import include, url
from chat import views as chatView

# URL routing table: maps request paths to their view callables.
urlpatterns = [
    path('', views.home),
    # path('sell/',views.sell),
    path('logout/', views.logout),
    path('home/', views.home, name="home"),
    path('register/', views.register),
    path('login/', views.login),
    path('event/', views.event),
    # path('event/', views.profile),
    # path('productdetail',views.productdetail),
    # path('editproduct',views.editproduct),
    # path('boughthistory',views.boughthistory),
    # url(r'^chat/', include('chat.urls')),
]

# Serve collected static files and user-uploaded media (development setup).
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import torch
import torch.nn as nn
import numpy as np
import logging
import pytorch_lightning as pl
import sys
from lifelines.utils import concordance_index
from sklearn.metrics import r2_score
from torch.utils.data import DataLoader, TensorDataset
from torchcontrib.optim import SWA
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
def get_attn_mask(attn_shape, a, device):
    """Build a boolean attention mask from a causal (upper-triangular) base.

    Args:
        attn_shape: shape of the per-batch attention matrix, e.g. (B, T, T).
        a: input array whose last 3 feature columns are treated as
           indicator ("line") channels — assumes one-hot markers along the
           time axis; TODO confirm against the caller.
        device: torch device to move the mask to, or None to stay on CPU.

    Returns:
        Boolean torch tensor where True marks positions that MAY be attended
        (the mask is built as "1 = hidden", then compared `== 0`).
    """
    # Standard causal mask: 1 strictly above the diagonal (future positions).
    sub_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    # Last three feature channels carry the line indicators.
    lines = a[..., -3:]
    for i in range(sub_mask.shape[0]):
        temp = sub_mask[i]; lines_t = lines[i]
        # iterate through each elem in the array and zero out :min(arr)
        Tmax = 0
        for j in range(3):
            # Time steps where indicator channel j fires.
            line = np.where(lines_t[:, j] == 1.)[0]
            if len(line) == 0:
                continue
            # One past the last firing step seen so far for this sample.
            Tmax = max(list(line)) + 1
            if min(list(line)) == 0:
                continue
            # Hide everything before the first firing step, for each firing row.
            for idx in list(line):
                temp[idx, :min(list(line))] = 1
        # Hide all rows at/after the last observed step.
        # NOTE(review): the placement of this statement relative to the j-loop
        # was reconstructed from a whitespace-stripped source — confirm; if no
        # channel fires, Tmax stays 0 and the whole sample is masked out.
        temp[Tmax:] = 1
        sub_mask[i] = temp
    if device is not None:
        Am = (torch.from_numpy(sub_mask) == 0).to(device)
    else:
        Am = (torch.from_numpy(sub_mask) == 0)
    return Am
def resample(data, device=None, strategy='random'):
    '''Oversample the minority classes of a TensorDataset.

    Only the static features B and labels Y drive the resampling (via
    imblearn's RandomOverSampler); the remaining tensors are re-indexed with
    the sampler's chosen indices and the whole set is shuffled. Only
    strategy='random' is implemented; 'smote' and 'adasyn' raise
    NotImplementedError.

    The block below is a previous hand-rolled attempt, kept for reference:

    # B, X, A, M, Y, CE = data.tensors
    # y = pt_numpy(Y)
    # mclass_idxs = np.where(y == np.bincount(y).argmin())[0]
    # num_lfreq_class = len(mclass_idxs)
    # num_mfreq_class = len(np.where(y == np.bincount(y).argmax())[0]) # find most frequent element and number of times it occurs
    # num_iters = int(np.ceil(num_mfreq_class / num_lfreq_class))
    # num_classes = len(np.bincount(y))
    # if device is not None:
    #     Bn = torch.zeros((num_iters*num_lfreq_class*num_classes, B.shape[1])).to(device)
    #     Xn = torch.zeros((num_iters*num_lfreq_class*num_classes, X.shape[1], X.shape[2])).to(device)
    #     Mn = torch.zeros((num_iters*num_lfreq_class*num_classes, M.shape[1], M.shape[2])).to(device)
    #     An = torch.zeros((num_iters*num_lfreq_class*num_classes, A.shape[1], A.shape[2])).to(device)
    #     Yn = torch.zeros((num_iters*num_lfreq_class*num_classes,),dtype=torch.long).to(device)
    #     CEn = torch.zeros((num_iters*num_lfreq_class*num_classes, CE.shape[1])).to(device)
    # else:
    #     Bn = torch.zeros((num_iters*num_lfreq_class*num_classes, B.shape[1]))
    #     Xn = torch.zeros((num_iters*num_lfreq_class*num_classes, X.shape[1], X.shape[2]))
    #     Mn = torch.zeros((num_iters*num_lfreq_class*num_classes, M.shape[1], M.shape[2]))
    #     An = torch.zeros((num_iters*num_lfreq_class*num_classes, A.shape[1], A.shape[2]))
    #     Yn = torch.zeros((num_iters*num_lfreq_class*num_classes,),dtype=torch.long)
    #     CEn = torch.zeros((num_iters*num_lfreq_class*num_classes, CE.shape[1]))
    # zidxs = list(np.where(y == 0.)[0])
    # oidxs = list(np.where(y == 1.)[0])
    # tidxs = list(np.where(y == 2.)[0])
    # def get_ids(idxs, l, u):
    #     if u < l:
    #         ids = idxs[l:] + idxs[:u]
    #     else:
    #         ids = idxs[l:u]
    #     return ids
    # for it in range(num_iters):
    #     mi = it*num_classes*num_lfreq_class; miP1 = ((it*num_classes)+num_classes)*num_lfreq_class
    #     zl = np.mod(it*num_lfreq_class,len(zidxs)); zu = np.mod((it+1)*num_lfreq_class-1,len(zidxs))
    #     ol = np.mod(it*num_lfreq_class,len(oidxs)); ou = np.mod((it+1)*num_lfreq_class-1,len(oidxs))
    #     tl = np.mod(it*num_lfreq_class,len(tidxs)); tu = np.mod((it+1)*num_lfreq_class-1,len(tidxs))
    #     zids = get_ids(zidxs, zl, zu+1)
    #     oids = get_ids(oidxs, ol, ou+1)
    #     tids = get_ids(tidxs, tl, tu+1)
    #     idxs = zids + oids + tids
    #     print(idxs)
    #     Bn[mi:miP1] = B[idxs]; Xn[mi:miP1] = X[idxs]; Mn[mi:miP1] = M[idxs]; An[mi:miP1] = A[idxs]; Yn[mi:miP1] = Y[idxs]
    #     CEn[mi:miP1] = CE[idxs]
    # if oversample:
    #     y_vals_sort = self.ddata[fold][tvt]['ys_seq'][:,0][idx_sort]
    #     class_count = np.unique(y_vals_sort, return_counts=True)[1]
    #     print(class_count)
    #     weight = 1. / class_count
    #     samples_weight = weight[y_vals_sort]
    #     if device is not None:
    #         samples_weight = torch.from_numpy(samples_weight).to(device)
    #     else:
    #         samples_weight = torch.from_numpy(samples_weight)
    #     sampler = WeightedRandomSampler(samples_weight, len(samples_weight))
    '''
    B, X, A, M, Y, CE = data.tensors
    Bnp = pt_numpy(B); Ynp = pt_numpy(Y)
    if strategy == 'random':
        # Duplicate minority-class rows at random until the classes balance.
        sampler = RandomOverSampler(random_state=0)
        Bnp, Ynp = sampler.fit_resample(Bnp, Ynp)
        # Indices (into the original dataset) of every sampled row.
        new_idxs = sampler.sample_indices_
        num_examples = len(new_idxs)
        # NOTE(review): the zero tensors Xn/Mn/An/CEn allocated below are dead —
        # they are unconditionally overwritten by the fancy-indexing further
        # down; Xn is even pre-allocated with a missing trailing dimension.
        if device is not None:
            Bn = torch.from_numpy(Bnp.astype('float32')).to(device)
            Xn = torch.zeros((num_examples, X.shape[1])).to(device)
            Mn = torch.zeros((num_examples, M.shape[1], M.shape[2])).to(device)
            An = torch.zeros((num_examples, A.shape[1], A.shape[2])).to(device)
            Yn = torch.from_numpy(Ynp.astype('int64')).to(device)
            CEn = torch.zeros((num_examples, CE.shape[1])).to(device)
        else:
            Bn = torch.from_numpy(Bnp.astype('float32'))
            Xn = torch.zeros((num_examples, X.shape[1]))
            Mn = torch.zeros((num_examples, M.shape[1], M.shape[2]))
            An = torch.zeros((num_examples, A.shape[1], A.shape[2]))
            Yn = torch.from_numpy(Ynp.astype('int64'))
            CEn = torch.zeros((num_examples, CE.shape[1]))
        Xn = X[new_idxs]; Mn = M[new_idxs]; An = A[new_idxs]; CEn = CE[new_idxs]
        # Shuffle so the duplicated rows are not grouped by class.
        shuffle_idxs = [x for x in range(num_examples)]
        np.random.shuffle(shuffle_idxs)
        Bn = Bn[shuffle_idxs]; Xn = Xn[shuffle_idxs]; An = An[shuffle_idxs]
        Mn = Mn[shuffle_idxs]; Yn = Yn[shuffle_idxs]; CEn = CEn[shuffle_idxs]
    elif strategy == 'smote':
        sampler = SMOTE()
        raise NotImplementedError()
    elif strategy == 'adasyn':
        sampler = ADASYN()
        raise NotImplementedError()
    # NOTE(review): an unknown strategy falls through to this return with the
    # Bn..CEn names unbound (NameError) — consider validating `strategy`.
    return TensorDataset(Bn, Xn, An, Mn, Yn, CEn)
def get_masks(M):
m_t = ((torch.flip(torch.cumsum(torch.flip(M.sum(-1), (1,)), 1), (1,))>1.)*1)
m_g_t = (m_t.sum(-1)>1)*1.
lens = m_t.sum(-1)
return m_t, m_g_t, lens
def masked_gaussian_nll_3d(x, mu, std, mask):
    """Elementwise Gaussian negative log-likelihood of x under N(mu, std^2),
    zeroed wherever mask is 0 (no reduction is applied)."""
    log_norm_const = 0.5 * np.log(2 * np.pi)
    squared_error = (mu - x) ** 2
    nll = log_norm_const + torch.log(std) + squared_error / (2 * std ** 2)
    return nll * mask
def apply_reg(p, reg_type='l2'):
    """Return the L1 or L2 regularization penalty of tensor p.

    Raises ValueError for any reg_type other than 'l1' or 'l2'.
    """
    if reg_type == 'l2':
        return torch.sum(p.pow(2))
    if reg_type == 'l1':
        return torch.sum(torch.abs(p))
    raise ValueError('bad reg')
def pt_numpy(tensor):
    """Convert a torch tensor to a NumPy array (detached, moved to CPU)."""
    detached = tensor.detach()
    return detached.cpu().numpy()
def calc_stats(preds, tensors):
    """Compute regression metrics for survival-style predictions.

    Args:
        preds: predicted values (torch tensor).
        tensors: tuple (B, X, A, M, Y, CE); only Y (targets) and CE
            (censoring indicators, 1 == censored) are used here.

    Returns:
        (mse, r2, ci): MSE and R^2 over uncensored examples, plus the
        concordance index over all examples.
    """
    B, X, A, M, Y, CE = tensors
    if Y.shape[-1] > 1:
        # One-hot targets: recover a scalar target per example.
        # Fix: this branch referenced `self.prediction` although this is a
        # module-level function (guaranteed NameError); use `preds` instead.
        Y_oh = Y.detach().cpu().numpy()
        bin_preds = preds.detach().cpu().numpy()
        # NOTE(review): indexing predictions by argmax of the one-hot target
        # reproduces the original (previously dead) logic — confirm intent.
        Y_np = bin_preds[np.argmax(Y_oh, -1)]
    else:
        Y_np = Y.detach().cpu().numpy().ravel()
    CE_np = CE.detach().cpu().numpy().ravel()
    preds_np = preds.detach().cpu().numpy().ravel()
    # Event observed where the censoring indicator is 0.
    event_obs = (1. - CE_np).ravel()
    idx = np.where(event_obs > 0)[0]
    # MSE and R^2 are only meaningful on uncensored (observed) examples.
    mse = np.square(Y_np[idx] - preds_np[idx]).mean()
    r2 = r2_score(Y_np[idx], preds_np[idx])
    # Concordance index handles censoring itself, so use all examples.
    ci = concordance_index(Y_np, preds_np, event_obs)
    return mse, r2, ci
import os
import uuid
import boto3
import moto
import pytest
from commercetools_token_refresher.rotator import TokenRotator
@moto.mock_secretsmanager
def test_rotator_not_enabled_rotation():
    """Super basic test, should be expanded.

    moto 1.3.14 does not implement secret rotation, so constructing a
    TokenRotator against a secret whose rotation is not enabled is expected
    to raise ValueError.
    """
    secret_name = "mock/ct-access-token"
    # Region comes from the environment; assumes AWS_DEFAULT_REGION is set
    # by the test harness — TODO confirm.
    secrets_manager = boto3.client(
        "secretsmanager", region_name=os.environ["AWS_DEFAULT_REGION"]
    )
    # rotation not support in moto 1.3.14, so test that it errors out on that.
    secrets_manager.create_secret(
        Name=secret_name,
        SecretString="{}",
        Tags=[{"Key": "sm_client_arn", "Value": "some arn"}],
    )
    # Each rotation attempt needs a unique client request token.
    client_request_token = str(uuid.uuid4())
    with pytest.raises(ValueError):
        TokenRotator(
            "http://localhost/oauth/token",
            "http://localhost",
            secret_name,
            client_request_token,
        )
|
import os
import re
import sys
import lxml.etree
'''
This script takes SDL2 doxygen documentation and annotates the members with it.
How to use:
1. Clone the libsdl repository
2. Go into the docs folder, open the doxyfile and add this line:
GENERATE_XML=YES
3. Run doxygen
4. Amalgamate the generated XML using the command on top of the output/xml/combine.xslt file
5. Use the generated XML file (all.xml) as a parameter to this script
'''
# Parse the amalgamated doxygen XML once; every doc lookup below queries it.
print("Loading XML file...")
xmlFile = lxml.etree.parse(sys.argv[1])
print("done.")
def createSummaryTag(summary):
    """Wrap *summary* in a <summary> XML-doc tag.

    Multiline summaries get the tag on its own lines; an empty summary
    yields the empty string.
    """
    if not summary:
        return ""
    if "\n" in summary:
        return "<summary>\n{}\n</summary>".format(summary)
    return "<summary>{}</summary>".format(summary)
def get_doc(api, additional_summary=None):
    """Build the XML doc-comment body for the native function *api*.

    Looks the function up in the parsed doxygen XML (module-level
    ``xmlFile``). Returns a ``<summary>`` optionally followed by ``<param>``
    tags; when the function is undocumented, returns *additional_summary*
    wrapped as a summary, or ``""`` if there is nothing to emit.
    """
    matches = xmlFile.xpath(
        ".//memberdef[@kind='function']/name[text()='{}']/..".format(api))
    if len(matches) == 0:
        if additional_summary is None:
            return ""
        return createSummaryTag(additional_summary)
    member = matches[0]
    brief = "".join(member.xpath("./briefdescription//text()"))
    detailed = "".join(member.xpath("./detaileddescription/para/text()"))
    summary = "\n\n".join(part for part in (brief, detailed) if len(part) > 0).strip()
    # Collapse blank lines inside the summary.
    summary = re.sub(r"\n\s*\n", "\n", summary)
    if additional_summary is not None:
        summary += "\n\n" + additional_summary
    pieces = [createSummaryTag(summary)]
    for item in member.xpath("./detaileddescription//parameteritem"):
        pname = "".join(item.xpath(".//parametername/text()"))
        pdesc = "".join(item.xpath(".//parameterdescription//text()")).strip()
        if len(pdesc) > 0:
            pieces.append('<param name="{}">{}</param>'.format(pname, pdesc))
    return "\n".join(pieces)
# Directory holding the generated C# binding sources to annotate in place.
dir = "SharpSDL2/src/SDL/"  # NOTE(review): shadows the builtin `dir`
# Captures (leading whitespace, full [DllImport ...] attribute, EntryPoint name);
# the .replace(" ", r"\s*") makes spacing inside the attribute flexible.
dllImportRegex = re.compile(
    r"([ \t]*)" + r"(\[DllImport.*\WEntryPoint = \"(.*)\".*\])".replace(" ", r"\s*"))
# Matches /* ... */ block comments (plus trailing whitespace/newline).
commentRegex = re.compile(r"/\*((?:.|\n)*?)\*/\s*\n")
for file in os.listdir(dir):
    print(file)
    with open(dir + file) as fp:
        text = fp.read()
    comments = list(commentRegex.finditer(text))
    # Map comment end offset -> comment body, so a comment that ends exactly
    # where a [DllImport] match begins can be folded into its documentation.
    commentsByEndIndex = {comment.end(): comment.group(1) for comment in comments}
    def replace(match):
        # Rewrites one [DllImport] attribute, prefixing it with /// doc lines.
        ws, dllimport, entrypoint = match.groups()
        print("Getting the doc for {}...".format(entrypoint))
        comment = commentsByEndIndex.get(match.start(), None)
        if comment is not None:
            # Strip the leading '*' gutter of the C-style comment lines and
            # fold the comment into the summary as binding info.
            comment_lines = comment.strip().split("\n")
            comment_lines = [line.strip().lstrip("*").strip() for line in comment_lines]
            comment = "\n".join(comment_lines)
            comment = "Binding info:\n" + comment
        doc = get_doc(entrypoint, additional_summary=comment)
        print(doc)
        if len(doc) > 0:
            # Indent every doc line to the attribute's own indentation.
            doc_reformatted = "\n".join(ws + "/// " + line for line in doc.split("\n"))
            return doc_reformatted + "\n" + ws + dllimport
        else:
            return ws + dllimport
    replacement = dllImportRegex.sub(replace, text)
    print(replacement)
    # Rewrite the source file in place with the annotated text.
    with open(dir + file, "w") as fp:
        fp.write(replacement)
|
"""Module & package import."""
from flask import (
Blueprint,
render_template,
url_for,
redirect,
flash,
request,
)
from flask_login import login_required
from online_store import db
from online_store.forms import AddProductForm
from online_store.admin.utils import admin_required
from online_store.users.utils import save_image
from online_store.models import Product
admin = Blueprint("admin", __name__)
@admin.route("/admin", methods=["GET", "POST"])
@login_required
@admin_required
def show_admin(editing=False):
    """Admin page where items can be added to db.

    GET renders the admin page with the product list; a valid POST stores
    the uploaded image, creates the Product row, and redirects back
    (Post/Redirect/Get) so a refresh cannot resubmit the form.
    """
    form = AddProductForm()
    if form.validate_on_submit():
        # Store the uploaded image (max dimension 1200) under "assets".
        image_file = save_image(form.image.data, 1200, "assets")
        new_product = Product(
            title=form.title.data,
            price=form.price.data,
            description=form.description.data,
            media=form.media.data,
            size=form.size.data,
            quantity=form.quantity.data,
            image=image_file,
        )
        db.session.add(new_product)
        db.session.commit()
        flash("Product added, thank you!")
        return redirect(url_for("admin.show_admin"))
    products = Product.query.order_by(Product.date_created).all()
    context = {
        "products": products,
        "title": "Admin",
        "form": form,
        "editing": editing,
    }
    return render_template("admin.html", **context)
@admin.route("/admin-edit/<product_id>", methods=["POST", "GET"])
@login_required
@admin_required
def edit_product(product_id, editing=True):
    """Edit product details.

    Fix: unlike ``show_admin``, this admin mutation view was reachable
    without any authentication; it now requires a logged-in admin.
    """
    # NOTE(review): the positional True is received by FlaskForm as formdata;
    # confirm AddProductForm really expects it (an `editing` flag seems intended).
    form = AddProductForm(True)
    product = Product.query.filter_by(id=product_id).first()
    # NOTE(review): `form.editing` is not a standard FlaskForm attribute —
    # confirm it is defined on AddProductForm.
    if form.validate_on_submit() and form.editing:
        # Store the replacement image and overwrite the product fields.
        image_file = save_image(form.image.data, 1200, "assets")
        product.title = form.title.data
        product.price = form.price.data
        product.description = form.description.data
        product.media = form.media.data
        product.size = form.size.data
        product.quantity = form.quantity.data
        product.image = image_file
        db.session.commit()
        print("Succesfully updated!")
        return redirect(url_for("admin.show_admin"))
    products = Product.query.order_by(Product.date_created).all()
    print(f"Products: {products}")
    context = {
        "products": products,
        "product": product,
        "title": "Edit Product",
        "form": form,
        "editing": editing,
    }
    print(f"Context: {context}")
    return render_template("admin.html", **context)
@admin.route("/admin-delete/<product_id>")
@login_required
@admin_required
def delete_product(product_id):
    """Delete products from database.

    Fix: deletion was reachable without authentication; it now requires a
    logged-in admin, consistent with ``show_admin``.
    """
    try:
        product_to_delete = Product.query.filter_by(id=product_id).first()
        # NOTE(review): first() returns None for an unknown id, and
        # session.delete(None) raises an SQLAlchemy error that is NOT
        # (TypeError, ValueError) — confirm the intended not-found behaviour.
        db.session.delete(product_to_delete)
        db.session.commit()
        return redirect(url_for("admin.show_admin"))
    except (TypeError, ValueError):
        print("Something went wrong deleting this product.")
        return redirect(url_for("admin.show_admin"))
|
from __future__ import print_function

import argparse
import os.path
import sys
from glob import glob

import ssg.build_yaml
# ssg.utils and ssg.yaml are used below (required_key / open_environment) but
# were never imported explicitly; importing only ssg.build_yaml does not
# guarantee these submodules are loaded.
import ssg.utils
import ssg.yaml
class ResolvableProfile(ssg.build_yaml.Profile):
    """A profile that can resolve its ``extends`` chain.

    Resolution merges the selections, variables, and rule refinements of the
    extended profile into this one; entries defined locally win over
    inherited ones, and unselections beat selections.
    """

    def __init__(self, *args, **kwargs):
        super(ResolvableProfile, self).__init__(*args, **kwargs)
        # Set once resolve() has run so shared ancestors are merged only once.
        self.resolved = False

    def resolve(self, all_profiles):
        """Merge the (recursively resolved) extended profile into this one.

        Args:
            all_profiles: mapping of profile id -> ResolvableProfile.

        Raises:
            RuntimeError: when the extended profile is not in *all_profiles*.
        """
        if self.resolved:
            return
        resolved_selections = set(self.selected)
        if self.extends:
            if self.extends not in all_profiles:
                # Fix: the original format() call supplied `profiles=` while
                # the template referenced {known_profiles}, so building this
                # error message itself raised KeyError; the two literals also
                # joined without a space ("butonly").
                msg = (
                    "Profile {name} extends profile {extended}, but "
                    "only profiles {known_profiles} are available for resolution."
                    .format(name=self.id_, extended=self.extends,
                            known_profiles=list(all_profiles.keys())))
                raise RuntimeError(msg)
            extended_profile = all_profiles[self.extends]
            # Resolve the parent first so we inherit its fully merged state.
            extended_profile.resolve(all_profiles)
            extended_selects = set(extended_profile.selected)
            resolved_selections.update(extended_selects)
            # Local variables / refinements override inherited ones.
            updated_variables = dict(extended_profile.variables)
            updated_variables.update(self.variables)
            self.variables = updated_variables
            updated_refinements = dict(extended_profile.refine_rules)
            updated_refinements.update(self.refine_rules)
            self.refine_rules = updated_refinements
        # Unselections beat selections, including inherited ones.
        for uns in self.unselected:
            resolved_selections.discard(uns)
        self.unselected = []
        self.selected = sorted(resolved_selections)
        self.resolved = True
def create_parser():
    """Build the command-line parser for profile resolution.

    Positional args are profile files; the YAML options enable
    autodetection of the profile root; --output is a filename template.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("profile_file", nargs="*")
    cli.add_argument(
        "--build-config-yaml",
        help="YAML file with information about the build configuration. "
             "e.g.: ~/scap-security-guide/build/build_config.yml "
             "needed for autodetection of profile root")
    cli.add_argument(
        "--product-yaml",
        help="YAML file with information about the product we are building. "
             "e.g.: ~/scap-security-guide/rhel7/product.yml "
             "needed for autodetection of profile root")
    cli.add_argument(
        "--output", "-o", default="{name}.profile",
        help="The template for saving processed profile files.")
    return cli
def make_name_to_profile_mapping(profile_files, env_yaml):
    """Load profiles from *profile_files*, keyed by their id.

    Files that fail to load are skipped with a message on stderr rather than
    aborting the whole run.
    """
    mapping = {}
    for fname in profile_files:
        try:
            profile = ResolvableProfile.from_yaml(fname, env_yaml)
            mapping[profile.id_] = profile
        except Exception as exc:
            # The profile is probably doc-incomplete
            msg = "Not building profile from {fname}: {err}".format(
                fname=fname, err=str(exc))
            print(msg, file=sys.stderr)
    return mapping
def get_env_yaml(build_config_yaml, product_yaml):
    """Open the build environment YAML, or return None if either path is missing."""
    if build_config_yaml is None or product_yaml is None:
        return None
    return ssg.yaml.open_environment(build_config_yaml, product_yaml)
def get_profile_files_from_root(env_yaml, product_yaml):
    """List *.profile files under the product's configured profiles root.

    Returns an empty list when no environment YAML is available (profile
    root cannot be autodetected).
    """
    if not env_yaml:
        return []
    base_dir = os.path.dirname(product_yaml)
    profiles_root = ssg.utils.required_key(env_yaml, "profiles_root")
    pattern = "{base_dir}/{profiles_root}/*.profile".format(
        profiles_root=profiles_root, base_dir=base_dir)
    return glob(pattern)
def main():
    """Resolve every requested profile's extends chain and dump the results."""
    args = create_parser().parse_args()
    env_yaml = get_env_yaml(args.build_config_yaml, args.product_yaml)
    # Autodetected profiles come first; explicit files are appended after.
    profile_files = get_profile_files_from_root(env_yaml, args.product_yaml)
    profile_files.extend(args.profile_file)
    profiles = make_name_to_profile_mapping(profile_files, env_yaml)
    for profile in profiles.values():
        profile.resolve(profiles)
    for name, profile in profiles.items():
        profile.dump_yaml(args.output.format(name=name))


if __name__ == "__main__":
    main()
|
""" runner script to investigate mode preconditioner """
import os
from math import pi
import xml.etree.ElementTree as ET
import platform_paths as pp
import manipulator as ma

# load parameter file
TREE = ET.parse('../XML/parameterSHL.xml')
ROOT = TREE.getroot()
ma.set_parameter(ROOT, 'withoutput', 1)
# make executable ready
EXE = 'modeConvDiff'
os.chdir(pp.EXE_PATH)
os.system('make ' + EXE + ' -j4')
# flow parameters: Strouhal and Reynolds numbers, plus per-direction
# coarsening factors for the grid
ST = 1./30.
RE = 300.
DX = 1
DY = 4
DZ = 1
# processor grid (x, y, z, frequency)
NPX = 1
NPY = 2
NPZ = 4
NPF = 1
# reference ("original") domain lengths
LXO = 22.5
LYO = 600.
LZO = 150.
# reference ("original") grid sizes
NXO = 97
NYO = 1537
NZO = 513
# coarsened grid sizes actually used for this run
NX = 65
# NOTE(review): under Python 3, `/` yields a float here, so NY would not be
# an integer grid size — presumably `//` was intended or this ran on
# Python 2; confirm the interpreter.
NY = (1025-1)/2/DY+1
# NZ = (385-1)/2/DZ+1
NZ = 129
# NOTE(review): duplicate assignment kept from the original — NZ is set to
# 129 twice, presumably leftover experimentation.
NZ = 129
# domain lengths rescaled to the coarsened grids
LX = LXO
# NOTE(review): LX is immediately overwritten below; the plain LXO value is dead.
LX = round(LXO*1.5/(NXO-1)*(NX-1), 1)
LY = round(LYO*1.5/(NYO-1)*(NY-1), 1)
LZ = round(LZO*1.5/(NZO-1)*(NZ-1), 1)
#
print('NX', NX)
print('NY', NY)
print('NZ', NZ)
#
print('LX', LX)
print('LY', LY)
print('LZ', LZ)
#
# effective grid-spacing ratios relative to the reference setup (sanity check)
print('DX', LX/LXO*(NXO-1)/(NX-1))
print('DY', LY/LYO*(NYO-1)/(NY-1))
print('DZ', LZ/LZO*(NZO-1)/(NZ-1))
#
#
# write the physical / discretization parameters into the XML tree
ma.set_parameter(ROOT, 'Re', RE)
# frequency parameter alpha^2 = 2*pi*St*Re
ma.set_parameter(ROOT, 'alpha2', 2.*pi*ST*RE)
ma.set_parameter(ROOT, 'lx', LX)
ma.set_parameter(ROOT, 'ly', LY)
ma.set_parameter(ROOT, 'lz', LZ)
ma.set_parameter(ROOT, 'origin z', LZ/2.)
ma.set_parameter(ROOT, 'nx', NX)
ma.set_parameter(ROOT, 'ny', NY)
ma.set_parameter(ROOT, 'nz', NZ)
ma.set_parameter(ROOT, 'nf', 1)
ma.set_parameter(ROOT, 'npx', NPX)
ma.set_parameter(ROOT, 'npy', NPY)
ma.set_parameter(ROOT, 'npz', NPZ)
ma.set_parameter(ROOT, 'npf', 1)
ma.set_parameter(ROOT, 'Maximum Iterations', 20)
# ma.set_parameter(ROOT, 'Convergence Tolerance', 1.e-6)
ma.set_parameter(ROOT, 'Output Frequency', 1)
# parameter sweep: preconditioner types and relaxation factors omega
PRECS = [1]
# PRECS = [6, 7]
# PRECS = [2, 6, 7]
# PRECS = [5, 3, 6, 7]
# OMEGAS = [0.5, 1., 1.5]
# OMEGAS = [0.1, 0.2, 0.3, 0.4, 0.5]
OMEGAS = [0.1, 0.3, 0.5]
# OMEGAS = [0.5]
# three-level case directory: <data root>/prec_<p>/omega_<w>
CASE_PATH = ['']*3
for side in ['right']:
    CASE_PATH[0] = pp.DATA_PATH + '/SHL_mode_prec'
    pp.mkdir(CASE_PATH, 0)
    for prec in PRECS:
        CASE_PATH[1] = '/prec_'+str(prec)
        pp.mkdir(CASE_PATH, 1)
        pp.chdir(CASE_PATH, 1)
        for omega in OMEGAS:
            CASE_PATH[2] = '/omega_'+str(omega)
            pp.mkdir(CASE_PATH, 2)
            pp.chdir(CASE_PATH, 2)
            #
            # ma.set_parameter(ROOT, 'preconditioner', 'none')
            ma.set_parameter(ROOT, 'type', prec)
            ma.set_parameter(ROOT, 'omega', omega)
            # write the per-case parameter file into the case directory
            TREE.write('parameter3D.xml')
            nptot = NPX*NPY*NPZ
            print()
            print(CASE_PATH)
            # assemble the (batch-system wrapped) launch command; the -R
            # string requests memory from the LSF scheduler
            exe_str = \
                pp.exe_pre(nptot,
                           ' -N -W 1:00 -R "rusage[mem=' +
                           str(1024*2) + ']" ') + \
                pp.EXE_PATH+'/'+EXE+' --realCase=1 '
            print(exe_str)
            os.system(exe_str)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.