def task_report_deps():
"""report dependencies and changed dependencies to a file
"""
return {
'file_dep': ['req.in', 'req-dev.in'],
'actions': ['echo D: %(dependencies)s, CH: %(changed)s > %(targets)s'],
'targets': ['report.txt'],
}
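# Hedged sketch (not part of the original dodo.py): the same report written as
# a doit Python action. doit injects the `dependencies`, `changed` and
# `targets` keywords when an action function declares them as parameters, so
# this mirrors the %(...)s substitution used by the shell action above.
def task_report_deps_py():
    """Python-action variant of the report task above (illustrative only)."""
    def write_report(dependencies, changed, targets):
        with open(targets[0], "w") as fh:
            fh.write("D: {}, CH: {}\n".format(dependencies, changed))
    return {
        'file_dep': ['req.in', 'req-dev.in'],
        'actions': [write_report],
        'targets': ['report_py.txt'],  # separate target so it does not clash with report.txt above
    }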
|
def binary_search(ds, low, high, key):
    """Recursively search the sorted list ds for key between indices low and high."""
    print('ds:', ds)
print('low:', low)
print('high:', high)
mid = (high + low) // 2
if low > high:
return None
elif key == ds[mid]:
return mid
elif key < ds[mid]:
return binary_search(ds, low, mid - 1, key)
else:
return binary_search(ds, mid + 1, high, key)
li = [1, 2, 3, 4, 5]
print(binary_search(li, 0, len(li) - 1, 3))
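# Additional quick check (illustrative): a key that is not in the list keeps
# halving the range until low > high and the function returns None.
print(binary_search(li, 0, len(li) - 1, 6))  # -> None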
|
import unittest
from kubernetes.client import V1APIResource
from k8s_handle.exceptions import ProvisioningError
from k8s_handle.transforms import split_str_by_capital_letters
from .adapters import Adapter, AdapterBuiltinKind, AdapterCustomKind
from .mocks import K8sClientMock, CustomObjectsAPIMock, ResourcesAPIMock
class TestAdapterBuiltInKind(unittest.TestCase):
def test_get_app_kind(self):
self.assertEqual(split_str_by_capital_letters('ConfigMap'), 'config_map')
self.assertEqual(split_str_by_capital_letters('Namespace'), 'namespace')
self.assertEqual(split_str_by_capital_letters('PodDisruptionBudget'), 'pod_disruption_budget')
def test_app_get_fail(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'Deployment', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
deployment.get()
self.assertTrue('Get deployment fail' in str(context.exception))
storage = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'StorageClass', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
storage.get()
self.assertTrue('Get storage class fail' in str(context.exception))
def test_app_get_not_found(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock('404'),
spec={'kind': 'Deployment', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}})
res = deployment.get()
self.assertEqual(res, None)
storage = AdapterBuiltinKind(
api=K8sClientMock('404'),
spec={'kind': 'StorageClass', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}})
res = storage.get()
self.assertEqual(res, None)
def test_app_get(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock(),
spec={'kind': 'Deployment', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}})
res = deployment.get()
self.assertEqual(res.metadata, {'key1': 'value1'})
storage = AdapterBuiltinKind(
api=K8sClientMock(),
spec={'kind': 'StorageClass', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}})
res = storage.get()
self.assertEqual(res.metadata, {'key1': 'value1'})
def test_app_create_fail(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'Deployment', 'metadata': {'name': ''}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
deployment.create()
self.assertTrue('Create deployment fail' in str(context.exception))
storage = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'StorageClass', 'metadata': {'name': ''}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
storage.create()
self.assertTrue('Create storage class fail' in str(context.exception))
def test_app_create(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock(''),
spec={'kind': 'Deployment', 'metadata': {'name': ''}, 'spec': {'replicas': 1}})
res = deployment.create()
self.assertEqual(res, {'key1': 'value1'})
storage = AdapterBuiltinKind(
api=K8sClientMock(''),
spec={'kind': 'StorageClass', 'metadata': {'name': ''}, 'spec': {'replicas': 1}})
res = storage.create()
self.assertEqual(res, {'key1': 'value1'})
def test_app_replace_fail(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'Deployment', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
deployment.replace({})
self.assertTrue('Replace deployment fail' in str(context.exception))
storage = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'StorageClass', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
storage.replace({})
self.assertTrue('Replace storage class fail' in str(context.exception))
def test_app_replace(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock(''),
spec={'kind': 'Deployment', 'metadata': {'name': ''}, 'spec': {'replicas': 1}})
res = deployment.replace({})
self.assertEqual(res, {'key1': 'value1'})
storage = AdapterBuiltinKind(
api=K8sClientMock(''),
spec={'kind': 'StorageClass', 'metadata': {'name': ''}, 'spec': {'replicas': 1}})
res = storage.replace({})
self.assertEqual(res, {'key1': 'value1'})
def test_app_replace_service(self):
deployment = AdapterBuiltinKind(
api=K8sClientMock(''),
spec={'kind': 'Service', 'metadata': {'name': ''}, 'spec': {'replicas': 1}})
res = deployment.replace({})
self.assertEqual(res, {'key1': 'value1'})
def test_app_delete_fail(self):
client = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'Deployment', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
client.delete()
self.assertTrue('Delete deployment fail' in str(context.exception))
storage = AdapterBuiltinKind(
api=K8sClientMock('fail'),
spec={'kind': 'StorageClass', 'metadata': {'name': 'fail'}, 'spec': {'replicas': 1}})
with self.assertRaises(ProvisioningError) as context:
storage.delete()
self.assertTrue('Delete storage class fail' in str(context.exception))
def test_app_delete_not_found(self):
client = AdapterBuiltinKind(
api=K8sClientMock('404'),
spec={'kind': 'Deployment', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}})
res = client.delete()
self.assertEqual(res, None)
storage = AdapterBuiltinKind(
api=K8sClientMock('404'),
spec={'kind': 'StorageClass', 'metadata': {'name': '404'}, 'spec': {'replicas': 1}})
res = storage.delete()
self.assertEqual(res, None)
def test_app_delete(self):
client = AdapterBuiltinKind(
api=K8sClientMock(),
spec={'kind': 'Deployment', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}})
res = client.delete()
self.assertEqual(res, {'key1': 'value1'})
storage = AdapterBuiltinKind(
api=K8sClientMock(),
spec={'kind': 'StorageClass', 'metadata': {'name': 'test'}, 'spec': {'replicas': 1}})
res = storage.delete()
self.assertEqual(res, {'key1': 'value1'})
class TestAdapter(unittest.TestCase):
def test_get_instance_custom(self):
self.assertIsInstance(
Adapter.get_instance({'kind': "CustomKind"}, CustomObjectsAPIMock(), ResourcesAPIMock()),
AdapterCustomKind
)
self.assertIsInstance(
Adapter.get_instance({'kind': "CustomKind"}, CustomObjectsAPIMock(), ResourcesAPIMock()),
AdapterCustomKind
)
def test_get_instance_test(self):
self.assertIsInstance(
Adapter.get_instance(
{
'kind': Adapter.kinds_builtin[0],
'apiVersion': 'test/test'
}
).api, K8sClientMock)
def test_get_instance_builtin(self):
self.assertIsInstance(
Adapter.get_instance(
{
'kind': Adapter.kinds_builtin[0],
'apiVersion': 'apps/v1beta1'
}
), AdapterBuiltinKind)
def test_get_instance_negative(self):
self.assertIsNone(
Adapter.get_instance(
{
'kind': Adapter.kinds_builtin[0],
'apiVersion': 'unknown'
}
)
)
class TestAdapterCustomKind(unittest.TestCase):
@staticmethod
def _resources_api_mock():
return ResourcesAPIMock(
'version',
'group/version',
[V1APIResource(None, 'group', 'kind', 'kinds', True, [], 'kind', [])]
)
def test_initialization_positive(self):
adapter = Adapter.get_instance(
{
'kind': 'kind',
'apiVersion': 'group/version',
'metadata': {
"namespace": 'test_namespace'
}
}, CustomObjectsAPIMock(), TestAdapterCustomKind._resources_api_mock()
)
self.assertEqual(adapter.kind, 'kind')
self.assertEqual(adapter.namespace, 'test_namespace')
self.assertEqual(adapter.group, 'group')
self.assertEqual(adapter.version, 'version')
self.assertEqual(adapter.plural, 'kinds')
self.assertIsInstance(adapter.api, CustomObjectsAPIMock)
def test_initialization_kind_missing(self):
adapter = Adapter.get_instance({}, CustomObjectsAPIMock(), TestAdapterCustomKind._resources_api_mock())
self.assertFalse(adapter.kind)
self.assertFalse(adapter.plural)
def test_initialization_api_version_invalid(self):
adapter = Adapter.get_instance({}, CustomObjectsAPIMock(), TestAdapterCustomKind._resources_api_mock())
self.assertFalse(adapter.group)
self.assertFalse(adapter.version)
adapter = Adapter.get_instance(
{'apiVersion': 'noslash'},
CustomObjectsAPIMock(),
TestAdapterCustomKind._resources_api_mock()
)
self.assertFalse(adapter.group)
self.assertFalse(adapter.version)
adapter = Adapter.get_instance(
{'apiVersion': 'domain/version/something'},
CustomObjectsAPIMock(),
ResourcesAPIMock()
)
self.assertEqual(adapter.group, 'domain')
self.assertEqual(adapter.version, 'version/something')
|
from nose.tools import assert_equals, assert_true, assert_false
from ..mdp import MDP
class TestMDP:
def test_mdp_solver_value_iteration_linear_mdp(self):
"""
Value iteration should find optimal V(s) for a simple linear MDP
"""
m = MDP()
a_forward = m.add_action('forward')
m.add_transition('s0', a_forward, 's1', 1)
m.add_transition('s1', a_forward, 's2', 1)
m.add_transition('s2', a_forward, 's3', 1)
m.add_transition('s3', a_forward, 't', 1)
m.make_terminal('t')
v = m.solve(discount=1.0)
assert_equals(v['s0'], 4)
assert_equals(v['s1'], 3)
assert_equals(v['s2'], 2)
assert_equals(v['s3'], 1)
assert_equals(v['t'], 0)
def test_mdp_solver_value_iteration_toy_mdp(self):
"""
Value iteration should find optimal V(s) for toy MDP from Slide 38 of
David Silver's MDP lecture:
http://www0.cs.ucl.ac.uk/staff/d.silver/web/Teaching_files/MDP.pdf
"""
m = MDP()
a1 = m.add_action('a1')
a2 = m.add_action('a2')
a3 = m.add_action('a3')
m.add_transition('facebook', a1, 'facebook', -1)
m.add_transition('facebook', a2, 'class1', 0)
m.add_transition('class1', a1, 'facebook', -1)
m.add_transition('class1', a2, 'class2', -2)
m.add_transition('class2', a1, 'sleep', 0)
m.add_transition('class2', a2, 'class3', -2)
m.add_transition('class3', a1, 'sleep', 10)
m.add_transition('class3', a2, 'pub', 1)
m.add_transition('pub', a1, ['class1', 'class2', 'class3'], [0, 0, 0], [0.2, 0.4, 0.4])
m.make_terminal('sleep')
v = m.solve(discount=1.0)
assert_equals(v['facebook'], 6.0)
assert_equals(v['class1'], 6.0)
assert_equals(v['class2'], 8.0)
assert_equals(v['class3'], 10)
assert_equals(v['sleep'], 0)
assert_equals(v['pub'], 8.4)
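# The tests above exercise an MDP solver from ..mdp; the snippet below is a
# hedged, self-contained sketch of the value-iteration algorithm they assume,
# over an explicit table {state: {action: [(next_state, prob, reward), ...]}}.
# The function name and table layout are illustrative assumptions, not the
# project's actual API.
def value_iteration_sketch(transitions, terminals, discount=1.0, tol=1e-9):
    values = {s: 0.0 for s in transitions}
    values.update({t: 0.0 for t in terminals})
    while True:
        delta = 0.0
        for state, actions in transitions.items():
            if state in terminals:
                continue
            best = max(
                sum(p * (r + discount * values[ns]) for ns, p, r in outcomes)
                for outcomes in actions.values()
            )
            delta = max(delta, abs(best - values[state]))
            values[state] = best
        if delta < tol:
            return values
# e.g. the linear chain from the first test converges to 4, 3, 2, 1, 0:
# value_iteration_sketch(
#     {'s0': {'f': [('s1', 1.0, 1)]}, 's1': {'f': [('s2', 1.0, 1)]},
#      's2': {'f': [('s3', 1.0, 1)]}, 's3': {'f': [('t', 1.0, 1)]}},
#     terminals={'t'})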
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import MySQLdb
import conf
class MySqlHelper(object):
def __init__(self):
        self.conn_dict = dict(conf.conn_dict)  # load the database connection settings
        # e.g. {host: '192.168.75.133', user: 'zhangyage', passwd: 'zhangyage', db: 'oldboy'}
    def Get_Dict(self, sql, params):  # fetch multiple rows
conn = MySQLdb.connect(**self.conn_dict)
cur = conn.cursor()
result1=cur.execute(sql,params)
        data = cur.fetchall()  # store the result of the SQL query
cur.close()
conn.close()
return data
    def Get_One(self, sql, params):  # fetch a single row
conn = MySQLdb.connect(**self.conn_dict)
cur = conn.cursor()
result1=cur.execute(sql,params)
        data = cur.fetchone()  # store the result of the SQL query
cur.close()
conn.close()
return data
#helper = MySqlHelper()
#sql = 'select * from Userinfo where id > %s'
#params = (5,)
#print(helper.Get_Dict(sql, params))
|
from enum import Enum
from enum import auto
from typing import Optional
from typing import Mapping
from typing import Sequence
import forsyde.io.python.core as core
class VertexTrait(core.Trait, Enum):
{%- for type_name, type_data in vertexTraitSuper.items() %}
{{type_name}} = auto()
{%- endfor %}
@classmethod
def refines_static(cls, one, other):
{%- for type_name, type_data in vertexTraitSuper.items() %}
{%- for super_trait in type_data %}
if one is cls.{{type_name}} and other is cls.{{super_trait}}:
return True
{%- endfor %}
{%- endfor %}
return False
def refines(self, o):
return VertexTrait.refines_static(self, o)
class EdgeTrait(core.Trait, Enum):
{%- for type_name, type_data in edgeTraitSuper.items() %}
{{type_name}} = auto()
{%- endfor %}
@classmethod
def refines_static(cls, one, other):
{%- for type_name, type_data in edgeTraitSuper.items() %}
{%- for super_trait in type_data %}
if one is cls.{{type_name}} and other is cls.{{super_trait}}:
return True
{%- endfor %}
{%- endfor %}
return False
def refines(self, o):
return EdgeTrait.refines_static(self, o)
class VertexAcessor(object):
"""This class is a method holder for all the possible type-safe acesses
for the vertexes properties."""
{%- for prop_name, prop_data in property_map.items() %}
@classmethod
def get_{{prop_name}}(cls, v: core.Vertex) -> Optional[{{prop_data[1]}}]:
if "{{prop_name}}" in v.properties:
return v.properties["{{prop_name}}"]
else:
{%- for trait_name in prop_data[0] %}
{%- if prop_name in default_property_map %}
if v.has_trait(VertexTrait.{{trait_name}}):
return {{default_property_map[prop_name]}}
{%- else %}
if v.has_trait(VertexTrait.{{trait_name}}):
raise ValueError("Property {{prop_name}} should exist in vertex '" + v.identifier + "' with trait {{trait_name}}, but does not.")
{%- endif %}
{%- endfor %}
else:
return None
{% endfor %}
|
import sys
from itertools import chain
import pathlib
from wheel.bdist_wheel import (
get_abi_tag,
get_platform as get_platform_tag,
)
from wheel.wheelfile import WheelFile
def remove_source_files(input_folder):
in_fd = pathlib.Path(input_folder)
if not in_fd.is_dir():
raise RuntimeError(f'input_folder={input_folder} is not a folder.')
src_files = list(
chain(
# Python.
in_fd.glob('**/*.py'),
in_fd.glob('**/*.pyc'),
# Cython.
in_fd.glob('**/*.pyx'),
in_fd.glob('**/*.pyd'),
# C/C++
in_fd.glob('**/*.c'),
in_fd.glob('**/*.C'),
in_fd.glob('**/*.cc'),
in_fd.glob('**/*.cpp'),
in_fd.glob('**/*.cxx'),
in_fd.glob('**/*.c++'),
in_fd.glob('**/*.h'),
in_fd.glob('**/*.H'),
in_fd.glob('**/*.hh'),
in_fd.glob('**/*.hpp'),
in_fd.glob('**/*.hxx'),
in_fd.glob('**/*.h++'),
)
)
for src_file in src_files:
src_file.unlink()
return src_files
def generate_whl_name(
input_folder,
distribution,
version,
build_tag,
abi_tag=None,
platform_tag=None,
):
python_tag = 'cp' + ''.join(map(str, sys.version_info[:2]))
abi_tag = abi_tag or get_abi_tag()
platform_tag = platform_tag or get_platform_tag(input_folder)
components = [distribution, version]
if build_tag:
components.append(build_tag)
components.extend([python_tag, abi_tag, platform_tag])
return '-'.join(components) + '.whl'
def repack_whl(input_folder, output_whl):
with WheelFile(output_whl, 'w') as wf:
wf.write_files(input_folder)
def debug():
import os
pywhlobf_data = os.getenv('PYWHLOBF_DATA')
assert pywhlobf_data
src_files = remove_source_files(f'{pywhlobf_data}/prep/textwolf-0.9.0')
print(src_files)
whl_name = generate_whl_name(f'{pywhlobf_data}/prep/textwolf-0.9.0', 'textwolf', '0.9.0', None)
print(whl_name)
repack_whl(
f'{pywhlobf_data}/prep/textwolf-0.9.0',
f'{pywhlobf_data}/prep/{whl_name}',
)
|
import numpy as np
import pandas as pd
from flask_cors import cross_origin
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
@app.route('/')
@cross_origin()
def home():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
@cross_origin()
def predict():
Pregnancies = float(request.form['Pregnancies'])
Glucose = float(request.form['Glucose'])
BloodPressure = float(request.form['BloodPressure'])
SkinThickness = float(request.form['SkinThickness'])
Insulin = float(request.form['Insulin'])
BMI = float(request.form['BMI'])
DiabetesPedigreeFunction = float(request.form['DiabetesPedigreeFunction'])
Age = float(request.form['Age'])
filename = 'modelForPrediction.sav'
loaded_model = pickle.load(open(filename, 'rb')) # loading the model file from the storage
scalar = pickle.load(open("sandardScalar.sav", 'rb'))
# predictions using the loaded model file
prediction = loaded_model.predict(scalar.transform(
[[Pregnancies, Glucose, BloodPressure, SkinThickness, Insulin, BMI, DiabetesPedigreeFunction, Age]]))
if prediction ==[1]:
prediction = "diabetes"
else:
prediction = "Normal"
# showing the prediction results in a UI
if prediction =="diabetes":
return render_template('diabetes.html', prediction=prediction)
else:
return render_template('Normal.html',prediction=prediction)
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8000, debug=True)
#app.run(debug=True)
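# Hedged client-side check (assumes the app is running locally as configured
# above and that the `requests` package is installed; values are placeholders):
#
# import requests
# form = {
#     'Pregnancies': 2, 'Glucose': 120, 'BloodPressure': 70, 'SkinThickness': 20,
#     'Insulin': 80, 'BMI': 25.0, 'DiabetesPedigreeFunction': 0.5, 'Age': 33,
# }
# r = requests.post('http://127.0.0.1:8000/predict', data=form)
# print(r.status_code)  # the rendered diabetes.html / Normal.html markup is in r.text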
|
#!/usr/bin/env python3
import sys
from jinja2 import Environment, FileSystemLoader
from pprint import pprint
from webob import Request, Response
dispatch = {
'/': 'index.html',
'/index.html': 'index.html',
'/about/aboutme.html': 'about/aboutme.html',
'/github.css': 'github.css'
}
include = (
'app.js',
'react.js',
'leaflet.js',
'D3.js',
'moment.js',
'math.js',
'main.css',
'bootstrap.css',
'normalize.css'
)
# materialize as lists so repeated renders can reuse them (a bare filter()
# iterator would be exhausted after the first template render)
css = [s for s in include if s.endswith('.css')]
js = [s for s in include if s.endswith('.js')]
env = Environment(loader = FileSystemLoader('template_static'))
class WsgiTopBottomMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
response = self.app(environ, start_response).decode()
yield response.encode()
def app(environ, start_response):
path = environ['PATH_INFO']
res = b''
if (path in dispatch):
template = env.get_template(dispatch[path])
start_response('200 OK', [('Content-Type', 'text/html')])
res = (template.render(css=css, js=js)).encode()
else:
start_response('404 Not Found', [])
return res
def request(uri):
req = Request.blank(uri)
pprint(req)
print(req.get_response(app))
if __name__ == '__main__':
if sys.version_info < (3, 6):
sys.exit('Python 3.6 or later is required.\n')
app = WsgiTopBottomMiddleware(app)
request('/')
    request('/about/aboutme.html')
|
import frappe
from frappe import _
@frappe.whitelist()
def add_bill_amount(item):
r = frappe.db.get_value("Food Item",item,["name","subsidy_rate","original_rate","limit"])
return r
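# Hedged usage note: because add_bill_amount is whitelisted, Frappe also exposes
# it over REST at /api/method/<dotted.module.path>.add_bill_amount for an
# authenticated session. The dotted path and host below are placeholders, not
# the real module location.
#
# import requests
# resp = requests.get(
#     "https://erp.example.com/api/method/myapp.api.add_bill_amount",  # placeholder
#     params={"item": "Lunch"},
#     headers={"Authorization": "token <api_key>:<api_secret>"},
# )
# print(resp.json())  # Frappe wraps the returned value in {"message": ...}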
|
#!/usr/bin/env python3
import os
import sys
import logging
from p1204_3.generic import VIDEOPARSER_REPO
def __video_parser_dir():
this_path = os.path.dirname(os.path.realpath(__file__))
return os.path.join(
this_path,
"bitstream_mode3_videoparser",
)
def run_videoparser(video_segment_file, output_dir_full_path, skipexisting=True):
"""
Run video parser on a video, save report to output_dir_full_path
"""
logging.info("run bitstream parser for {}".format(video_segment_file))
report_file_name = os.path.join(
output_dir_full_path,
os.path.splitext(os.path.basename(video_segment_file))[0] + ".json.bz2"
)
if skipexisting and os.path.isfile(report_file_name):
return report_file_name
parser_script = os.path.join(
__video_parser_dir(),
"parser.sh"
)
cmd = f"""{parser_script} "{video_segment_file}" --output "{report_file_name}" """
ret = os.system(cmd)
if ret != 0:
logging.error(f"there was something wrong with {video_segment_file}")
logging.error(f"please check the following command: \n {cmd}")
return ""
return report_file_name
def check_or_install_videoparser():
"""
Check if videoparser is installed, otherwise install it
"""
logging.info("check or install video parser")
videoparser_directory = __video_parser_dir()
if os.path.isdir(videoparser_directory):
logging.info("video parser is checked out")
# perform update for "main part", TODO: think about a better handling, in case we change c++ parts of the parser, maybe as a separate python module?
#os.system(f"cd {videoparser_directory} && git pull origin master")
if os.path.isfile(
os.path.join(
videoparser_directory,
"VideoParser",
"libvideoparser.so"
)
):
logging.info("video parser is build")
return
logging.error("video parser is not build correctly, please check")
return
logging.info("clone and build video parser, this will take some time")
os.system(f"git clone {VIDEOPARSER_REPO} {videoparser_directory}")
os.system(os.path.join(videoparser_directory, "build.sh"))
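# Minimal driver sketch (commented out; the segment path and output directory
# are placeholders, and the output directory is assumed to already exist):
#
# if __name__ == "__main__":
#     logging.basicConfig(level=logging.INFO)
#     check_or_install_videoparser()
#     report = run_videoparser("example_segment.mkv", "/tmp/reports", skipexisting=True)
#     print(report if report else "bitstream parsing failed")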
|
#!/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import glob
import optparse
import shutil
import time
import seqnum
# Import Broadband modules
from install_cfg import InstallCfg
import bband_utils
# Enumerated types
class Methods(object):
"""
Defines available models on the platform
"""
gp, ucsb, sdsu, exsim, song, irikura1, irikura2, csm = range(8)
labels = ["GP", "UCSB", "SDSU", "EXSIM", "SONG", "IRIKURA_RECIPE_M1", "IRIKURA_RECIPE_M2"]
options = ["gp", "ucsb", "sdsu", "exsim", "song", "irikura1", "irikura2"]
#labels = ["GP", "UCSB", "SDSU", "EXSIM", "SONG", "IRIKURA_RECIPE_M1", "IRIKURA_RECIPE_M2", "CSM"]
#options = ["gp", "ucsb", "sdsu", "exsim", "song", "irikura1", "irikura2", "csm"]
class GenAcceptTests(object):
def __init__(self, resume=True):
install = InstallCfg.getInstance()
self.resume = resume
self.resume_list = []
# Read checkpoint file
if self.resume == True:
resume_file = os.path.join(install.A_OUT_LOG_DIR,
"gen_resume.txt")
if os.path.exists(resume_file):
resume_fp = open(resume_file, 'r')
self.resume_list = resume_fp.read().splitlines()
resume_fp.close()
else:
self.resume = False
# Setup paths
self.input_dir = os.path.join(install.A_TEST_REF_DIR,
"accept_inputs")
self.ref_dir = os.path.join(install.A_TEST_REF_DIR,
"accept_refs")
def generate_option_list(self):
"""
This function creates the option files for all test cases
"""
install = InstallCfg.getInstance()
numvalid = 0
numuser = 0
optfiles = {}
# Make sure run directory exists
cmd = "mkdir -p %s" % (install.A_USER_DATA_DIR)
bband_utils.runprog(cmd)
# Clear out run directory
user_temp = os.path.join(install.A_USER_DATA_DIR, "tmp")
if os.path.exists(user_temp):
shutil.rmtree(user_temp)
filelist = os.listdir(install.A_USER_DATA_DIR)
if not os.path.exists(user_temp):
os.mkdir(user_temp)
for file_entry in filelist:
shutil.move(os.path.join(install.A_USER_DATA_DIR, file_entry),
os.path.join(user_temp, file_entry))
basedir = os.path.join(install.A_TEST_REF_DIR, "accept_inputs")
# Copy input files to run directory
shutil.copy2(os.path.join(basedir, "northridge_3_sta.stl"),
"%s" % install.A_USER_DATA_DIR)
shutil.copy2(os.path.join(basedir, "northridge_eq_gp.src"),
"%s" % install.A_USER_DATA_DIR)
shutil.copy2(os.path.join(basedir, "northridge_eq_ucsb.src"),
"%s" % install.A_USER_DATA_DIR)
shutil.copy2(os.path.join(basedir, "northridge_eq_song.src"),
"%s" % install.A_USER_DATA_DIR)
# Ensure input dir and ref dir exist
cmd = "mkdir -p %s" % (self.input_dir)
bband_utils.runprog(cmd)
cmd = "mkdir -p %s" % (self.ref_dir)
bband_utils.runprog(cmd)
# Get sorted list of station files for menu selections
files = glob.glob("%s%s*.stl" % (install.A_USER_DATA_DIR, os.sep))
stafiles = []
for file_entry in files:
stafiles.append(os.path.basename(file_entry))
stafiles.sort()
# Validation simulations
mode = "valid-northridge"
for method in xrange(0, len(Methods.labels)):
optfile = "%s-%s.txt" % (mode, Methods.labels[method])
print("Generating %s" % (optfile))
opts = []
# Select validation run
opts.append('y')
# Select the validation event
opts.append("NR")
# Select method
opts.append(Methods.options[method])
# We don't need a custom source file
opts.append('n')
# For GP, UCSB, and SDSU, SONG, Irikura1, Irikura2
# we want to run the rupture generator
if method in [Methods.gp, Methods.ucsb, Methods.sdsu, Methods.song, Methods.irikura1, Methods.irikura2]:
opts.append('y')
opts.append('2')
opts.append('1')
opts.append("%d" %
(stafiles.index("northridge_3_sta.stl") + 1))
if method == Methods.exsim:
# No custom EXSIM template file
opts.append('n')
if method != Methods.csm:
# Run site response (CSM does not ask this question)
opts.append('y')
# Skip plots
opts.append('n')
opts.append('n')
# Skip GMPE Comparison
opts.append('n')
# Yes to GoF
opts.append('y')
# GP GoF
opts.append('1')
# No to additional metrics
opts.append('n')
print("\t %s" % (str(opts)))
numvalid = numvalid + 1
optfiles[optfile] = opts
# User simulations
mode = "user"
for method in xrange(0, len(Methods.labels)):
optfile = "%s-%s.txt" % (mode, Methods.labels[method])
print("Generating %s" % (optfile))
opts = []
opts.append('n')
# Select the velocity model, use LABasin
opts.append('LABasin500')
# Select method
opts.append(Methods.options[method])
# Source file
opts.append('1')
if method == Methods.ucsb:
opts.append('northridge_eq_ucsb.src')
elif method == Methods.song:
opts.append('northridge_eq_song.src')
else:
opts.append('northridge_eq_gp.src')
if method != Methods.exsim and method != Methods.csm:
# Use rupture generator
opts.append('y')
# Select station from run directory
opts.append('1')
opts.append("%d" %
(stafiles.index("northridge_3_sta.stl") + 1))
if method == Methods.exsim:
# No custom template for ExSIM
opts.append('n')
if method != Methods.csm:
# Run site response (CSM doesn't ask this question)
opts.append('y')
# No plots
opts.append('n')
opts.append('n')
print("\t %s" % (str(opts)))
numuser = numuser + 1
optfiles[optfile] = opts
print("Number of validation events: %d" % (numvalid))
print("Number of user events: %d" % (numuser))
print("Total aceptance tests: %d" % (numuser + numvalid))
return optfiles
def generate_xml(self, optfiles):
install = InstallCfg.getInstance()
# Generate xml workflows
tests = []
for key in optfiles.keys():
sim_id = int(seqnum.get_seq_num())
test = key.split('.')[0]
xmlfile = os.path.join(self.input_dir, "%s.xml" % (test))
if os.path.basename(xmlfile) in self.resume_list:
# Skip this test
print("Skipping %s" % (key))
continue
print("Generating %s" % (key))
optfile = os.path.join(self.input_dir, key)
# Save the option file
op = open(optfile, 'w')
for line in optfiles[key]:
op.write("%s\n" % (line))
op.close()
# Generate xml
print("Generating xml for %s" % (key))
print("\t %s" % (str(optfiles[key])))
cmd = ("%s --expert -s %d -g -o %s" %
(os.path.join(install.A_COMP_DIR, "run_bbp.py"),
sim_id, optfile))
print("Running: %s" % (cmd))
rc = bband_utils.runprog(cmd, False)
if rc != 0:
print("Failed to run bbp, aborting.")
return []
oldxmlfile = os.path.join(install.A_XML_DIR, "%d.xml" % (sim_id))
shutil.copy2(oldxmlfile, xmlfile)
if not os.path.exists(xmlfile):
print("Workflow %s not found, aborting." % (xmlfile))
return []
tests.append([sim_id, xmlfile])
time.sleep(1)
return tests
def run_tests(self, tests):
install = InstallCfg.getInstance()
# Run the tests and save results as reference data
for test in tests:
if os.path.basename(test[1]) in self.resume_list:
# Skip this test
print("Skipping %s" % (os.path.basename(test[1])))
continue
# Execute each test
cmd = ("%s -s %d -x %s" %
(os.path.join(install.A_COMP_DIR, "run_bbp.py"),
test[0], test[1]))
rc = bband_utils.runprog(cmd, False)
if rc != 0:
print("Failed to run acceptance test %d-%s, aborting." %
(test[0], test[1]))
return 1
# Save the bbp and rd50 files
test_name = os.path.basename(test[1]).split('.')[0]
cmd = "mkdir -p %s" % (os.path.join(self.ref_dir, test_name))
bband_utils.runprog(cmd)
rd50files = glob.glob("%s/%d/%d.*.rd50" %
(install.A_OUT_DATA_DIR, test[0], test[0]))
if len(rd50files) < 1:
print("Did not find expected RotD50 files")
return 1
for rd50_file in rd50files:
filecomps = os.path.basename(rd50_file).split('.')
shutil.copy2(rd50_file,
os.path.join(self.ref_dir, test_name,
"%s.rd50" % (filecomps[1])))
# Write progress to checkpoint file
resume_fp = open(os.path.join(install.A_OUT_LOG_DIR,
"gen_resume.txt"), 'a')
resume_fp.write("%s\n" % os.path.basename(test[1]))
resume_fp.flush()
resume_fp.close()
return 0
if __name__ == '__main__':
# Make sure BBP_DATA_DIR is set, otherwise, the produced XML files
    # will not be usable by other people
if not 'BBP_DATA_DIR' in os.environ:
print("Please set BBP_DATA_DIR and try again!")
sys.exit(1)
parser = optparse.OptionParser()
parser.add_option("-o", "--overwrite", action="store_true",
dest="overwrite",
help="Overwrite reference solution")
(options, args) = parser.parse_args()
# Generate options
generator = GenAcceptTests()
option_list = generator.generate_option_list()
if len(option_list) == 0:
print("No options were produced")
sys.exit(1)
# Generate XML test files
test_list = generator.generate_xml(option_list)
if len(test_list) == 0:
print("No tests were produced")
sys.exit(1)
if options.overwrite == True:
# Run and save the solutions
if generator.run_tests(test_list) != 0:
print("Failed to execute acceptance tests")
sys.exit(1)
sys.exit(0)
|
from mxnet.gluon.loss import Loss
from mxnet.gluon.loss import _reshape_like
from mxnet.gluon.loss import _apply_weighting
class MeanSquareLoss(Loss):
def __init__(self, weight=1., batch_axis=0, **kwargs):
super(MeanSquareLoss, self).__init__(weight, batch_axis, **kwargs)
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.square(pred - label)
loss = _apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
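# Quick smoke test (a hedged sketch; run this module directly to verify the
# per-sample mean over the non-batch axes):
if __name__ == "__main__":
    from mxnet import nd
    loss_fn = MeanSquareLoss()
    pred = nd.array([[1.0, 2.0], [3.0, 4.0]])
    label = nd.array([[1.5, 2.0], [2.0, 4.0]])
    print(loss_fn(pred, label))  # -> [0.125 0.5], one value per batch element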
|
import os
import zipfile
#Check root user
if os.geteuid() != 0:
exit("You need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\nExample : sudo python <filename>.py")
# Store all file names in a list
Mylist = os.listdir('.')
# Get the number of entries in the list
tfile = int(len(Mylist))
# Print all entries with their index
print "List of files contain in current directory: \n"
x = 1
while x < tfile+1:
print x,". ",Mylist[x-1]
x+=1
# Receive the XML and attachments backup selections from the user
xml = int(input("\nChoose XML backup file: "))
attach = int(input("Choose Attachments backup file: "))
print Mylist[xml-1]
print Mylist[attach-1]
#Copy & paste file to others location
cdir = os.getcwd()
cpxml = 'cp {}/{} /var/atlassian/application-data/jira/import/'.format(cdir,Mylist[xml-1])
cpattach = 'cp {}/{} /var/atlassian/application-data/jira/data/'.format(cdir,Mylist[attach-1])
os.system(cpxml)
os.system(cpattach)
#Unzip Attachments backup file
os.system('rm -rf /var/atlassian/application-data/jira/data/attachments')
loczip = '/var/atlassian/application-data/jira/data/{}'.format(Mylist[attach-1])
unpack_attach = zipfile.ZipFile(loczip)
unpack_attach.extractall('/var/atlassian/application-data/jira/data')
unpack_attach.close()
#Change ownership of file from ROOT to user
own = 'chown -R jira:jira /var/atlassian/application-data/jira/data/attachments'
os.system(own)
|
from __future__ import annotations
import cmath
from typing import Optional
import numpy as np
import psutil
import ray
import scipy.special as ssp
from pymwm.utils import coax_utils
from pymwm.waveguide import Database, Sampling, Waveguide
from .samples import Samples, SamplesForRay, SamplesLowLoss, SamplesLowLossForRay
class Coax(Waveguide):
"""A class defining a coax waveguide."""
def __init__(self, params: dict) -> None:
"""Init Coax class.
Args:
params: A dict whose keys and values are as follows:
'core': A dict of the setting parameters of the core:
'shape': A string indicating the shape of the core.
'ri': A float indicating the inner radius [um].
'r': A float indicating the outer radius [um].
'fill': A dict of the parameters of the core Material.
'clad': A dict of the parameters of the clad Material.
'bounds': A dict indicating the bounds of database.interpolation
and its keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
'wl_min': A float indicating the minimum wavelength [um]
'wl_imag': A float indicating the maximum value of
abs(c / f_imag) [um] where f_imag is the imaginary part
of the frequency.
'modes': A dict of the settings for calculating modes:
'wl_max': A float indicating the maximum wavelength [um]
(default: 5.0)
'wl_min': A float indicating the minimum wavelength [um]
(default: 0.4)
'wl_imag': A float indicating the maximum value of
abs(c / f_imag) [um] where f_imag is the imaginary part
of the frequency. (default: 5.0)
'dw': A float indicating frequency interval
[rad c / 1um]=[2.99792458e14 rad / s]
(default: 1 / 64).
'num_n': An integer indicating the number of orders of
modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
'ls': A list of characters chosen from "h" (horizontal
polarization) and "v" (vertical polarization).
"""
self.ri = params["core"]["ri"]
params["core"]["size"] = params["core"]["r"]
params["core"]["size2"] = self.ri
super().__init__(params)
if self.clad.label == "PEC":
from pymwm.cutoff import Cutoff
co = Cutoff(self.num_n, self.num_m)
self.co_list = []
for n in range(self.num_n):
co_per_n = []
for pol, m_end in [("M", self.num_m + 2), ("E", self.num_m + 1)]:
for m in range(1, m_end):
alpha = (pol, n, m)
co_per_n.append(co(alpha, self.ri / self.r))
self.co_list.append(np.array(co_per_n))
else:
self.co_list = self.samples.co_list
def get_alphas(self, alpha_list: list[tuple[str, int, int]]) -> dict:
alphas: dict = {"h": [], "v": []}
for alpha in [("E", 0, m) for m in range(1, self.num_m + 1)]:
if alpha in alpha_list:
alphas["v"].append(alpha)
for alpha in [
("E", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)
]:
if alpha in alpha_list:
alphas["h"].append(alpha)
alphas["v"].append(alpha)
for alpha in [("M", 0, m) for m in range(1, self.num_m + 1)]:
if alpha in alpha_list:
alphas["h"].append(alpha)
for alpha in [
("M", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)
]:
if alpha in alpha_list:
alphas["h"].append(alpha)
alphas["v"].append(alpha)
return alphas
def betas_convs_samples(self, params: dict) -> tuple[dict, dict, Samples]:
im_factor = self.clad.im_factor
self.clad.im_factor = 1.0
self.clad_params["im_factor"] = 1.0
p_modes = params["modes"].copy()
num_n_0 = p_modes["num_n"]
num_m_0 = p_modes["num_m"]
betas: dict = {}
convs: dict = {}
success = False
catalog = Database().load_catalog()
num_n_max = catalog["num_n"].max()
num_m_max = catalog["num_m"].max()
if not np.isnan(num_n_max):
for num_n, num_m in [
(n, m)
for n in range(num_n_0, num_n_max + 1)
for m in range(num_m_0, num_m_max + 1)
]:
p_modes["num_n"] = num_n
p_modes["num_m"] = num_m
smp = Samples(
self.r, self.fill_params, self.clad_params, p_modes, self.ri
)
try:
betas, convs = smp.database.load()
success = True
break
except IndexError:
continue
if not success:
p_modes["num_n"] = num_n_0
p_modes["num_m"] = num_m_0
betas, convs, smp = self.do_sampling(p_modes)
if im_factor != 1.0:
self.clad.im_factor = im_factor
self.clad_params["im_factor"] = im_factor
betas, convs, smp = self.do_sampling_for_im_factor(betas, convs, p_modes)
return betas, convs, smp
def do_sampling(self, p_modes: dict) -> tuple[dict, dict, Samples]:
num_n_0 = p_modes["num_n"]
num_m_0 = p_modes["num_m"]
smp = Samples(self.r, self.fill_params, self.clad_params, p_modes, self.ri)
ray.shutdown()
try:
ray.init()
p_modes_id = ray.put(p_modes)
pool = ray.util.ActorPool(
SamplesForRay.remote(
self.r, self.fill_params, self.clad_params, p_modes_id, self.ri
)
for _ in range(psutil.cpu_count())
)
xs_success_wr_list: list[tuple[np.ndarray, np.ndarray]] = list(
pool.map(lambda a, arg: a.wr_sampling.remote(arg), range(num_n_0))
)
num_wr = xs_success_wr_list[0][0].shape[0]
args = []
for n in range(num_n_0):
xs_array, success_array = xs_success_wr_list[n]
for iwr in range(num_wr):
args.append((n, iwr, xs_array[iwr], success_array[iwr]))
xs_success_wi_list: list[tuple[np.ndarray, np.ndarray]] = list(
pool.map(lambda a, arg: a.wi_sampling.remote(arg), args)
)
num_wi = xs_success_wi_list[0][0].shape[0]
xs_success_list: list[tuple[np.ndarray, np.ndarray]] = []
for n in range(num_n_0):
xs_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=complex)
success_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=bool)
for iwr in range(num_wr):
i = num_wr * n + iwr
xs_i, success_i = xs_success_wi_list[i]
xs_array[iwr] = xs_i
success_array[iwr] = success_i
xs_success_list.append((xs_array, success_array))
finally:
ray.shutdown()
betas, convs = smp.betas_convs(xs_success_list)
smp.database.save(betas, convs)
return betas, convs, smp
def do_sampling_for_im_factor(
self, betas: dict, convs: dict, p_modes: dict
) -> tuple[dict, dict, SamplesLowLoss]:
smp = SamplesLowLoss(
self.r, self.fill_params, self.clad_params, p_modes, self.ri
)
try:
betas, convs = smp.database.load()
except IndexError:
num_n = p_modes["num_n"]
num_m = p_modes["num_m"]
args = []
for iwr in range(len(smp.ws)):
for iwi in range(len(smp.wis)):
xis_list = []
for n in range(num_n):
xis = []
for i in range(num_m + 1):
xis.append(betas[("M", n, i + 1)][iwr, iwi] ** 2)
for i in range(num_m):
xis.append(betas[("E", n, i + 1)][iwr, iwi] ** 2)
xis_list.append(xis)
args.append((iwr, iwi, xis_list))
try:
ray.init()
p_modes_id = ray.put(p_modes)
pool = ray.util.ActorPool(
SamplesLowLossForRay.remote(
self.r, self.fill_params, self.clad_params, p_modes_id, self.ri
)
for _ in range(psutil.cpu_count())
)
xs_success_list = list(
pool.map(lambda a, arg: a.task.remote(arg), args)
)
finally:
ray.shutdown()
betas, convs = smp.betas_convs(xs_success_list)
smp.database.save(betas, convs)
return betas, convs, smp
def beta(self, w: complex, alpha: tuple[str, int, int]) -> complex:
"""Return phase constant
Args:
w: A complex indicating the angular frequency
alpha: (pol, n, m)
pol: 'M' (TM-like mode) or 'E' (TE-like mode)
n: The order of the mode
m: The sub order of the mode.
Returns:
h: The phase constant.
"""
if self.clad.label == "PEC":
return self.beta_pec(w, alpha)
wr = w.real
wi = w.imag
hr: float = self.beta_funcs[(alpha, "real")](wr, wi)[0, 0]
hi: float = self.beta_funcs[(alpha, "imag")](wr, wi)[0, 0]
# if hr < 0:
# hr = 1e-16
# if hi < 0:
# hi = 1e-16
return hr + 1j * hi
def beta_pec(self, w: complex, alpha: tuple[str, int, int]) -> complex:
"""Return phase constant of PEC waveguide
Args:
w: A complex indicating the angular frequency
alpha: A tuple (pol, n, m) where pol is 'M' for TM mode or
'E' for TE mode, n is the order of the mode, and m is the
number of modes in the order and the polarization.
Returns:
h: A complex indicating the phase constant.
"""
w_comp = w.real + 1j * w.imag
pol, n, m = alpha
if pol == "M":
chi = self.co_list[n][m - 1]
else:
chi = self.co_list[n][self.num_m + m]
val = cmath.sqrt(self.fill(w_comp) * w_comp ** 2 - chi ** 2 / self.r ** 2)
if abs(val.real) > abs(val.imag):
if val.real < 0:
val *= -1
else:
if val.imag < 0:
val *= -1
return val
def coef(self, h: complex, w: complex, alpha: tuple[str, int, int]) -> tuple:
"""Return the coefficients of TE- and TM- components which compose
the hybrid mode.
Args:
h: Phase constant.
w: Angular frequency
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
Returns:
array([a1, b1, a2, b2, c2, d2, a3, b3]):
a1: Coefficient of TE-component for core rod
b1: Coefficient of TM-component for core rod
a2: Coefficient of TE-component described by Jn for dielectric region
b2: Coefficient of TM-component described by Jn for dielectric region
c2: Coefficient of TE-component described by Yn for dielectric region
d2: Coefficient of TM-component described by Yn for dielectric region
a3: Coefficient of TE-component for clad metal
b3: Coefficient of TM-component for clad metal
"""
pol, n, m = alpha
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
e1 = self.fill(w)
e2 = self.clad(w)
ee = e1 / e2
u = self.samples.u(h ** 2, w, e1)
ju = ssp.jv(n, u)
jpu = -ssp.jv(n + 1, u) + n / u * ju
yu = ssp.yv(n, u)
ypu = -ssp.yv(n + 1, u) + n / u * yu
if e2.real < -1e6:
a1 = b1 = a3 = b3 = 0.0j
if pol == "TE":
a2 = 1.0 + 0.0j
c2 = -jpu / ypu
b2 = d2 = 0.0j
else:
b2 = 1.0 + 0.0j
d2 = -ju / yu
a2 = c2 = 0.0j
else:
hew = h ** 2 / e2 / w ** 2
x = self.samples.x(h ** 2, w, e1)
y = self.samples.y(h ** 2, w, e2)
v = self.samples.v(h ** 2, w, e2)
kv = ssp.kv(n, v)
kpv = -ssp.kv(n + 1, v) + n / v * kv
jx = ssp.jv(n, x)
jpx = -ssp.jv(n + 1, x) + n / x * jx
yx = ssp.yv(n, x)
ypx = -ssp.yv(n + 1, x) + n / x * yx
iy = ssp.iv(n, y)
ipy = ssp.iv(n + 1, y) + n / y * iy
nuv = n * (v / u + u / v)
nxy = n * (y / x + x / y)
a = np.array(
[
[
jpu * kv * v + kpv * ju * u,
ypu * kv * v + kpv * yu * u,
nuv * ju * kv,
nuv * yu * kv,
],
[
jpx / yx * y + ipy / iy * jx / yx * x,
ypx / yx * y + ipy / iy * x,
nxy * jx / yx,
nxy,
],
[
hew * nuv * ju * kv,
hew * nuv * yu * kv,
ee * jpu * kv * v + kpv * ju * u,
ee * ypu * kv * v + kpv * yu * u,
],
[
hew * nxy * jx / yx,
hew * nxy,
ee * jpx / yx * y + ipy / iy * jx / yx * x,
ee * ypx / yx * y + ipy / iy * x,
],
]
)
if pol == "E":
a2 = 1.0 + 0j
A = a[1:, 1:]
B = -a[1:, 0]
c2, b2, d2 = np.linalg.solve(A, B)
else:
b2 = 1.0 + 0j
A = a[[0, 1, 3]][:, [0, 1, 3]]
B = -a[[0, 1, 3]][:, 2]
a2, c2, d2 = np.linalg.solve(A, B)
a1 = -x / (y * iy) * (jx * a2 + yx * c2)
b1 = -x / (y * iy) * (jx * b2 + yx * d2)
a3 = -u / (v * kv) * (ju * a2 + yu * c2)
b3 = -u / (v * kv) * (ju * b2 + yu * d2)
vals = (a1, b1, a2, b2, c2, d2, a3, b3)
norm = self.norm(h, w, alpha, vals)
return tuple(val / norm for val in vals)
def norm(
self, h: complex, w: complex, alpha: tuple[str, int, int], coef: tuple
) -> complex:
a1, b1, a2, b2, c2, d2, a3, b3 = coef
r, ri = self.r, self.ri
pol, n, m = alpha
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
e1 = self.fill(w)
e2 = self.clad(w)
en = 1 if n == 0 else 2
u = self.samples.u(h ** 2, w, e1)
x = self.samples.x(h ** 2, w, e1)
ju = ssp.jv(n, u)
jpu = -ssp.jv(n + 1, u) + n / u * ju
yu = ssp.yv(n, u)
ypu = -ssp.yv(n + 1, u) + n / u * yu
jx = ssp.jv(n, x)
jpx = -ssp.jv(n + 1, x) + n / x * jx
yx = ssp.yv(n, x)
ypx = -ssp.yv(n + 1, x) + n / x * yx
I2 = cmath.pi * (
1
/ en
* (
(
r ** 2
* (jpu ** 2 + (1 - n ** 2 / u ** 2) * ju ** 2 + 2 * jpu * ju / u)
- ri ** 2
* (jpx ** 2 + (1 - n ** 2 / x ** 2) * jx ** 2 + 2 * jpx * jx / x)
)
* (a2 ** 2 + b2 ** 2)
+ (
r ** 2
* (ypu ** 2 + (1 - n ** 2 / u ** 2) * yu ** 2 + 2 * ypu * yu / u)
- ri ** 2
* (ypx ** 2 + (1 - n ** 2 / x ** 2) * yx ** 2 + 2 * ypx * yx / x)
)
* (c2 ** 2 + d2 ** 2)
+ 2
* (
r ** 2
* (jpu * ypu + (1 - n ** 2 / u ** 2) * ju * yu + 2 * jpu * yu / u)
- ri ** 2
* (jpx * ypx + (1 - n ** 2 / x ** 2) * jx * yx + 2 * jpx * yx / x)
)
* (a2 * c2 + b2 * d2)
)
+ 2
* n
* (
(r ** 2 / u ** 2 * ju ** 2 - ri ** 2 / x ** 2 * jx ** 2) * a2 * b2
+ (r ** 2 / u ** 2 * yu ** 2 - ri ** 2 / x ** 2 * yx ** 2) * c2 * d2
+ (r ** 2 / u ** 2 * ju * yu - ri ** 2 / x ** 2 * jx * yx)
* (a2 * d2 + b2 * c2)
)
)
if e2.real < -1e6:
return cmath.sqrt(I2)
else:
v = self.samples.v(h ** 2, w, e2)
y = self.samples.y(h ** 2, w, e2)
kv = ssp.kv(n, v)
kpv = -ssp.kv(n + 1, v) + n / v * kv
iy = ssp.iv(n, y)
ipy = ssp.iv(n + 1, y) + n / y * iy
I1 = (
cmath.pi
* ri ** 2
* (
1
/ en
* (ipy ** 2 - (1 + n ** 2 / y ** 2) * iy ** 2 + 2 * ipy * iy / y)
* (a1 ** 2 + b1 ** 2)
+ 2 * n * iy ** 2 / y ** 2 * a1 * b1
)
)
I3 = (
-cmath.pi
* r ** 2
* (
1
/ en
* (kpv ** 2 - (1 + n ** 2 / v ** 2) * kv ** 2 + 2 * kpv * kv / v)
* (a3 ** 2 + b3 ** 2)
+ 2 * n * kv ** 2 / v ** 2 * a3 * b3
)
)
return cmath.sqrt(I1 + I2 + I3)
@staticmethod
def y_te(w, h):
return h / w
def y_tm_core(self, w, h):
e = self.fill(w)
return e * w / h
def y_tm_clad(self, w, h):
e = self.clad(w)
return e * w / h
def Y(
self, w: complex, h: complex, alpha: tuple[str, int, int], coef: tuple
) -> complex:
"""Return the effective admittance of the waveguide mode
Args:
w: Angular frequency
h: Phase constant.
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
coef: array([a1, b1, a2, b2, c2, d2, a3, b3])
a1: Coefficient of TE-component for core rod
b1: Coefficient of TM-component for core rod
a2: Coefficient of TE-component described by Jn for dielectric region
b2: Coefficient of TM-component described by Jn for dielectric region
c2: Coefficient of TE-component described by Yn for dielectric region
d2: Coefficient of TM-component described by Yn for dielectric region
a3: Coefficient of TE-component for clad metal
b3: Coefficient of TM-component for clad metal
Returns:
y: Effective admittance
"""
a1, b1, a2, b2, c2, d2, a3, b3 = coef
r, ri = self.r, self.ri
pol, n, m = alpha
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
e1 = self.fill(w)
e2 = self.clad(w)
en = 1 if n == 0 else 2
y_te = Coax.y_te(w, h)
y_tm1 = self.y_tm_core(w, h)
y_tm2 = self.y_tm_clad(w, h)
u = self.samples.u(h ** 2, w, e1)
x = self.samples.x(h ** 2, w, e1)
ju = ssp.jv(n, u)
jpu = -ssp.jv(n + 1, u) + n / u * ju
yu = ssp.yv(n, u)
ypu = -ssp.yv(n + 1, u) + n / u * yu
jx = ssp.jv(n, x)
jpx = -ssp.jv(n + 1, x) + n / x * jx
yx = ssp.yv(n, x)
ypx = -ssp.yv(n + 1, x) + n / x * yx
I2 = cmath.pi * (
1
/ en
* (
(
r ** 2
* (jpu ** 2 + (1 - n ** 2 / u ** 2) * ju ** 2 + 2 * jpu * ju / u)
- ri ** 2
* (jpx ** 2 + (1 - n ** 2 / x ** 2) * jx ** 2 + 2 * jpx * jx / x)
)
* (y_te * a2 ** 2 + y_tm1 * b2 ** 2)
+ (
r ** 2
* (ypu ** 2 + (1 - n ** 2 / u ** 2) * yu ** 2 + 2 * ypu * yu / u)
- ri ** 2
* (ypx ** 2 + (1 - n ** 2 / x ** 2) * yx ** 2 + 2 * ypx * yx / x)
)
* (y_te * c2 ** 2 + y_tm1 * d2 ** 2)
+ 2
* (
r ** 2
* (jpu * ypu + (1 - n ** 2 / u ** 2) * ju * yu + 2 * jpu * yu / u)
- ri ** 2
* (jpx * ypx + (1 - n ** 2 / x ** 2) * jx * yx + 2 * jpx * yx / x)
)
* (y_te * a2 * c2 + y_tm1 * b2 * d2)
)
+ n
* (y_te + y_tm1)
* (
(r ** 2 / u ** 2 * ju ** 2 - ri ** 2 / x ** 2 * jx ** 2) * a2 * b2
+ (r ** 2 / u ** 2 * yu ** 2 - ri ** 2 / x ** 2 * yx ** 2) * c2 * d2
+ (r ** 2 / u ** 2 * ju * yu - ri ** 2 / x ** 2 * jx * yx)
* (a2 * d2 + b2 * c2)
)
)
if e2.real < -1e6:
return I2
else:
v = self.samples.v(h ** 2, w, e2)
y = self.samples.y(h ** 2, w, e2)
kv = ssp.kv(n, v)
kpv = -ssp.kv(n + 1, v) + n / v * kv
iy = ssp.iv(n, y)
ipy = ssp.iv(n + 1, y) + n / y * iy
I1 = (
cmath.pi
* ri ** 2
* (
1
/ en
* (ipy ** 2 - (1 + n ** 2 / y ** 2) * iy ** 2 + 2 * ipy * iy / y)
* (y_te * a1 ** 2 + y_tm2 * b1 ** 2)
+ n * iy ** 2 / y ** 2 * (y_te + y_tm2) * a1 * b1
)
)
I3 = (
-cmath.pi
* r ** 2
* (
1
/ en
* (kpv ** 2 - (1 + n ** 2 / v ** 2) * kv ** 2 + 2 * kpv * kv / v)
* (y_te * a3 ** 2 + y_tm2 * b3 ** 2)
+ n * kv ** 2 / v ** 2 * (y_te + y_tm2) * a3 * b3
)
)
return I1 + I2 + I3
def coefs(self, hs, w):
A1s = []
B1s = []
A2s = []
B2s = []
C2s = []
D2s = []
A3s = []
B3s = []
for h, s, n, m in zip(hs, self.s_all, self.n_all, self.m_all):
pol = "E" if s == 0 else "M"
a1, b1, a2, b2, c2, d2, a3, b3 = self.coef(h, w, (pol, n, m))
A1s.append(a1)
B1s.append(b1)
A2s.append(a2)
B2s.append(b2)
C2s.append(c2)
D2s.append(d2)
A3s.append(a3)
B3s.append(b3)
return (
np.ascontiguousarray(A1s),
np.ascontiguousarray(B1s),
np.ascontiguousarray(A2s),
np.ascontiguousarray(B2s),
np.ascontiguousarray(C2s),
np.ascontiguousarray(D2s),
np.ascontiguousarray(A3s),
np.ascontiguousarray(B3s),
)
def Ys(self, w, hs, A1s, B1s, A2s, B2s, C2s, D2s, A3s, B3s):
vals = []
coefs = zip(A1s, B1s, A2s, B2s, C2s, D2s, A3s, B3s)
for h, s, n, coef in zip(hs, self.s_all, self.n_all, coefs):
pol = "E" if s == 0 else "M"
vals.append(self.Y(w, h, (pol, n, 1), coef))
return np.array(vals)
def props_numpy(self, w):
e1 = self.fill(w)
e2 = self.clad(w)
hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all])
A1s, B1s, A2s, B2s, C2s, D2s, A3s, B3s = self.coefs(hs, w)
Ys = self.Ys(w, hs, A1s, B1s, A2s, B2s, C2s, D2s, A3s, B3s)
xs = self.samples.x(hs ** 2, w, e1)
ys = self.samples.y(hs ** 2, w, e2)
us = self.samples.u(hs ** 2, w, e1)
vs = self.samples.v(hs ** 2, w, e2)
jxs = ssp.jv(self.n_all, xs)
jpxs = ssp.jvp(self.n_all, xs)
yxs = ssp.yv(self.n_all, xs)
ypxs = ssp.yvp(self.n_all, xs)
jus = ssp.jv(self.n_all, us)
jpus = ssp.jvp(self.n_all, us)
yus = ssp.yv(self.n_all, us)
ypus = ssp.yvp(self.n_all, us)
if e2.real < -1e6:
iys = np.inf * np.ones_like(ys)
ipys = np.inf * np.ones_like(ys)
kvs = np.zeros_like(vs)
kpvs = np.zeros_like(vs)
else:
iys = ssp.iv(self.n_all, ys)
ipys = ssp.ivp(self.n_all, ys)
kvs = ssp.kv(self.n_all, vs)
kpvs = ssp.kvp(self.n_all, vs)
return (
hs,
xs,
ys,
us,
vs,
jxs,
jpxs,
yxs,
ypxs,
iys,
ipys,
jus,
jpus,
yus,
ypus,
kvs,
kpvs,
A1s,
B1s,
A2s,
B2s,
C2s,
D2s,
A3s,
B3s,
Ys,
)
def props(self, w):
e1 = self.fill(w)
e2 = self.clad(w)
hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all])
(
xs,
ys,
us,
vs,
jxs,
jpxs,
yxs,
ypxs,
iys,
ipys,
jus,
jpus,
yus,
ypus,
kvs,
kpvs,
A1s,
B1s,
A2s,
B2s,
C2s,
D2s,
A3s,
B3s,
Ys,
) = coax_utils.props_cython(
w, self.r, self.ri, self.s_all, self.n_all, hs, e1, e2
)
return (
hs,
xs,
ys,
us,
vs,
jxs,
jpxs,
yxs,
ypxs,
iys,
ipys,
jus,
jpus,
yus,
ypus,
kvs,
kpvs,
A1s,
B1s,
A2s,
B2s,
C2s,
D2s,
A3s,
B3s,
Ys,
)
def e_field_r_dep(
self,
r: float,
w: complex,
alpha: tuple[str, int, int],
h: complex,
coef: tuple,
) -> np.ndarray:
"""Return the r-dependence of the electric field vectors in cylindrical coordinate
Args:
r: The distance from origin [um].
w: The angular frequency.
alpha: (pol, n, m)
pol: 'M' (TM-like mode) or 'E' (TE-like mode).
n: The order of the mode.
m: The sub order of the mode.
h: The complex phase constant.
coef: The coefficients of TE- and TM- components
Returns:
e_vec: An array of complexes [er, ep, ez].
"""
a1, b1, a2, b2, c2, d2, a3, b3 = coef
pol, n, m = alpha
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
e1 = self.fill(w)
e2 = self.clad(w)
if r < self.ri:
_y = self.samples.y(h ** 2, w, e2)
yr = _y * r / self.ri
iy = ssp.iv(n, yr)
iy_plus = ssp.iv(n + 1, yr)
iy_minus = ssp.iv(n - 1, yr)
niy_y = (iy_minus - iy_plus) / 2
ipy = (iy_minus + iy_plus) / 2
er = niy_y * a1 + ipy * b1
ep = ipy * a1 + niy_y * b1
ez = -_y / (1j * h * self.ri) * iy * b1
elif self.ri <= r < self.r:
u = self.samples.u(h ** 2, w, e1)
ur = u * r / self.r
ju = ssp.jv(n, ur)
ju_plus = ssp.jv(n + 1, ur)
ju_minus = ssp.jv(n - 1, ur)
nju_u = (ju_minus + ju_plus) / 2
jpu = (ju_minus - ju_plus) / 2
yu = ssp.yv(n, ur)
yu_plus = ssp.yv(n + 1, ur)
yu_minus = ssp.yv(n - 1, ur)
nyu_u = (yu_minus + yu_plus) / 2
ypu = (yu_minus - yu_plus) / 2
er = nju_u * a2 + jpu * b2 + nyu_u * c2 + ypu * d2
ep = jpu * a2 + nju_u * b2 + ypu * c2 + nyu_u * d2
ez = u / (1j * h * self.r) * (ju * b2 + yu * d2)
else:
v = self.samples.v(h ** 2, w, e2)
vr = v * r / self.r
kv = ssp.kv(n, vr)
kv_plus = ssp.kv(n + 1, vr)
kv_minus = ssp.kv(n - 1, vr)
nkv_v = -(kv_minus - kv_plus) / 2
kpv = -(kv_minus + kv_plus) / 2
er = nkv_v * a3 + kpv * b3
ep = kpv * a3 + nkv_v * b3
ez = -v / (1j * h * self.r) * kv * b3
return np.array([er, ep, ez])
def e_field(
self,
x: float,
y: float,
w: complex,
dir: str,
alpha: tuple[str, int, int],
h: complex,
coef: tuple,
) -> np.ndarray:
"""Return the electric field vectors for the specified mode and point
Args:
x: The x coordinate [um].
y: The y coordinate [um].
dir: "h" (horizontal polarization) or "v" (vertical polarization)
w: The angular frequency.
alpha: (pol, n, m)
pol: 'M' (TM-like mode) or 'E' (TE-like mode).
n: The order of the mode.
m: The sub order of the mode.
h: The complex phase constant.
coef: The coefficients of TE- and TM- components
Returns:
e_vec: An array of complexes [ex, ey, ez].
"""
pol, n, m = alpha
r = np.hypot(x, y)
p = np.arctan2(y, x)
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
if dir == "h":
fr = np.cos(n * p)
fp = -np.sin(n * p)
else:
fr = np.sin(n * p)
fp = np.cos(n * p)
er, ep, ez = self.e_field_r_dep(r, w, alpha, h, coef)
er *= fr
ep *= fp
ez *= fr
ex = er * np.cos(p) - ep * np.sin(p)
ey = er * np.sin(p) + ep * np.cos(p)
return np.array([ex, ey, ez])
def h_field_r_dep(
self,
r: float,
w: complex,
alpha: tuple[str, int, int],
h: complex,
coef: tuple,
) -> np.ndarray:
"""Return the r-dependence of the magnetic field vectors in cylindrical coordinate
Args:
r: The distance from origin [um].
w: The angular frequency.
alpha: (pol, n, m)
pol: 'M' (TM-like mode) or 'E' (TE-like mode).
n: The order of the mode.
m: The sub order of the mode.
h: The complex phase constant.
coef: The coefficients of TE- and TM- components
Returns:
e_vec: An array of complexes [er, ep, ez].
"""
a1, b1, a2, b2, c2, d2, a3, b3 = coef
pol, n, m = alpha
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
e1 = self.fill(w)
e2 = self.clad(w)
y_te = Coax.y_te(w, h)
if r < self.ri:
y_tm = self.y_tm_clad(w, h)
_y = self.samples.y(h ** 2, w, e2)
yr = _y * r / self.ri
iy = ssp.iv(n, yr)
iy_plus = ssp.iv(n + 1, yr)
iy_minus = ssp.iv(n - 1, yr)
niy_y = (iy_minus - iy_plus) / 2
ipy = (iy_minus + iy_plus) / 2
hr = -(y_te * ipy * a1 + y_tm * niy_y * b1)
hp = y_te * niy_y * a1 + y_tm * ipy * b1
hz = -1j * _y / self.ri * iy * a1
elif self.ri <= r < self.r:
y_tm = self.y_tm_core(w, h)
u = self.samples.u(h ** 2, w, e1)
ur = u * r / self.r
ju = ssp.jv(n, ur)
ju_plus = ssp.jv(n + 1, ur)
ju_minus = ssp.jv(n - 1, ur)
nju_u = (ju_minus + ju_plus) / 2
jpu = (ju_minus - ju_plus) / 2
yu = ssp.yv(n, ur)
yu_plus = ssp.yv(n + 1, ur)
yu_minus = ssp.yv(n - 1, ur)
nyu_u = (yu_minus + yu_plus) / 2
ypu = (yu_minus - yu_plus) / 2
hr = -(
y_te * jpu * a2
+ y_tm * nju_u * b2
+ y_te * ypu * c2
+ y_tm * nyu_u * d2
)
hp = (
y_te * nju_u * a2
+ y_tm * jpu * b2
+ y_te * nyu_u * c2
+ y_tm * ypu * d2
)
hz = 1j * u / self.r * (ju * a2 + yu * c2)
else:
y_tm = self.y_tm_clad(w, h)
v = self.samples.v(h ** 2, w, e2)
vr = v * r / self.r
kv = ssp.kv(n, vr)
kv_plus = ssp.kv(n + 1, vr)
kv_minus = ssp.kv(n - 1, vr)
nkv_v = -(kv_minus - kv_plus) / 2
kpv = -(kv_minus + kv_plus) / 2
hr = -(y_te * kpv * a3 + y_tm * nkv_v * b3)
hp = y_te * nkv_v * a3 + y_tm * kpv * b3
hz = -1j * v / self.r * kv * a3
return np.array([hr, hp, hz])
def h_field(self, x, y, w, dir, alpha, h, coef):
"""Return the magnetic field vectors for the specified mode and
point
Args:
x: A float indicating the x coordinate [um]
y: A float indicating the y coordinate [um]
w: A complex indicating the angular frequency
dir: "h" (horizontal polarization) or "v" (vertical polarization)
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
h: A complex indicating the phase constant.
coef: The coefficients of TE- and TM- components
Returns:
h_vec: An array of complexes [hx, hy, hz].
"""
pol, n, m = alpha
r = np.hypot(x, y)
p = np.arctan2(y, x)
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
if dir == "h":
fr = np.cos(n * p)
fp = -np.sin(n * p)
else:
fr = np.sin(n * p)
fp = np.cos(n * p)
hr, hp, hz = self.h_field_r_dep(r, w, alpha, h, coef)
hr *= fp
hp *= fr
hz *= fp
hx = hr * np.cos(p) - hp * np.sin(p)
hy = hr * np.sin(p) + hp * np.cos(p)
return np.array([hx, hy, hz])
def field_r_dep(
self,
r: float,
w: complex,
alpha: tuple[str, int, int],
h: complex,
coef: tuple,
) -> np.ndarray:
"""Return the r-dependence of the field vectors in cylindrical coordinate
Args:
r: The distance from origin [um].
w: The angular frequency.
alpha: (pol, n, m)
pol: 'M' (TM-like mode) or 'E' (TE-like mode).
n: The order of the mode.
m: The sub order of the mode.
h: The complex phase constant.
coef: The coefficients of TE- and TM- components
Returns:
e_vec: An array of complexes [er, ep, ez].
"""
a1, b1, a2, b2, c2, d2, a3, b3 = coef
pol, n, m = alpha
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
e1 = self.fill(w)
e2 = self.clad(w)
y_te = Coax.y_te(w, h)
if r < self.ri:
y_tm = self.y_tm_clad(w, h)
_y = self.samples.y(h ** 2, w, e2)
yr = _y * r / self.ri
iy = ssp.iv(n, yr)
iy_plus = ssp.iv(n + 1, yr)
iy_minus = ssp.iv(n - 1, yr)
niy_y = (iy_minus - iy_plus) / 2
ipy = (iy_minus + iy_plus) / 2
er = niy_y * a1 + ipy * b1
ep = ipy * a1 + niy_y * b1
ez = -_y / (1j * h * self.ri) * iy * b1
hr = -(y_te * ipy * a1 + y_tm * niy_y * b1)
hp = y_te * niy_y * a1 + y_tm * ipy * b1
hz = -1j * _y / self.ri * iy * a1
elif self.ri <= r < self.r:
y_tm = self.y_tm_core(w, h)
u = self.samples.u(h ** 2, w, e1)
ur = u * r / self.r
ju = ssp.jv(n, ur)
ju_plus = ssp.jv(n + 1, ur)
ju_minus = ssp.jv(n - 1, ur)
nju_u = (ju_minus + ju_plus) / 2
jpu = (ju_minus - ju_plus) / 2
yu = ssp.yv(n, ur)
yu_plus = ssp.yv(n + 1, ur)
yu_minus = ssp.yv(n - 1, ur)
nyu_u = (yu_minus + yu_plus) / 2
ypu = (yu_minus - yu_plus) / 2
er = nju_u * a2 + jpu * b2 + nyu_u * c2 + ypu * d2
ep = jpu * a2 + nju_u * b2 + ypu * c2 + nyu_u * d2
ez = u / (1j * h * self.r) * (ju * b2 + yu * d2)
hr = -(
y_te * jpu * a2
+ y_tm * nju_u * b2
+ y_te * ypu * c2
+ y_tm * nyu_u * d2
)
hp = (
y_te * nju_u * a2
+ y_tm * jpu * b2
+ y_te * nyu_u * c2
+ y_tm * ypu * d2
)
hz = 1j * u / self.r * (ju * a2 + yu * c2)
else:
y_tm = self.y_tm_clad(w, h)
v = self.samples.v(h ** 2, w, e2)
vr = v * r / self.r
kv = ssp.kv(n, vr)
kv_plus = ssp.kv(n + 1, vr)
kv_minus = ssp.kv(n - 1, vr)
nkv_v = -(kv_minus - kv_plus) / 2
kpv = -(kv_minus + kv_plus) / 2
er = nkv_v * a3 + kpv * b3
ep = kpv * a3 + nkv_v * b3
ez = -v / (1j * h * self.r) * kv * b3
hr = -(y_te * kpv * a3 + y_tm * nkv_v * b3)
hp = y_te * nkv_v * a3 + y_tm * kpv * b3
hz = -1j * v / self.r * kv * a3
return np.array([er, ep, ez, hr, hp, hz])
def fields(self, x, y, w, dir, alpha, h, coef):
"""Return the electromagnetic field vectors for the specified mode and
point
Args:
x: A float indicating the x coordinate [um]
y: A float indicating the y coordinate [um]
w: A complex indicating the angular frequency
dir: "h" (horizontal polarization) or "v" (vertical polarization)
            alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
                'E' for TE-like mode, n is the order of the mode, and m is
                the sub order of the mode within that order and polarization.
h: A complex indicating the phase constant.
coef: The coefficients of TE- and TM- components
Returns:
f_vec: An array of complexes [ex, ey, ez, hx, hy, hz].
"""
pol, n, m = alpha
r = np.hypot(x, y)
p = np.arctan2(y, x)
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
if dir == "h":
fr = np.cos(n * p)
fp = -np.sin(n * p)
else:
fr = np.sin(n * p)
fp = np.cos(n * p)
er, ep, ez, hr, hp, hz = self.field_r_dep(r, w, alpha, h, coef)
er *= fr
ep *= fp
ez *= fr
hr *= fp
hp *= fr
hz *= fp
ex = er * np.cos(p) - ep * np.sin(p)
ey = er * np.sin(p) + ep * np.cos(p)
hx = hr * np.cos(p) - hp * np.sin(p)
hy = hr * np.sin(p) + hp * np.cos(p)
return np.array([ex, ey, ez, hx, hy, hz])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ConvNeXt models.
"""
from __future__ import annotations
from functools import partial
from typing import Optional
from typing import Union
import torch
import torch.nn.functional as F
from torch import nn
from torch import Size
from torch import Tensor
from torch.nn.init import trunc_normal_
from onevision.factory import BACKBONES
from onevision.factory import IMAGE_CLASSIFICATION
from onevision.factory import MODELS
from onevision.models.classification.image_classifier import ImageClassifier
from onevision.nn import DropPath
from onevision.type import Indexes
from onevision.type import ListOrTupleAnyT
from onevision.type import Pretrained
from onevision.type import Tensors
__all__ = [
"ConvNeXt",
"ConvNeXtTiny",
"ConvNeXtSmall",
"ConvNeXtBase",
"ConvNeXtLarge",
"ConvNeXtXLarge",
]
# MARK: - Modules
class LayerNorm(nn.Module):
r"""LayerNorm that supports two data formats: channels_last (default) or
channels_first. The ordering of the dimensions in the inputs. channels_last
corresponds to inputs with shape (batch_size, height, width, channels)
while channels_first corresponds to inputs with shape
(batch_size, channels, height, width).
"""
def __init__(
self,
normalized_shape: Union[int, list[int], Size],
eps : float = 1e-6,
data_format : str = "channels_last"
):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError
self.normalized_shape = (normalized_shape, )
def forward(self, x: Tensor):
if self.data_format == "channels_last":
return F.layer_norm(
x, self.normalized_shape, self.weight, self.bias, self.eps
)
elif self.data_format == "channels_first":
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
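# --- Hedged illustration (a minimal sanity check, not part of the model) ----
# The two data formats handled by LayerNorm above should agree once the input
# is permuted accordingly. Guarded so it only runs when this file is executed
# directly; the tensor shape below is arbitrary.
if __name__ == "__main__":
    _x = torch.randn(2, 8, 4, 4)  # (N, C, H, W)
    _ln_first = LayerNorm(8, data_format="channels_first")
    _ln_last = LayerNorm(8, data_format="channels_last")
    _y_first = _ln_first(_x)
    _y_last = _ln_last(_x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    print(torch.allclose(_y_first, _y_last, atol=1e-5))  # expected: True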
class Block(nn.Module):
r"""ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch.
Args:
dim (int):
Number of input channels.
drop_path (float):
Stochastic depth rate. Default: `0.0`.
layer_scale_init_value (float):
Init value for Layer Scale. Default: `1e-6`.
"""
def __init__(
self,
dim : int,
drop_path : float = 0.0,
layer_scale_init_value: float = 1e-6
):
super().__init__()
# depthwise conv
self.dw_conv = nn.Conv2d(
dim, dim, kernel_size=(7, 7), padding=3, groups=dim
)
self.norm = LayerNorm(dim, eps=1e-6)
# pointwise/1x1 convs, implemented with linear layers
self.pw_conv1 = nn.Linear(dim, 4 * dim)
self.act = nn.GELU()
self.pw_conv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(
layer_scale_init_value * torch.ones(dim),
requires_grad=True
) if layer_scale_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
    def forward(self, x: Tensor) -> Tensor:
        shortcut = x
        x = self.dw_conv(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.norm(x)
        x = self.pw_conv1(x)
        x = self.act(x)
        x = self.pw_conv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
        # residual connection from the block input (plus stochastic depth)
        x = shortcut + self.drop_path(x)
        return x
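# --- Hedged illustration (not used anywhere in this module) -----------------
# The Block docstring above also describes an equivalent "channels_first"
# implementation (variant (1)). A minimal sketch of what that variant could
# look like is given below; the class name `BlockChannelsFirst` is made up for
# illustration, keeps every tensor in (N, C, H, W), and replaces the Linear
# layers with 1x1 convolutions while mirroring Block.forward.
class BlockChannelsFirst(nn.Module):
    def __init__(
        self,
        dim : int,
        drop_path : float = 0.0,
        layer_scale_init_value: float = 1e-6
    ):
        super().__init__()
        self.dw_conv = nn.Conv2d(dim, dim, kernel_size=(7, 7), padding=3, groups=dim)
        self.norm = LayerNorm(dim, eps=1e-6, data_format="channels_first")
        # pointwise/1x1 convs instead of Linear layers
        self.pw_conv1 = nn.Conv2d(dim, 4 * dim, kernel_size=(1, 1))
        self.act = nn.GELU()
        self.pw_conv2 = nn.Conv2d(4 * dim, dim, kernel_size=(1, 1))
        self.gamma = nn.Parameter(
            layer_scale_init_value * torch.ones(dim), requires_grad=True
        ) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x
        x = self.dw_conv(x)
        x = self.norm(x)
        x = self.pw_conv1(x)
        x = self.act(x)
        x = self.pw_conv2(x)
        if self.gamma is not None:
            # gamma is per-channel, so broadcast it over H and W
            x = self.gamma[:, None, None] * x
        x = shortcut + self.drop_path(x)
        return x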
# MARK: - ConvNeXt
cfgs = {
"convnext_tiny": {
"in_channels" : 3,
"out_channels" : [96, 192, 384, 768],
"depths" : [3, 3, 9, 3],
"drop_path_rate" : 0.1,
"layer_scale_init_value": 1e-6,
"head_init_scale" : 1.0,
},
"convnext_small": {
"in_channels" : 3,
"out_channels" : [96, 192, 384, 768],
"depths" : [3, 3, 27, 3],
"drop_path_rate" : 0.0,
"layer_scale_init_value": 1e-6,
"head_init_scale" : 1.0,
},
"convnext_base": {
"in_channels" : 3,
"out_channels" : [128, 256, 512, 1024],
"depths" : [3, 3, 27, 3],
"drop_path_rate" : 0.0,
"layer_scale_init_value": 1e-6,
"head_init_scale" : 1.0,
},
"convnext_large": {
"in_channels" : 3,
"out_channels" : [192, 384, 768, 1536],
"depths" : [3, 3, 27, 3],
"drop_path_rate" : 0.0,
"layer_scale_init_value": 1e-6,
"head_init_scale" : 1.0,
},
"convnext_xlarge": {
"in_channels" : 3,
"out_channels" : [256, 512, 1024, 2048],
"depths" : [3, 3, 27, 3],
"drop_path_rate" : 0.0,
"layer_scale_init_value": 1e-6,
"head_init_scale" : 1.0,
},
}
@MODELS.register(name="convnext")
@IMAGE_CLASSIFICATION.register(name="convnext")
@BACKBONES.register(name="convnext")
class ConvNeXt(ImageClassifier):
"""ConvNeXt backbone.
Args:
basename (str, optional):
Model basename. Default: `convnext`.
name (str, optional):
Model name. Default: `convnext`.
num_classes (int, optional):
Number of classes for classification. Default: `None`.
        out_indexes (Indexes):
            Indexes of the layers whose outputs are taken as features.
            If `>= 0`, return the ith layer's output.
            If `-1`, return the final layer's output. Default: `-1`.
        pretrained (Pretrained):
            Use pretrained weights. Default: `False`.
            - If `True`, returns a model pre-trained on ImageNet.
            - If `str` and is a weight file (path), then load weights from
              the saved file.
            - In each inherited model, `pretrained` can be a dictionary's
              key to get the corresponding local file or url of the weight.
"""
# MARK: Magic Functions
def __init__(
self,
# Hyperparameters
in_channels : int = 3,
out_channels : ListOrTupleAnyT[int] = (96, 192, 384, 768),
depths : ListOrTupleAnyT[int] = (3, 3, 9, 3),
drop_path_rate : float = 0.1,
layer_scale_init_value: float = 1e-6,
head_init_scale : Optional[float] = 1.0,
# BaseModel's args
basename : Optional[str] = "convnext",
name : Optional[str] = "convnext",
num_classes : Optional[int] = None,
out_indexes : Indexes = -1,
pretrained : Pretrained = False,
*args, **kwargs
):
super().__init__(
basename = basename,
name = name,
num_classes = num_classes,
out_indexes = out_indexes,
pretrained = pretrained,
*args, **kwargs
)
# NOTE: Get Hyperparameters
self.in_channels = in_channels
self.out_channels = out_channels
self.depths = depths
self.drop_path_rate = drop_path_rate
self.layer_scale_init_value = layer_scale_init_value
self.head_init_scale = head_init_scale
# Stem and 3 intermediate downsampling conv layers
self.downsample_layers = nn.ModuleList()
stem = nn.Sequential(
nn.Conv2d(self.in_channels, self.out_channels[0], (4, 4), (4, 4)),
LayerNorm(self.out_channels[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(self.out_channels[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(self.out_channels[i], self.out_channels[i + 1], (2, 2), (2, 2)),
)
self.downsample_layers.append(downsample_layer)
# 4 feature resolution stages, each consisting of multiple residual blocks
self.stages = nn.ModuleList()
dp_rates = [
x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))
]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=self.out_channels[i], drop_path=dp_rates[cur + j],
layer_scale_init_value=self.layer_scale_init_value)
for j in range(self.depths[i])]
)
self.stages.append(stage)
cur += self.depths[i]
# Norm layer of forward features
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(self.out_channels[i_layer])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
# NOTE: Classifier
self.norm = nn.LayerNorm(self.out_channels[-1], eps=1e-6)
self.head = self.create_classifier(self.out_channels[-1], self.num_classes)
# NOTE: Load Pretrained
if self.pretrained:
self.load_pretrained()
else:
self.apply(self.init_weights)
if not isinstance(self.head, nn.Identity):
self.head.weight.data.mul_(self.head_init_scale)
self.head.bias.data.mul_(self.head_init_scale)
# NOTE: Alias
self.classifier = self.head
# MARK: Configure
@staticmethod
def create_classifier(
num_features: int, num_classes: Optional[int]
) -> nn.Module:
if num_classes and num_classes > 0:
classifier = nn.Linear(num_features, num_classes)
else:
classifier = nn.Identity()
return classifier
def init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=0.02)
nn.init.constant_(m.bias, 0)
# MARK: Forward Pass
def forward_once(self, x: Tensor, *args, **kwargs) -> Tensor:
"""Forward pass once. Implement the logic for a single forward pass.
Args:
x (Tensor):
Input of shape [B, C, H, W].
Returns:
yhat (Tensor):
Predictions.
"""
x = self.forward_features(x)
if isinstance(x, Tensor):
# global average pooling, (N, C, H, W) -> (N, C)
x = self.norm(x.mean([-2, -1]))
x = self.classifier(x)
return x
def forward_features(
self, x: Tensor, out_indexes: Optional[Indexes] = None
) -> Tensors:
"""Forward pass for features extraction.
Args:
x (Tensor):
Input image.
out_indexes (Indexes, optional):
List of layers' indexes to extract features. This is called
in `forward_features()` and is useful when the model
is used as a component in another model.
- If is a `tuple` or `list`, return an array of features.
- If is a `int`, return only the feature from that layer's
index.
- If is `-1`, return the last layer's output.
Default: `None`.
"""
out_indexes = self.out_indexes if out_indexes is None else out_indexes
yhat = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
if isinstance(out_indexes, (tuple, list)) and (i in out_indexes):
norm_layer = getattr(self, f"norm{i}")
output = norm_layer(x)
yhat.append(output)
elif isinstance(out_indexes, int) and (i == out_indexes):
norm_layer = getattr(self, f"norm{i}")
output = norm_layer(x)
return output
elif out_indexes is None or out_indexes == -1:
yhat = x
return yhat
# MARK: - ConvNeXtTiny
@MODELS.register(name="convnext_t")
@MODELS.register(name="convnext_tiny")
@IMAGE_CLASSIFICATION.register(name="convnext_t")
@IMAGE_CLASSIFICATION.register(name="convnext_tiny")
@BACKBONES.register(name="convnext_t")
@BACKBONES.register(name="convnext_tiny")
class ConvNeXtTiny(ConvNeXt):
model_zoo = {
"imagenet_1k": dict(
path="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
file_name="convnext_tiny_imagenet_1k.pth", num_classes=1000,
),
}
# MARK: Magic Functions
def __init__(
self,
# BaseModel's args
name : Optional[str] = "convnext_tiny",
out_indexes: Indexes = -1,
num_classes: Optional[int] = None,
pretrained : Pretrained = False,
*args, **kwargs
):
kwargs = cfgs["convnext_tiny"] | kwargs
super().__init__(
name = name,
out_indexes = out_indexes,
num_classes = num_classes,
pretrained = pretrained,
*args, **kwargs
)
# MARK: - ConvNeXtSmall
@MODELS.register(name="convnext_s")
@MODELS.register(name="convnext_small")
@IMAGE_CLASSIFICATION.register(name="convnext_s")
@IMAGE_CLASSIFICATION.register(name="convnext_small")
@BACKBONES.register(name="convnext_s")
@BACKBONES.register(name="convnext_small")
class ConvNeXtSmall(ConvNeXt):
model_zoo = {
"imagenet_1k": dict(
path="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
file_name="convnext_small_imagenet_1k.pth", num_classes=1000,
),
}
# MARK: Magic Functions
def __init__(
self,
# BaseModel's args
name : Optional[str] = "convnext_small",
out_indexes: Indexes = -1,
num_classes: Optional[int] = None,
pretrained : Pretrained = False,
*args, **kwargs
):
kwargs = cfgs["convnext_small"] | kwargs
super().__init__(
name = name,
out_indexes = out_indexes,
num_classes = num_classes,
pretrained = pretrained,
*args, **kwargs
)
# MARK: - ConvNeXtBase
@MODELS.register(name="convnext_b")
@MODELS.register(name="convnext_base")
@IMAGE_CLASSIFICATION.register(name="convnext_b")
@IMAGE_CLASSIFICATION.register(name="convnext_base")
@BACKBONES.register(name="convnext_b")
@BACKBONES.register(name="convnext_base")
class ConvNeXtBase(ConvNeXt):
model_zoo = {
"imagenet_1k": dict(
path="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
file_name="convnext_base_imagenet_1k.pth", num_classes=1000,
),
"imagenet_22k": dict(
path="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
file_name="convnext_base_imagenet_22k.pth", num_classes=22000,
),
}
# MARK: Magic Functions
def __init__(
self,
# BaseModel's args
name : Optional[str] = "convnext_base",
out_indexes: Indexes = -1,
num_classes: Optional[int] = None,
pretrained : Pretrained = False,
*args, **kwargs
):
kwargs = cfgs["convnext_base"] | kwargs
super().__init__(
name = name,
out_indexes = out_indexes,
num_classes = num_classes,
pretrained = pretrained,
*args, **kwargs
)
# MARK: - ConvNeXtLarge
@MODELS.register(name="convnext_l")
@MODELS.register(name="convnext_large")
@IMAGE_CLASSIFICATION.register(name="convnext_l")
@IMAGE_CLASSIFICATION.register(name="convnext_large")
@BACKBONES.register(name="convnext_l")
@BACKBONES.register(name="convnext_large")
class ConvNeXtLarge(ConvNeXt):
model_zoo = {
"imagenet_1k": dict(
path="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
file_name="convnext_large_imagenet_1k.pth", num_classes=1000,
),
"imagenet_22k": dict(
path="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
file_name="convnext_large_imagenet_22k.pth", num_classes=22000,
),
}
# MARK: Magic Functions
def __init__(
self,
# BaseModel's args
name : Optional[str] = "convnext_large",
out_indexes: Indexes = -1,
num_classes: Optional[int] = None,
pretrained : Pretrained = False,
*args, **kwargs
):
kwargs = cfgs["convnext_large"] | kwargs
super().__init__(
name = name,
out_indexes = out_indexes,
num_classes = num_classes,
pretrained = pretrained,
*args, **kwargs
)
# MARK: - ConvNeXtXLarge
@MODELS.register(name="convnext_xl")
@MODELS.register(name="convnext_xlarge")
@IMAGE_CLASSIFICATION.register(name="convnext_xl")
@IMAGE_CLASSIFICATION.register(name="convnext_xlarge")
@BACKBONES.register(name="convnext_xl")
@BACKBONES.register(name="convnext_xlarge")
class ConvNeXtXLarge(ConvNeXt):
model_zoo = {
"imagenet_22k": dict(
path="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
file_name="convnext_xlarge_imagenet_22k.pth", num_classes=22000,
),
}
# MARK: Magic Functions
def __init__(
self,
# BaseModel's args
name : Optional[str] = "convnext_xlarge",
out_indexes: Indexes = -1,
num_classes: Optional[int] = None,
pretrained : Pretrained = False,
*args, **kwargs
):
kwargs = cfgs["convnext_large"] | kwargs
super().__init__(
name = name,
out_indexes = out_indexes,
num_classes = num_classes,
pretrained = pretrained,
*args, **kwargs
)
|
# -*- coding: utf-8 -*-
"""Neural_Machine_Translation_Attention.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1f7uW5_n27jMXhZBpwll0eyHF543SVmRm
# Neural Machine Translation with Attention
- This uses seq2seq model.
- RNN based encoder. Again a RNN based decoder.
- We also have attention mechanism which focuses on words that are important.
"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import io
import time
"""# Make Dataset
Download and prepare the dataset
We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format:
May I borrow this book? ¿Puedo tomar prestado este libro?
After downloading the dataset, here are the steps we'll take to prepare the data:
- Add a start and end token to each sentence.
- Clean the sentences by removing special characters.
- Create a word index and reverse word index (dictionaries mapping from word → id and id → word).
- Pad each sentence to a maximum length.
"""
path_to_zip = tf.keras.utils.get_file('spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
print(path_to_file)
"""# Data Preprocessing"""
# Convert unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# creating a space between a word and the punctuation following it
# eg: "he is a boy." => "he is a boy ."
# Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.strip()
# adding a start and an end token to the sentence
    # so that the model knows when to start and stop predicting.
w = '<start> ' + w + ' <end>'
return w
en_sentence = u"May I borrow this book ?"
sp_sentence = u"¿Puedo tomar prestado este libro?"
print(preprocess_sentence(en_sentence))
print(preprocess_sentence(sp_sentence).encode('utf-8'))
# 1. Remove the accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return(zip(*word_pairs))
en, sp = create_dataset(path_to_file, None)
print(en[-1])
print(sp[-1])
def max_length(tensor):
return max(len(t) for t in tensor)
def tokenize(lang):
lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
lang_tokenizer.fit_on_texts(lang)
tensor = lang_tokenizer.texts_to_sequences(lang)
# print(tensor.shape)
tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post')
# print(tensor.shape)
return tensor, lang_tokenizer
def load_dataset(path, num_examples=None):
targ_lang, inp_lang = create_dataset(path, num_examples)
input_tensor, inp_lang_tokenizer = tokenize(inp_lang)
target_tensor, targ_lang_tokenizer = tokenize(targ_lang)
return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
"""Limit the size of the dataset to experiment faster (optional)
Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data):
"""
num_examples = 30000
input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer = load_dataset(path_to_file, num_examples=num_examples)
print(input_tensor.shape)
print(target_tensor.shape)
print(inp_lang_tokenizer)
max_length_target = max_length(target_tensor)
max_length_inp = max_length(input_tensor)
print(max_length_target)
print(max_length_inp)
# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# Show length
print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))
"""# Let's see after preprocessing
- Our aim is to convert these tokenized words from one language to the other.
"""
def convert(lang, tensor):
for t in tensor:
if t != 0:
print("%d ------> %s" %(t, lang.index_word[t]))
print("Input Language: tokenized index")
convert(inp_lang_tokenizer, input_tensor_train[0])
print()
print("Target language: tokenized index")
convert(targ_lang_tokenizer, target_tensor_train[0])
"""# Create a tf.data Dataset object"""
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
steps_per_epoch = len(input_tensor_train) // BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang_tokenizer.word_index) + 1
vocab_tar_size = len(targ_lang_tokenizer.word_index) + 1
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
example_input_batch, example_target_batch = next(iter(dataset))
example_input_batch.shape, example_target_batch.shape
"""# Time for enocder + decoder model
# Enocder
Implement an encoder-decoder model with attention which you can read about in the TensorFlow Neural Machine Translation (seq2seq) tutorial. This example uses a more recent set of APIs.
This notebook implements the attention equations from the seq2seq tutorial. The following diagram shows that each input words is assigned a weight by the attention mechanism which is then used by the decoder to predict the next word in the sentence.
The below picture and formulas are an example of attention mechanism from Luong's paper.

The input is put through an encoder model which gives us the encoder output of shape (batch_size, max_length, hidden_size) and the encoder hidden state of shape (batch_size, hidden_size).
Here are the equations that are implemented:

This tutorial uses Bahdanau attention for the encoder. Let's decide on notation before writing the simplified form:
FC = Fully connected (dense) layer
EO = Encoder output
H = hidden state
X = input to the decoder
And the pseudo-code:
score = FC(tanh(FC(EO) + FC(H)))
attention weights = softmax(score, axis = 1). Softmax by default is applied on the last axis but here we want to apply it on the 1st axis, since the shape of score is (batch_size, max_length, hidden_size). Max_length is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis.
context vector = sum(attention weights * EO, axis = 1). Same reason as above for choosing axis as 1.
embedding output = The input to the decoder X is passed through an embedding layer.
merged vector = concat(embedding output, context vector)
This merged vector is then given to the GRU
The shapes of all the vectors at each step have been specified in the comments in the code:
"""
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super().__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.enc_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform')
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state=hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))
print ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))
print(example_input_batch.shape)
print(sample_hidden.shape)
print(vocab_inp_size)
print(vocab_tar_size)
"""# Adding Attention"""
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super().__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# query hidden state shape == (batch_size, hidden size)
# query_with_time_axis shape == (batch_size, 1, hidden size)
# values shape == (batch_size, max_len, hidden size)
# we are doing this to broadcast addition along the time axis to calculate the score
query_with_time_axis = tf.expand_dims(query, axis=1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
# the shape of the tensor before applying self.V is (batch_size, max_length, units)
score = self.V(tf.nn.tanh(self.W1(query_with_time_axis) + self.W2(values)))
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
attention_layer = BahdanauAttention(10)
attention_result, attention_weights = attention_layer(sample_hidden, sample_output)
print("Attention result shape: (batch size, units) {}".format(attention_result.shape))
print("Attention weights shape: (batch_size, sequence_length, 1) {}".format(attention_weights.shape))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super().__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.dec_units)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
context_vector, attention_weights = self.attention(hidden, enc_output)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * 1, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size, vocab)
x = self.fc(output)
return x, state, attention_weights
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
sample_decoder_output, _, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)),sample_hidden, sample_output)
print ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))
"""# Configure optimizer, loss fn and train"""
optimizer = tf.keras.optimizers.Adam()
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
def loss_function(real, pred):
    # mask out padding tokens (id 0) so they do not contribute to the loss
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_mean(loss_)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
"""# Training
- Pass the input through the encoder which return encoder output and the encoder hidden state.
- The encoder output, encoder hidden state and the decoder input (which is the start token) is passed to the decoder.
- The decoder returns the predictions and the decoder hidden state.
- The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.
* Use teacher forcing to decide the next input to the decoder.
* Teacher forcing is the technique where the target word is passed as the next input to the decoder.
* The final step is to calculate the gradients and apply it to the optimizer and backpropagate.
"""
@tf.function
def train_step(inp, targ, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
        enc_output, enc_hidden = encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang_tokenizer.word_index['<start>']] * BATCH_SIZE, 1)
# Use teacher forcing - feed the target as next input not predictions of model
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# Use the teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
variables = encoder.trainable_variables + decoder.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return batch_loss
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
enc_hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
batch_loss = train_step(inp, targ, enc_hidden)
total_loss += batch_loss
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpoint) the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / steps_per_epoch))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
"""# Translate
The evaluate function is similar to the training loop, except we don't use teacher forcing here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.
Stop predicting when the model predicts the end token.
And store the attention weights for every time step.
"""
def evaluate(sentence):
attention_plot = np.zeros((max_length_target, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang_tokenizer.word_index[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=max_length_inp,
padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang_tokenizer.word_index['<start>']], 0)
for t in range(max_length_target):
predictions, dec_hidden, attention_weights = decoder(dec_input,
dec_hidden,
enc_out)
# storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += targ_lang_tokenizer.index_word[predicted_id] + ' '
if targ_lang_tokenizer.index_word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def translate(sentence):
result, sentence, attention_plot = evaluate(sentence)
print('Input: %s' % (sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate(u'hace mucho frio aqui.')
translate(u'esta es mi vida.')
translate(u'hace mucho frio aqui.')
translate(u'¿todavia estan en casa?')
translate(u'trata de averiguarlo.')
|
"""
Module:
unicon.plugins.generic
Authors:
pyATS TEAM (pyats-support@cisco.com, pyats-support-ext@cisco.com)
Description:
    Module for defining all the Statements and callbacks required for the
    current implementation
"""
import re
from time import sleep
from unicon.eal.dialogs import Statement
from unicon.eal.helpers import sendline
from unicon.core.errors import UniconAuthenticationError
from unicon.utils import Utils
from unicon.plugins.generic.patterns import GenericPatterns
from unicon.plugins.utils import (
get_current_credential,
common_cred_username_handler,
common_cred_password_handler,
)
from unicon.utils import to_plaintext
from unicon.bases.routers.connection import ENABLE_CRED_NAME
pat = GenericPatterns()
utils = Utils()
#############################################################
# Callbacks
#############################################################
def connection_refused_handler(spawn):
""" handles connection refused scenarios
"""
raise Exception('Connection refused to device %s' % (str(spawn),))
def connection_failure_handler(spawn, err):
raise Exception(err)
def chatty_term_wait(spawn, trim_buffer=False):
""" Wait a small amount of time for any chatter to cease from the device.
"""
prev_buf_len = len(spawn.buffer)
for _ in range(
spawn.settings.ESCAPE_CHAR_CHATTY_TERM_WAIT_RETRIES):
sleep(spawn.settings.ESCAPE_CHAR_CHATTY_TERM_WAIT)
spawn.read_update_buffer()
cur_buf_len = len(spawn.buffer)
if prev_buf_len == cur_buf_len:
break
else:
prev_buf_len = cur_buf_len
if trim_buffer:
spawn.trim_buffer()
def escape_char_callback(spawn):
""" Wait a small amount of time for terminal chatter to cease before
attempting to obtain prompt, do not attempt to obtain prompt if login message is seen.
"""
chatty_term_wait(spawn)
# Device is already asking for authentication
if re.search(r'.*(User Access Verification|sername:\s*$|assword:\s*$|login:\s*$)', spawn.buffer):
return
auth_pat = ''
if spawn.settings.LOGIN_PROMPT:
auth_pat = spawn.settings.LOGIN_PROMPT
if spawn.settings.PASSWORD_PROMPT:
if auth_pat:
auth_pat += '|' + spawn.settings.PASSWORD_PROMPT
else:
auth_pat = spawn.settings.PASSWORD_PROMPT
if auth_pat and re.search(auth_pat, spawn.buffer):
return
# try and get to the first prompt
# best effort handling of network delays and connection establishing
    # store the length of the currently known buffer
known_buffer = len(spawn.buffer.strip())
for retry_number in range(spawn.settings.ESCAPE_CHAR_PROMPT_WAIT_RETRIES):
# hit enter
spawn.sendline()
spawn.read_update_buffer()
# incremental sleep logic
sleep(spawn.settings.ESCAPE_CHAR_PROMPT_WAIT * (retry_number + 1))
# did we get prompt after?
spawn.read_update_buffer()
# check buffer
if known_buffer != len(spawn.buffer.strip()):
            # we got new stuff - assume it's the prompt, get out
break
def ssh_continue_connecting(spawn):
""" handles SSH new key prompt
"""
sleep(0.1)
spawn.sendline('yes')
def login_handler(spawn, context, session):
""" handles login prompt
"""
credential = get_current_credential(context=context, session=session)
if credential:
common_cred_username_handler(
spawn=spawn, context=context, credential=credential)
else:
spawn.sendline(context['username'])
session['tacacs_login'] = 1
def user_access_verification(session):
# Enable the tacacs_login flag
session['tacacs_login'] = 1
def get_enable_credential_password(context):
""" Get the enable password from the credentials.
1. If there is a previous credential (the last credential used to respond to
a password prompt), use its enable_password member if it exists.
2. Otherwise, if the user specified a list of credentials, pick the final one in the list and
use its enable_password member if it exists.
3. Otherwise, if there is a default credential, use its enable_password member if it exists.
4. Otherwise, use the well known "enable" credential, password member if it exists.
5. Otherwise, use the default credential "password" member if it exists.
6. Otherwise, raise error that no enable password could be found.
"""
credentials = context.get('credentials')
enable_credential_password = ""
login_creds = context.get('login_creds', [])
fallback_cred = context.get('default_cred_name', "")
if not login_creds:
login_creds = [fallback_cred]
if not isinstance(login_creds, list):
login_creds = [login_creds]
# Pick the last item in the login_creds list to select the intended
# credential even if the device does not ask for a password on login
# and the given credential is not consumed.
final_credential = login_creds[-1] if login_creds else ""
if credentials:
enable_pw_checks = [
(context.get('previous_credential', ""), 'enable_password'),
(final_credential, 'enable_password'),
(fallback_cred, 'enable_password'),
(ENABLE_CRED_NAME, 'password'),
(context.get('default_cred_name', ""), 'password'),
]
for cred_name, key in enable_pw_checks:
if cred_name:
candidate_enable_pw = credentials.get(cred_name, {}).get(key)
if candidate_enable_pw:
enable_credential_password = candidate_enable_pw
break
else:
raise UniconAuthenticationError('{}: Could not find an enable credential.'.
format(context.get('hostname', "")))
return to_plaintext(enable_credential_password)
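# --- Hedged illustration (runs only when this module is executed directly) --
# A toy context dict showing the fallback order documented above: the previous
# credential's enable_password is preferred over the default credential's
# password. All names and passwords below are made up.
if __name__ == "__main__":
    _toy_context = {
        'hostname': 'router1',
        'default_cred_name': 'default',
        'previous_credential': 'admin',
        'credentials': {
            'default': {'password': 'lab'},
            'admin': {'password': 'secret', 'enable_password': 'enable-secret'},
        },
    }
    # expected: 'enable-secret' (assuming no secret-string encryption is configured)
    print(get_enable_credential_password(_toy_context))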
def enable_password_handler(spawn, context, session):
if 'password_attempts' not in session:
session['password_attempts'] = 1
else:
session['password_attempts'] += 1
if session.password_attempts > spawn.settings.PASSWORD_ATTEMPTS:
raise UniconAuthenticationError('Too many enable password retries')
enable_credential_password = get_enable_credential_password(context=context)
if enable_credential_password:
spawn.sendline(enable_credential_password)
else:
spawn.sendline(context['enable_password'])
def ssh_tacacs_handler(spawn, context):
result = False
start_cmd = spawn.spawn_command
if context.get('username'):
if re.search(context['username'] + r'@', start_cmd) \
or re.search(r'-l\s*' + context['username'], start_cmd) \
or re.search(context['username'] + r'@', spawn.buffer):
result = True
return result
def password_handler(spawn, context, session):
""" handles password prompt
"""
credential = get_current_credential(context=context, session=session)
if credential:
common_cred_password_handler(
spawn=spawn, context=context, credential=credential,
session=session)
else:
if 'password_attempts' not in session:
session['password_attempts'] = 1
else:
session['password_attempts'] += 1
if session.password_attempts > spawn.settings.PASSWORD_ATTEMPTS:
raise UniconAuthenticationError('Too many password retries')
if context.get('username', '') == spawn.last_sent.rstrip() or ssh_tacacs_handler(spawn, context):
spawn.sendline(context['tacacs_password'])
else:
spawn.sendline(context['line_password'])
def passphrase_handler(spawn, context, session):
""" Handles SSH passphrase prompt """
credential = get_current_credential(context=context, session=session)
try:
spawn.sendline(to_plaintext(
context['credentials'][credential]['passphrase']))
except KeyError:
raise UniconAuthenticationError("No passphrase found "
"for credential {}.".format(credential))
def bad_password_handler(spawn):
""" handles bad password prompt
"""
raise UniconAuthenticationError('Bad Password sent to device %s' % (str(spawn),))
def incorrect_login_handler(spawn, context, session):
    # On NX-OS devices, if the password prompt occurs before the username
    # prompt on the first attempt, a "Login incorrect" error is produced.
# Reset the cred_iter to try again
if 'incorrect_login_attempts' not in session:
session.pop('cred_iter', None)
credential = get_current_credential(context=context, session=session)
if credential and 'incorrect_login_attempts' in session:
# If credentials have been supplied, there are no login retries.
# The user must supply appropriate credentials to ensure login
# does not fail. Skip it for the first attempt
raise UniconAuthenticationError(
'Login failure, either wrong username or password')
else:
if 'incorrect_login_attempts' not in session:
session['incorrect_login_attempts'] = 1
# Let's give a chance for unicon to login with right credentials
# let's give three attempts
if session['incorrect_login_attempts'] <= 3:
session['incorrect_login_attempts'] = \
session['incorrect_login_attempts'] + 1
else:
raise UniconAuthenticationError(
'Login failure, either wrong username or password')
def sudo_password_handler(spawn, context, session):
""" Password handler for sudo command
"""
if 'sudo_attempts' not in session:
session['sudo_attempts'] = 1
else:
raise UniconAuthenticationError('sudo failure')
credentials = context.get('credentials')
if credentials:
try:
spawn.sendline(
to_plaintext(credentials['sudo']['password']))
except KeyError:
raise UniconAuthenticationError("No password has been defined "
"for sudo credential.")
else:
raise UniconAuthenticationError("No credentials has been defined for sudo.")
def wait_and_enter(spawn):
sleep(0.5) # otherwise newline is sometimes lost?
spawn.sendline()
def more_prompt_handler(spawn):
output = utils.remove_backspace(spawn.match.match_output)
all_more = re.findall(spawn.settings.MORE_REPLACE_PATTERN, output)
spawn.match.match_output = ''.join(output.rsplit(all_more[-1], 1))
spawn.send(spawn.settings.MORE_CONTINUE)
def custom_auth_statements(login_pattern=None, password_pattern=None):
""" Return list of Statements based on login_pattern and password_prompt."""
stmt_list = []
if login_pattern:
login_stmt = Statement(pattern=login_pattern,
action=login_handler,
args=None,
loop_continue=True,
continue_timer=False)
stmt_list.append(login_stmt)
if password_pattern:
password_stmt = Statement(pattern=password_pattern,
action=password_handler,
args=None,
loop_continue=True,
continue_timer=False)
stmt_list.append(password_stmt)
if stmt_list:
return stmt_list
def update_context(spawn, context, session, **kwargs):
context.update(kwargs)
#############################################################
# Generic statements
#############################################################
class GenericStatements():
"""
Class that defines All the Statements for Generic platform
implementation
"""
def __init__(self):
'''
All generic Statements
'''
self.escape_char_stmt = Statement(pattern=pat.escape_char,
action=escape_char_callback,
args=None,
loop_continue=True,
continue_timer=False)
self.press_return_stmt = Statement(pattern=pat.press_return,
action=sendline, args=None,
loop_continue=True,
continue_timer=False)
self.connection_refused_stmt = \
Statement(pattern=pat.connection_refused,
action=connection_refused_handler,
args=None,
loop_continue=False,
continue_timer=False)
self.bad_password_stmt = Statement(pattern=pat.bad_passwords,
action=bad_password_handler,
args=None,
loop_continue=False,
continue_timer=False)
self.login_incorrect = Statement(pattern=pat.login_incorrect,
action=incorrect_login_handler,
args=None,
loop_continue=True,
continue_timer=False)
self.disconnect_error_stmt = Statement(pattern=pat.disconnect_message,
action=connection_failure_handler,
args={'err': 'received disconnect from router'},
loop_continue=False,
continue_timer=False)
self.login_stmt = Statement(pattern=pat.username,
action=login_handler,
args=None,
loop_continue=True,
continue_timer=False)
self.useraccess_stmt = Statement(pattern=pat.useracess,
action=user_access_verification,
args=None,
loop_continue=True,
continue_timer=False)
self.password_stmt = Statement(pattern=pat.password,
action=password_handler,
args=None,
loop_continue=True,
continue_timer=False)
self.enable_password_stmt = Statement(pattern=pat.password,
action=enable_password_handler,
args=None,
loop_continue=True,
continue_timer=False)
self.password_ok_stmt = Statement(pattern=pat.password_ok,
action=sendline,
args=None,
loop_continue=True,
continue_timer=False)
self.more_prompt_stmt = Statement(pattern=pat.more_prompt,
action=more_prompt_handler,
args=None,
loop_continue=True,
continue_timer=False)
self.confirm_prompt_stmt = Statement(pattern=pat.confirm_prompt,
action=sendline,
args=None,
loop_continue=True,
continue_timer=False)
self.confirm_prompt_y_n_stmt = Statement(pattern=pat.confirm_prompt_y_n,
action='sendline(y)',
args=None,
loop_continue=True,
continue_timer=False)
self.yes_no_stmt = Statement(pattern=pat.yes_no_prompt,
action=sendline,
args={'command': 'y'},
loop_continue=True,
continue_timer=False)
self.continue_connect_stmt = Statement(pattern=pat.continue_connect,
action=ssh_continue_connecting,
args=None,
loop_continue=True,
continue_timer=False)
self.hit_enter_stmt = Statement(pattern=pat.hit_enter,
action=wait_and_enter,
args=None,
loop_continue=True,
continue_timer=False)
self.press_ctrlx_stmt = Statement(pattern=pat.press_ctrlx,
action=wait_and_enter,
args=None,
loop_continue=True,
continue_timer=False)
self.init_conf_stmt = Statement(pattern=pat.setup_dialog,
action='sendline(no)',
args=None,
loop_continue=True,
continue_timer=False)
self.mgmt_setup_stmt = Statement(pattern=pat.enter_basic_mgmt_setup,
action='send(\x03)', # Ctrl-C
args=None,
loop_continue=True,
continue_timer=False)
self.clear_kerberos_no_realm = Statement(pattern=pat.kerberos_no_realm,
action=sendline,
args=None,
loop_continue=True,
continue_timer=False)
self.connected_stmt = Statement(pattern=pat.connected,
action=sendline,
args=None,
loop_continue=True,
continue_timer=False)
self.passphrase_stmt = Statement(pattern=pat.passphrase_prompt,
action=passphrase_handler,
args=None,
loop_continue=True,
continue_timer=False)
self.sudo_stmt = Statement(pattern=pat.sudo_password_prompt,
action=sudo_password_handler,
args=None,
loop_continue=True,
continue_timer=False)
#############################################################
# Statement lists
#############################################################
generic_statements = GenericStatements()
#############################################################
# Initial connection Statements
#############################################################
pre_connection_statement_list = [generic_statements.escape_char_stmt,
generic_statements.press_return_stmt,
generic_statements.continue_connect_stmt,
generic_statements.connection_refused_stmt,
generic_statements.disconnect_error_stmt,
generic_statements.hit_enter_stmt,
generic_statements.press_ctrlx_stmt,
generic_statements.connected_stmt,
]
#############################################################
# Authentication Statements
#############################################################
authentication_statement_list = [generic_statements.bad_password_stmt,
generic_statements.login_incorrect,
generic_statements.login_stmt,
generic_statements.useraccess_stmt,
generic_statements.password_stmt,
generic_statements.clear_kerberos_no_realm,
generic_statements.password_ok_stmt,
generic_statements.passphrase_stmt
]
#############################################################
# Setup Statements
#############################################################
initial_statement_list = [generic_statements.init_conf_stmt,
generic_statements.mgmt_setup_stmt
]
connection_statement_list = authentication_statement_list + initial_statement_list + pre_connection_statement_list
############################################################
# Default pattern Statement
#############################################################
default_statement_list = [generic_statements.more_prompt_stmt]
|
# -*- coding: utf-8 -*-
import warnings
from .helper import Helper
from .table import Table
from ..outputs import NullOutput
class TableHelper(Helper):
"""
Provides helpers to display table output.
"""
LAYOUT_DEFAULT = 0
LAYOUT_BORDERLESS = 1
LAYOUT_COMPACT = 2
def __init__(self):
warnings.warn('TableHelper class is deprecated. '
'Use the Table class instead', DeprecationWarning)
self._table = Table(NullOutput())
def set_layout(self, layout):
"""
Sets table layout type.
:param layout: self.LAYOUT_*
:type layout: int
:rtype: TableHelper
"""
if layout == self.LAYOUT_BORDERLESS:
self._table.set_style('borderless')
elif layout == self.LAYOUT_COMPACT:
self._table.set_style('compact')
elif layout == self.LAYOUT_DEFAULT:
self._table.set_style('default')
else:
raise Exception('Invalid table layout "%s".' % layout)
return self
def set_headers(self, headers):
self._table.set_headers(headers)
return self
def set_rows(self, rows):
self._table.set_rows(rows)
return self
def add_rows(self, rows):
self._table.add_rows(rows)
return self
def add_row(self, row):
self._table.add_row(row)
return self
def set_row(self, column, row):
self._table.set_row(column, row)
return self
def set_padding_char(self, padding_char):
self._table.get_style().set_padding_char(padding_char)
return self
def set_horizontal_border_char(self, horizontal_border_char):
self._table.get_style().set_horizontal_border_char(horizontal_border_char)
return self
def set_vertical_border_char(self, vertical_border_char):
self._table.get_style().set_vertical_border_char(vertical_border_char)
return self
def set_crossing_char(self, crossing_char):
self._table.get_style().set_crossing_char(crossing_char)
return self
def set_cell_header_format(self, cell_header_format):
self._table.get_style().set_cell_header_format(cell_header_format)
return self
def set_cell_row_format(self, cell_row_format):
self._table.get_style().set_cell_row_format(cell_row_format)
return self
def set_cell_row_content_format(self, cell_row_content_format):
self._table.get_style().set_cell_row_content_format(cell_row_content_format)
return self
def set_border_format(self, border_format):
self._table.get_style().set_border_format(border_format)
return self
def set_pad_type(self, pad_type):
self._table.get_style().set_pad_type(pad_type)
return self
def render(self, output):
"""
Renders table to output.
Example:
+---------------+-----------------------+------------------+
| ISBN | Title | Author |
+---------------+-----------------------+------------------+
| 99921-58-10-7 | Divine Comedy | Dante Alighieri |
| 9971-5-0210-0 | A Tale of Two Cities | Charles Dickens |
| 960-425-059-0 | The Lord of the Rings | J. R. R. Tolkien |
+---------------+-----------------------+------------------+
        :param output: Output
        :type output: Output
"""
self._table._output = output
return self._table.render()
def get_name(self):
return 'table'
|
from output.models.nist_data.atomic.g_month_day.schema_instance.nistschema_sv_iv_atomic_g_month_day_max_exclusive_5_xsd.nistschema_sv_iv_atomic_g_month_day_max_exclusive_5 import NistschemaSvIvAtomicGMonthDayMaxExclusive5
__all__ = [
"NistschemaSvIvAtomicGMonthDayMaxExclusive5",
]
|
#!/usr/bin/python3
import sqlite3
import cgi, cgitb
import json
from urllib.request import urlopen
print("Content-Type: text/html")
print()
conf = {}
with open("../admin/qsdb.conf", mode="rt") as fl:
for line in fl:
line = line.strip().strip(" ")
if len(line) < 1 or line[0] == "#": continue
token = line.split("=")
if len(token) < 2: continue
conf[token[0].strip(" ")] = token[1].strip(" ")
def make_dict(cur):
    # sqlite3 cursors report rowcount == -1 for SELECT, so check the fetched rows
    rows = cur.fetchall()
    return {key[0]: value for key, value in zip(cur.description, rows[0])} if rows else None
form = cgi.FieldStorage()
try:
node_id = int(form.getvalue('node_id'))
except Exception as e:
print(-2)
exit()
hostname = form.getvalue('host') if "host" in form else ""
if "root_path" not in conf:
print(-1)
exit()
if hostname != "":
try:
request = "&".join(["%s=%s" % (key, form.getvalue(key)) for key in form if key != "host"])
print(urlopen("%s/scripts/get-image.py?%s" % (hostname, request), timeout = 2).read().decode("utf8"))
except Exception as e:
print(-1)
exit()
database = "%s/data/database.sqlite" % conf["root_path"]
db = sqlite3.connect(database)
my_cur = db.cursor()
my_cur.execute('SELECT * FROM images WHERE node_id = ?;', (node_id,))
result = make_dict(my_cur)
if result is None:
    print("{}")
    exit()
print(json.dumps(result))
|
def order_search(item, items: list):
    """Binary search: sort `items` in place, then return the index of `item`
    in the sorted list, or None if it is not present."""
if not isinstance(items, list):
print(f'The items {items} MUST be of type list')
return
items.sort()
size = len(items) - 1
lower = 0
upper = size
while lower <= upper:
mid = (upper + lower) // 2
if item == items[mid]:
return mid
if item > items[mid]:
lower = mid + 1
else:
upper = mid - 1
if lower > upper:
return None
data = [87, 47, 23, 53, 20, 56, 6, 19, 8, 41]
print(order_search(53, data))
|
def main():
    # input
    n = int(input())
    # compute: collect every i in [1, n] that is divisible by 3
    # or contains the digit 3 somewhere in its decimal representation
    ans = []
    for i in range(1, n + 1):
        if i % 3 == 0:
            ans.append(i)
            continue
        x = i
        while x:
            if x % 10 == 3:
                ans.append(i)
                break
            x //= 10
    # output
    for v in ans:
        print(f' {v}', end='')
    print()
if __name__ == '__main__':
main()
|
from pexels_api import API
import os
# Init api object with your Pexels API key
API_KEY = os.environ.get("PEXELS_API_KEY")
api = API(API_KEY)
# Search 'koala' photos
api.search("koala")
print("Total results: ", api.total_results)
# Loop all the pages
while True:
# Get all photos in the page
photos = api.get_entries()
# For each photo print its properties
for photo in photos:
print("-----------------------------------------------")
print("Photo id: ", photo.id)
print("Photo width: ", photo.width)
print("Photo height: ", photo.height)
print("Photo url: ", photo.url)
print("Photographer: ", photo.photographer)
print("Photo description: ", photo.description)
print("Photo extension: ", photo.extension)
print("Photo sizes:")
print("\toriginal: ", photo.original)
print("\tcompressed: ", photo.compressed)
print("\tlarge2x: ", photo.large2x)
print("\tlarge: ", photo.large)
print("\tmedium: ", photo.medium)
print("\tsmall: ", photo.small)
print("\ttiny: ", photo.tiny)
print("\tportrait: ", photo.portrait)
print("\tlandscape: ", photo.landscape)
# If there is no next page print the last page and end the loop
if not api.has_next_page:
print("Last page: ", api.page)
break
# Search next page
api.search_next_page()
|
#/ Type: UI
#/ Name: Channel Calculator
#/ Authors: Joe Petrus and Bence Paul
#/ Description: This is an example that lets you create a channel from existing channels
#/ References: None
#/ Version: 1.0
#/ Contact: support@iolite-software.com
from iolite.QtGui import QAction, QLabel, QLineEdit, QCompleter, QDialog, QHBoxLayout, QVBoxLayout, QDialogButtonBox, QComboBox
from iolite.QtCore import Qt
import numpy as np
import re
class AutoCompleteEdit(QLineEdit):
"""
A slightly modified version of:
http://blog.elentok.com/2011/08/autocomplete-textbox-for-multiple.html
"""
def __init__(self, model, separator = ' ', addSpaceAfterCompleting = True, parent = None):
super().__init__(parent)
self._separator = separator
self._addSpaceAfterCompleting = addSpaceAfterCompleting
self._completer = QCompleter(model)
self._completer.setWidget(self)
self._completer.activated.connect(self._insertCompletion)
self._keysToIgnore = [Qt.Key_Enter,
Qt.Key_Return,
Qt.Key_Escape,
Qt.Key_Tab]
def _insertCompletion(self, completion):
extra = len(completion) - len(self._completer.completionPrefix)
extra_text = completion[-extra:]
if self._addSpaceAfterCompleting:
extra_text += ' '
self.setText(self.text + extra_text)
def textUnderCursor(self):
text = self.text
textUnderCursor = ''
i = self.cursorPosition - 1
while i >=0 and text[i] != self._separator:
textUnderCursor = text[i] + textUnderCursor
i -= 1
return textUnderCursor
def keyPressEvent(self, event):
if self._completer.popup().isVisible():
if event.key() in self._keysToIgnore:
event.ignore()
return
QLineEdit.keyPressEvent(self, event)
completionPrefix = self.textUnderCursor()
et = event.text()
if completionPrefix != self._completer.completionPrefix:
self._updateCompleterPopupItems(completionPrefix)
if len(et) > 0 and len(completionPrefix) > 0:
self._completer.complete()
if len(completionPrefix) == 0:
self._completer.popup().hide()
def _updateCompleterPopupItems(self, completionPrefix):
self._completer.setCompletionPrefix(completionPrefix)
self._completer.popup().setCurrentIndex(self._completer.completionModel().index(0,0))
def createUIElements():
action = QAction("Channel Calculator", ui)
action.triggered.connect(calculate)
ui.setAction(action)
ui.setMenuName(['Examples'])
def calculate():
d = QDialog()
d.setWindowTitle('Channel Calculator')
hl = QHBoxLayout()
name_le = QLineEdit(d)
hl.addWidget(name_le)
hl.addWidget(QLabel('='))
    channel_names = [re.sub(r'\W|^(?=\d)', '_', channel_name) for channel_name in data.timeSeriesNames()]
eqn_le = AutoCompleteEdit(channel_names, parent=d)
hl.addWidget(eqn_le)
type_cb = QComboBox(d)
channel_types = {'Input': data.Input, 'Intermediate': data.Intermediate, 'Output': data.Output}
for tname, tv in channel_types.items():
type_cb.addItem(tname, tv)
hl.addWidget(type_cb)
bb = QDialogButtonBox(QDialogButtonBox.Cancel | QDialogButtonBox.Ok, Qt.Horizontal, d)
bb.accepted.connect(d.accept)
bb.rejected.connect(d.reject)
hl.addWidget(bb)
d.setLayout(hl)
if d.exec() == QDialog.Accepted:
for channel_name in data.timeSeriesNames():
            exec('%s = data.timeSeries("%s").data()' % (re.sub(r'\W|^(?=\d)', '_', channel_name), channel_name))
new_data = eval('%s'%eqn_le.text)
new_type = type_cb.currentData
new_channel = data.createTimeSeries(name_le.text, new_type, None, new_data)
new_channel.setProperty('Created by', 'Channel Calculator')
data.dataChanged.emit()
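# Example usage (hypothetical channel names): with channels "Sr88" and "Ca43" loaded, a user
# could type a new channel name on the left and the expression "Sr88 / Ca43" on the right;
# calculate() binds each sanitized channel name to its data array and evaluates the expression.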
|
# See pybullet quickstart guide here:
# https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
# Create a Tiltbrush-like app, drawing lines using any controller
# Line width can be changed
import pybullet as p
CONTROLLER_ID = 0
POSITION = 1
ORIENTATION = 2
BUTTONS = 6
#assume that the VR physics server has already been started
c = p.connect(p.SHARED_MEMORY)
print(c)
if (c < 0):
p.connect(p.GUI)
p.setInternalSimFlags(0) #don't load default robot assets etc
p.resetSimulation()
p.loadURDF("plane.urdf")
p.loadURDF("cube.urdf", 0, 0, 1)
p.setGravity(0, 0, -10)
p.setRealTimeSimulation(1)
prevPosition = [None] * p.VR_MAX_CONTROLLERS
# one RGB triple per controller (a flat list would break the per-controller indexing used below)
colors = [[0., 0.5, 0.5] for _ in range(p.VR_MAX_CONTROLLERS)]
widths = [3] * p.VR_MAX_CONTROLLERS
a = [0, 0, 0]
#use a few default colors
colors[0] = [0, 0, 0]
colors[1] = [0.5, 0, 0]
colors[2] = [0, 0.5, 0]
colors[3] = [0, 0, 0.5]
colors[4] = [0.5, 0.5, 0.]
colors[5] = [.5, .5, .5]
p.startStateLogging(p.STATE_LOGGING_VR_CONTROLLERS, "vr_hmd.bin", deviceTypeFilter=p.VR_DEVICE_HMD)
p.startStateLogging(p.STATE_LOGGING_GENERIC_ROBOT, "generic_data.bin")
p.startStateLogging(p.STATE_LOGGING_CONTACT_POINTS, "contact_points.bin")
while True:
events = p.getVREvents(p.VR_DEVICE_HMD + p.VR_DEVICE_GENERIC_TRACKER)
for e in (events):
pos = e[POSITION]
mat = p.getMatrixFromQuaternion(e[ORIENTATION])
dir0 = [mat[0], mat[3], mat[6]]
dir1 = [mat[1], mat[4], mat[7]]
dir2 = [mat[2], mat[5], mat[8]]
lineLen = 0.1
dir = [-mat[2], -mat[5], -mat[8]]
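    # dir0/dir1/dir2 are the columns of the device's rotation matrix (its local x/y/z axes in
    # world coordinates), drawn below as red/green/blue debug lines; "dir" points along the
    # negative local z axis and is drawn as the dim yellow pointing line.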
to = [pos[0] + lineLen * dir[0], pos[1] + lineLen * dir[1], pos[2] + lineLen * dir[2]]
toX = [pos[0] + lineLen * dir0[0], pos[1] + lineLen * dir0[1], pos[2] + lineLen * dir0[2]]
toY = [pos[0] + lineLen * dir1[0], pos[1] + lineLen * dir1[1], pos[2] + lineLen * dir1[2]]
toZ = [pos[0] + lineLen * dir2[0], pos[1] + lineLen * dir2[1], pos[2] + lineLen * dir2[2]]
p.addUserDebugLine(pos, toX, [1, 0, 0], 1)
p.addUserDebugLine(pos, toY, [0, 1, 0], 1)
p.addUserDebugLine(pos, toZ, [0, 0, 1], 1)
p.addUserDebugLine(pos, to, [0.5, 0.5, 0.], 1, 3)
events = p.getVREvents()
for e in (events):
if (e[BUTTONS][33] & p.VR_BUTTON_WAS_TRIGGERED):
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
if (e[BUTTONS][32] & p.VR_BUTTON_WAS_TRIGGERED):
      widths[e[CONTROLLER_ID]] = widths[e[CONTROLLER_ID]] + 1
if (widths[e[CONTROLLER_ID]] > 20):
widths[e[CONTROLLER_ID]] = 1
if (e[BUTTONS][1] & p.VR_BUTTON_WAS_TRIGGERED):
p.resetSimulation()
#p.setGravity(0,0,-10)
p.removeAllUserDebugItems()
p.loadURDF("plane.urdf")
if (e[BUTTONS][33] == p.VR_BUTTON_IS_DOWN):
pt = prevPosition[e[CONTROLLER_ID]]
#print(prevPosition[e[0]])
#print(e[1])
diff = [pt[0] - e[POSITION][0], pt[1] - e[POSITION][1], pt[2] - e[POSITION][2]]
lenSqr = diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2]
ptDistThreshold = 0.01
if (lenSqr > (ptDistThreshold * ptDistThreshold)):
p.addUserDebugLine(e[POSITION], prevPosition[e[CONTROLLER_ID]], colors[e[CONTROLLER_ID]],
widths[e[CONTROLLER_ID]])
#p.loadURDF("cube_small.urdf",e[1])
colors[e[CONTROLLER_ID]] = [
1 - colors[e[CONTROLLER_ID]][0], 1 - colors[e[CONTROLLER_ID]][1],
1 - colors[e[CONTROLLER_ID]][2]
]
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
|
from werkzeug.serving import run_simple
# from inquire_sql_backend.server_annoy import app
from inquire_sql_backend.server_nms import app
if __name__ == '__main__':
run_simple("0.0.0.0", port=9000, application=app)
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.geb.tests.base import (
BaseWebDocsTest
)
from openprocurement.auctions.geb.tests.blanks.create import (
create_auction_dump
)
from openprocurement.auctions.geb.tests.blanks.draft import (
get_auction_dump,
phase_commit_dump
)
from openprocurement.auctions.geb.tests.blanks.active_rectification import (
change_title_dump,
add_document_dump
)
from openprocurement.auctions.geb.tests.blanks.active_tendering import (
add_question_dump,
answer_question_dump,
bid_make_pending_dump,
bid_delete_in_pending_status_dump,
bid_get_in_pending_status_dump,
bid_make_activate_dump,
bid_add_dump
)
from openprocurement.auctions.geb.tests.blanks.active_auction import (
get_auction_urls_dump
)
from openprocurement.auctions.geb.tests.helpers import (
change_machine_state
)
from openprocurement.auctions.geb.tests.states import (
ProcedureMachine
)
from openprocurement.auctions.geb.tests.fixtures.active_tendering import (
ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_QUESTION,
AUCTION_WITH_DRAFT_BID,
AUCTION_WITH_PENDING_BID
)
class CreateAuctionDumpTest(BaseWebDocsTest):
test_create_auction_dump = snitch(create_auction_dump)
def setUp(self):
super(CreateAuctionDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
change_machine_state(procedure, 'create')
context = procedure.snapshot(dump=False)
self.auction = context['auction']['data']
class DraftAuctionDumpTest(BaseWebDocsTest):
test_get_auction_dump = snitch(get_auction_dump)
test_phase_commit_dump = snitch(phase_commit_dump)
def setUp(self):
super(DraftAuctionDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
change_machine_state(procedure, 'draft')
context = procedure.snapshot()
self.auction = context['auction']
class RectificationAuctionDumpTest(BaseWebDocsTest):
test_change_title_dump = snitch(change_title_dump)
def setUp(self):
super(RectificationAuctionDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
change_machine_state(procedure, 'active.rectification')
context = procedure.snapshot()
self.auction = context['auction']
self.ENTRYPOINT = '/auctions/{}?acc_token={}'.format(self.auction['data']['id'],
self.auction['access']['token'])
class RectificationAuctionDocumentsDumpTest(BaseWebDocsTest):
docservice = True
test_add_document_dump = snitch(add_document_dump)
def setUp(self):
super(RectificationAuctionDocumentsDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
change_machine_state(procedure, 'active.rectification')
context = procedure.snapshot()
self.auction = context['auction']
self.ENTRYPOINT = '/auctions/{}/documents?acc_token={}'.format(self.auction['data']['id'],
self.auction['access']['token'])
class TenderingAuctionDumpTest(BaseWebDocsTest):
test_add_question_dump = snitch(add_question_dump)
test_bid_add_dump = snitch(bid_add_dump)
def setUp(self):
super(TenderingAuctionDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
procedure.toggle('active.tendering')
context = procedure.snapshot()
self.auction = context['auction']
entrypoints = {}
entrypoints['questions'] = '/auctions/{}/questions'.format(self.auction['data']['id'])
entrypoints['bids'] = '/auctions/{}/bids'.format(self.auction['data']['id'])
self.ENTRYPOINTS = entrypoints
class TenderingAuctionQuestionsDumpTest(BaseWebDocsTest):
test_answer_question_dump = snitch(answer_question_dump)
def setUp(self):
super(TenderingAuctionQuestionsDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
procedure.toggle('active.tendering')
context = procedure.snapshot(fixture=ACTIVE_TENDERING_AUCTION_DEFAULT_FIXTURE_WITH_QUESTION)
self.auction = context['auction']
self.questions = context['questions']
class TenderingAuctionBidsDraftDumpTest(BaseWebDocsTest):
test_bid_make_pending_dump = snitch(bid_make_pending_dump)
def setUp(self):
super(TenderingAuctionBidsDraftDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
procedure.toggle('active.tendering')
context = procedure.snapshot(fixture=AUCTION_WITH_DRAFT_BID)
auction = context['auction']
bid = context['bids'][0]
entrypoints = {}
pattern = '/auctions/{auction}/bids/{bid}?acc_token={token}'
entrypoints['bid'] = pattern.format(auction=auction['data']['id'],
bid=bid['data']['id'],
token=bid['access']['token'])
self.ENTRYPOINTS = entrypoints
self.bid = bid
self.auction = auction
class TenderingAuctionBidsPendingDumpTest(BaseWebDocsTest):
docservice = True
test_bid_delete_in_pending_status_dump = snitch(bid_delete_in_pending_status_dump)
test_bid_make_activate_dump = snitch(bid_make_activate_dump)
test_bid_get_in_pending_status_dump = snitch(bid_get_in_pending_status_dump)
def setUp(self):
super(TenderingAuctionBidsPendingDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
procedure.toggle('active.tendering')
context = procedure.snapshot(fixture=AUCTION_WITH_PENDING_BID)
auction = context['auction']
bid = context['bids'][0]
entrypoints = {}
pattern = '/auctions/{auction}/bids/{bid}?acc_token={token}'
entrypoints['bid'] = pattern.format(auction=auction['data']['id'],
bid=bid['data']['id'],
token=bid['access']['token'])
pattern = '/auctions/{auction}/bids/{bid}/documents?acc_token={token}'
entrypoints['add_bid_document'] = pattern.format(auction=auction['data']['id'],
bid=bid['data']['id'],
token=bid['access']['token'])
self.ENTRYPOINTS = entrypoints
self.bid = bid
self.auction = auction
class ActiveAuctionDumpTest(BaseWebDocsTest):
docservice = True
test_get_auction_urls_dump = snitch(get_auction_urls_dump)
def setUp(self):
super(ActiveAuctionDumpTest, self).setUp()
procedure = ProcedureMachine()
procedure.set_db_connector(self.db)
procedure.toggle('active.auction')
self.procedure = procedure
self.app.authorization = ('Basic', ('auction', ''))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CreateAuctionDumpTest))
suite.addTest(unittest.makeSuite(DraftAuctionDumpTest))
suite.addTest(unittest.makeSuite(RectificationAuctionDumpTest))
suite.addTest(unittest.makeSuite(RectificationAuctionDocumentsDumpTest))
suite.addTest(unittest.makeSuite(TenderingAuctionDumpTest))
suite.addTest(unittest.makeSuite(TenderingAuctionBidsDraftDumpTest))
suite.addTest(unittest.makeSuite(TenderingAuctionBidsPendingDumpTest))
suite.addTest(unittest.makeSuite(TenderingAuctionQuestionsDumpTest))
suite.addTest(unittest.makeSuite(ActiveAuctionDumpTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
from typing import Dict
from requests.exceptions import RequestException
import requests, json
class AirbyteApiCallerException(Exception):
pass
class AirbyteApiCaller:
def api_call(self, endpoint: str, body: Dict[str, str] = None):
"""
Generic `api caller` for contacting Airbyte
"""
try:
response = requests.post(endpoint, json=body)
if response.status_code >= 200 and response.status_code < 300:
return json.loads(response.text) if response.text else None
else:
raise RequestException(
f"Unexpected status code from airbyte in endpoint {endpoint}: {response.status_code}"
)
except RequestException as e:
raise AirbyteApiCallerException(
f"Airbyte API error in endpoint {endpoint}: " + str(e)
)
def __init__(self, api_host, api_port):
airbyte_host = api_host
airbyte_port = api_port
airbyte_api_root = "api/v1/"
airbyte_api_base_endpoint = f"{airbyte_host}:{airbyte_port}/{airbyte_api_root}"
airbyte_api_list_component = airbyte_api_base_endpoint + "{component}/list"
self.airbyte_endpoint_list_connections = airbyte_api_list_component.format(
component="connections"
)
self.airbyte_endpoint_list_sources = airbyte_api_list_component.format(
component="sources"
)
self.airbyte_endpoint_list_destinations = airbyte_api_list_component.format(
component="destinations"
)
airbyte_endpoint_list_workspaces = airbyte_api_list_component.format(
component="workspaces"
)
airbyte_api_create_component = airbyte_api_base_endpoint + "{component}/create"
self.airbyte_endpoint_create_connections = airbyte_api_create_component.format(
component="connections"
)
self.airbyte_endpoint_create_sources = airbyte_api_create_component.format(
component="sources"
)
self.airbyte_endpoint_create_destinations = airbyte_api_create_component.format(
component="destinations"
)
airbyte_api_update_component = airbyte_api_base_endpoint + "{component}/update"
self.airbyte_endpoint_update_sources = airbyte_api_update_component.format(
component="sources"
)
self.airbyte_endpoint_update_destinations = airbyte_api_update_component.format(
component="destinations"
)
self.airbyte_endpoint_delete_connection = (
airbyte_api_base_endpoint + "connections/delete"
)
self.airbyte_endpoint_list_destination_definitions = (
airbyte_api_base_endpoint + "destination_definitions/list"
)
self.airbyte_endpoint_list_source_definitions = (
airbyte_api_base_endpoint + "source_definitions/list"
)
self.airbyte_endpoint_get_source_definition = (
airbyte_api_base_endpoint + "source_definition_specifications/get"
)
self.airbyte_endpoint_get_destination_definition = (
airbyte_api_base_endpoint + "destination_definition_specifications/get"
)
try:
self.airbyte_workspace_id = self.api_call(airbyte_endpoint_list_workspaces)[
"workspaces"
][0]["workspaceId"]
self.standard_request_body = {"workspaceId": self.airbyte_workspace_id}
self.airbyte_connections_list = self.api_call(
self.airbyte_endpoint_list_connections, self.standard_request_body
)["connections"]
self.airbyte_sources_list = self.api_call(
self.airbyte_endpoint_list_sources, self.standard_request_body
)["sources"]
self.airbyte_destinations_list = self.api_call(
self.airbyte_endpoint_list_destinations, self.standard_request_body
)["destinations"]
except AirbyteApiCallerException as e:
raise AirbyteApiCallerException(
f"Couldn't retrieve Airbyte connections, sources and destinations {e}"
)
def load_definitions(self):
self.destination_definitions = self.api_call(
self.airbyte_endpoint_list_destination_definitions,
self.standard_request_body,
)["destinationDefinitions"]
self.source_definitions = self.api_call(
self.airbyte_endpoint_list_source_definitions, self.standard_request_body
)["sourceDefinitions"]
|
import torch
from data import TestDataset
import numpy as np
from network import Discriminator,Generator
from torch.autograd import Variable
import time
from utils import *
import importlib
import argparse
import os  # needed by init_dir(); may also be provided indirectly by `from utils import *`
test_time = False
def parse_args():
parser = argparse.ArgumentParser( description = "bicubic" )
parser.add_argument("-input_list")
parser.add_argument('-landmark_list')
parser.add_argument('-resume_model',help='resume_model dirname')
parser.add_argument("-subdir",help='output_dir = save/$resume_model/test/$subdir')
parser.add_argument('--batch_size',type=int , default = 256 )
flag_parser = parser.add_mutually_exclusive_group(required=False)#whether the input images are in the format of label1/img1 label2/img2
flag_parser.add_argument("--folder",dest='folder',action="store_true")
flag_parser.add_argument("--nofolder",dest='folder',action='store_false')
parser.set_defaults( folder= True )
args = parser.parse_args()
return args
def init_dir(args):
os.system( 'mkdir -p {}'.format('/'.join([args.resume_model,'test',args.subdir,'single'])))
os.system( 'mkdir -p {}'.format('/'.join([args.resume_model,'test',args.subdir,'grid'])))
if __name__ == "__main__":
args = parse_args()
init_dir(args)
img_list = open(args.input_list,'r').read().split('\n')
img_list.pop()
lm_list = open(args.landmark_list,'r').read().split('\n')
lm_list.pop()
#input
train_config = importlib.import_module( '.'.join( [ *args.resume_model.split('/') , 'config'] ) )
dataloader = torch.utils.data.DataLoader( TestDataset( img_list , lm_list ) , batch_size = args.batch_size , shuffle = False , num_workers = 8 , pin_memory = True)
G = Generator(zdim = train_config.G['zdim'], use_batchnorm = train_config.G['use_batchnorm'] , use_residual_block = train_config.G['use_residual_block'] , num_classes = train_config.G['num_classes']).cuda()
D = Discriminator(use_batchnorm = train_config.D['use_batchnorm']).cuda()
if args.resume_model is not None:
e1 = resume_model( G , args.resume_model )
e2 = resume_model( D , args.resume_model )
assert e1 == e2
set_requires_grad(G,False)
set_requires_grad(D,False)
    tt = time.time()  # baseline timestamp for the optional test_time profiling prints below
    for step,batch in enumerate(dataloader):
if test_time:
print("step : ", step)
t_pre = time.time()
print("preprocess time : ",t_pre - tt )
tt = t_pre
for k in batch:
            batch[k] = Variable( batch[k].cuda(non_blocking=True) )
left_eye_patch = batch['left_eye']
right_eye_patch = batch['right_eye']
nose_patch = batch['nose']
mouth_patch = batch['mouth']
img = batch['img']
img32 = batch['img32']
img64 = batch['img64']
#img_frontal = batch['img_frontal']
#label = batch['label']
#print(torch.min(img)[0] , torch.max(img)[0] )
#print(torch.min(left_eye_patch)[0] , torch.max(left_eye_patch)[0] )
z = Variable( torch.FloatTensor( np.random.uniform(-1,1,(len(batch['img']),train_config.G['zdim'])) ).cuda() )
if test_time:
t_mv_to_cuda = time.time()
print("mv_to_cuda time : ",t_mv_to_cuda - tt )
tt = t_mv_to_cuda
img128_fake , img64_fake , img32_fake , G_encoder_outputs , local_predict , le_fake , re_fake , nose_fake , mouth_fake , local_input = G( batch['img'] , batch['img64'] , batch['img32'] , batch['left_eye'] , batch['right_eye'] , batch['nose'] , batch['mouth'] , z , use_dropout = False )
if test_time:
t_forward_G = time.time()
print("forward_G time : ",t_forward_G - tt )
tt = t_forward_G
for i in range(img128_fake.shape[0]):
img_name = img_list[step*args.batch_size+i].split('/')[-1]
save_image(img128_fake[i].data , '/'.join([args.resume_model,'test',args.subdir,'single',img_name]) )
#print(resize(right_eye_patch[i].data.cpu(),(128,128)).shape)
            # per-sample grid image; indexing local_predict/local_input with [i] is assumed so all stacked tensors share one shape
            save_image(torch.stack([img128_fake[i].data.cpu(), batch['img'][i].data.cpu(), local_predict[i].data.cpu(), local_input[i].data.cpu()]), '/'.join([args.resume_model,'test',args.subdir,'grid',img_name]))
if test_time:
t_backward = time.time()
print("backward time : ",t_backward - tt )
tt = t_backward
        if test_time:
            t_numpy = time.time()
            print("numpy time: ", t_numpy - tt)
            tt = t_numpy
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DnsNameAvailabilityResult(Model):
"""Response for the CheckDnsNameAvailability API service call.
:param available: Domain availability (True/False).
:type available: bool
"""
_attribute_map = {
'available': {'key': 'available', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(DnsNameAvailabilityResult, self).__init__(**kwargs)
self.available = kwargs.get('available', None)
|
import os
import numpy as np
from numpy.lib.function_base import disp
import torch
import decord
from PIL import Image
from torchvision import transforms
from random_erasing import RandomErasing
import warnings
from decord import VideoReader, cpu
from torch.utils.data import Dataset
import video_transforms as video_transforms
import volume_transforms as volume_transforms
class VideoClsDataset(Dataset):
"""Load your own video classification dataset."""
def __init__(self, anno_path, data_path, mode='train', clip_len=8,
frame_sample_rate=2, crop_size=224, short_side_size=256,
new_height=256, new_width=340, keep_aspect_ratio=True,
num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,args=None):
self.anno_path = anno_path
self.data_path = data_path
self.mode = mode
self.clip_len = clip_len
self.frame_sample_rate = frame_sample_rate
self.crop_size = crop_size
self.short_side_size = short_side_size
self.new_height = new_height
self.new_width = new_width
self.keep_aspect_ratio = keep_aspect_ratio
self.num_segment = num_segment
self.test_num_segment = test_num_segment
self.num_crop = num_crop
self.test_num_crop = test_num_crop
self.args = args
self.aug = False
self.rand_erase = False
if self.mode in ['train']:
self.aug = True
if self.args.reprob > 0:
self.rand_erase = True
if VideoReader is None:
raise ImportError("Unable to import `decord` which is required to read videos.")
import pandas as pd
cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
self.dataset_samples = list(cleaned.values[:, 0])
self.label_array = list(cleaned.values[:, 1])
if (mode == 'train'):
pass
elif (mode == 'validation'):
self.data_transform = video_transforms.Compose([
video_transforms.Resize(self.short_side_size, interpolation='bilinear'),
video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
elif mode == 'test':
self.data_resize = video_transforms.Compose([
video_transforms.Resize(size=(short_side_size), interpolation='bilinear')
])
self.data_transform = video_transforms.Compose([
volume_transforms.ClipToTensor(),
video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
self.test_seg = []
self.test_dataset = []
self.test_label_array = []
for ck in range(self.test_num_segment):
for cp in range(self.test_num_crop):
for idx in range(len(self.label_array)):
sample_label = self.label_array[idx]
self.test_label_array.append(sample_label)
self.test_dataset.append(self.dataset_samples[idx])
self.test_seg.append((ck, cp))
def __getitem__(self, index):
if self.mode == 'train':
args = self.args
scale_t = 1
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during training".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)
if args.num_sample > 1:
frame_list = []
label_list = []
index_list = []
for _ in range(args.num_sample):
new_frames = self._aug_frame(buffer, args)
label = self.label_array[index]
frame_list.append(new_frames)
label_list.append(label)
index_list.append(index)
return frame_list, label_list, index_list, {}
else:
buffer = self._aug_frame(buffer, args)
return buffer, self.label_array[index], index, {}
elif self.mode == 'validation':
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
if len(buffer) == 0:
while len(buffer) == 0:
warnings.warn("video {} not correctly loaded during validation".format(sample))
index = np.random.randint(self.__len__())
sample = self.dataset_samples[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_transform(buffer)
return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0]
elif self.mode == 'test':
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
while len(buffer) == 0:
warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(\
str(self.test_dataset[index]), chunk_nb, split_nb))
index = np.random.randint(self.__len__())
sample = self.test_dataset[index]
chunk_nb, split_nb = self.test_seg[index]
buffer = self.loadvideo_decord(sample)
buffer = self.data_resize(buffer)
if isinstance(buffer, list):
buffer = np.stack(buffer, 0)
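            # Each test index maps to one (temporal chunk, spatial crop) pair; the steps below
            # convert (chunk_nb, split_nb) into the start offsets of that particular crop.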
spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
/ (self.test_num_crop - 1)
temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len) \
/ (self.test_num_segment - 1), 0)
temporal_start = int(chunk_nb * temporal_step)
spatial_start = int(split_nb * spatial_step)
if buffer.shape[1] >= buffer.shape[2]:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
spatial_start:spatial_start + self.short_side_size, :, :]
else:
buffer = buffer[temporal_start:temporal_start + self.clip_len, \
:, spatial_start:spatial_start + self.short_side_size, :]
buffer = self.data_transform(buffer)
return buffer, self.test_label_array[index], sample.split("/")[-1].split(".")[0], \
chunk_nb, split_nb
else:
            raise NameError('mode {} unknown'.format(self.mode))
def _aug_frame(
self,
buffer,
args,
):
aug_transform = video_transforms.create_random_augment(
input_size=(self.crop_size, self.crop_size),
auto_augment=args.aa,
interpolation=args.train_interpolation,
)
buffer = [
transforms.ToPILImage()(frame) for frame in buffer
]
buffer = aug_transform(buffer)
buffer = [transforms.ToTensor()(img) for img in buffer]
buffer = torch.stack(buffer) # T C H W
buffer = buffer.permute(0, 2, 3, 1) # T H W C
# T H W C
buffer = tensor_normalize(
buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
)
# T H W C -> C T H W.
buffer = buffer.permute(3, 0, 1, 2)
# Perform data augmentation.
scl, asp = (
[0.08, 1.0],
[0.75, 1.3333],
)
buffer = spatial_sampling(
buffer,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=self.crop_size,
random_horizontal_flip=False if args.data_set == 'SSV2' else True ,
inverse_uniform_sampling=False,
aspect_ratio=asp,
scale=scl,
motion_shift=False
)
if self.rand_erase:
erase_transform = RandomErasing(
args.reprob,
mode=args.remode,
max_count=args.recount,
num_splits=args.recount,
device="cpu",
)
buffer = buffer.permute(1, 0, 2, 3)
buffer = erase_transform(buffer)
buffer = buffer.permute(1, 0, 2, 3)
return buffer
def loadvideo_decord(self, sample, sample_rate_scale=1):
"""Load video content using Decord"""
fname = sample
if not (os.path.exists(fname)):
return []
# avoid hanging issue
if os.path.getsize(fname) < 1 * 1024:
print('SKIP: ', fname, " - ", os.path.getsize(fname))
return []
try:
if self.keep_aspect_ratio:
vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
else:
vr = VideoReader(fname, width=self.new_width, height=self.new_height,
num_threads=1, ctx=cpu(0))
        except Exception:
print("video cannot be loaded by decord: ", fname)
return []
if self.mode == 'test':
all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]
while len(all_index) < self.clip_len:
all_index.append(all_index[-1])
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
# handle temporal segments
converted_len = int(self.clip_len * self.frame_sample_rate)
seg_len = len(vr) // self.num_segment
all_index = []
for i in range(self.num_segment):
if seg_len <= converted_len:
index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)
index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))
index = np.clip(index, 0, seg_len - 1).astype(np.int64)
else:
end_idx = np.random.randint(converted_len, seg_len)
str_idx = end_idx - converted_len
index = np.linspace(str_idx, end_idx, num=self.clip_len)
index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)
index = index + i*seg_len
all_index.extend(list(index))
all_index = all_index[::int(sample_rate_scale)]
vr.seek(0)
buffer = vr.get_batch(all_index).asnumpy()
return buffer
def __len__(self):
if self.mode != 'test':
return len(self.dataset_samples)
else:
return len(self.test_dataset)
def spatial_sampling(
frames,
spatial_idx=-1,
min_scale=256,
max_scale=320,
crop_size=224,
random_horizontal_flip=True,
inverse_uniform_sampling=False,
aspect_ratio=None,
scale=None,
motion_shift=False,
):
"""
Perform spatial sampling on the given video frames. If spatial_idx is
-1, perform random scale, random crop, and random flip on the given
frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
with the given spatial_idx.
Args:
frames (tensor): frames of images sampled from the video. The
dimension is `num frames` x `height` x `width` x `channel`.
spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
or 2, perform left, center, right crop if width is larger than
            height, and perform top, center, bottom crop if height is larger
than width.
min_scale (int): the minimal size of scaling.
max_scale (int): the maximal size of scaling.
crop_size (int): the size of height and width used to crop the
frames.
inverse_uniform_sampling (bool): if True, sample uniformly in
[1 / max_scale, 1 / min_scale] and take a reciprocal to get the
scale. If False, take a uniform sample from [min_scale,
max_scale].
aspect_ratio (list): Aspect ratio range for resizing.
scale (list): Scale range for resizing.
motion_shift (bool): Whether to apply motion shift for resizing.
Returns:
frames (tensor): spatially sampled frames.
"""
assert spatial_idx in [-1, 0, 1, 2]
if spatial_idx == -1:
if aspect_ratio is None and scale is None:
frames, _ = video_transforms.random_short_side_scale_jitter(
images=frames,
min_size=min_scale,
max_size=max_scale,
inverse_uniform_sampling=inverse_uniform_sampling,
)
frames, _ = video_transforms.random_crop(frames, crop_size)
else:
transform_func = (
video_transforms.random_resized_crop_with_shift
if motion_shift
else video_transforms.random_resized_crop
)
frames = transform_func(
images=frames,
target_height=crop_size,
target_width=crop_size,
scale=scale,
ratio=aspect_ratio,
)
if random_horizontal_flip:
frames, _ = video_transforms.horizontal_flip(0.5, frames)
else:
# The testing is deterministic and no jitter should be performed.
# min_scale, max_scale, and crop_size are expect to be the same.
assert len({min_scale, max_scale, crop_size}) == 1
frames, _ = video_transforms.random_short_side_scale_jitter(
frames, min_scale, max_scale
)
frames, _ = video_transforms.uniform_crop(frames, crop_size, spatial_idx)
return frames
def tensor_normalize(tensor, mean, std):
"""
Normalize a given tensor by subtracting the mean and dividing the std.
Args:
tensor (tensor): tensor to normalize.
mean (tensor or list): mean value to subtract.
std (tensor or list): std to divide.
"""
if tensor.dtype == torch.uint8:
tensor = tensor.float()
tensor = tensor / 255.0
if type(mean) == list:
mean = torch.tensor(mean)
if type(std) == list:
std = torch.tensor(std)
tensor = tensor - mean
tensor = tensor / std
return tensor
class VideoMAE(torch.utils.data.Dataset):
"""Load your own video classification dataset.
Parameters
----------
root : str, required.
Path to the root folder storing the dataset.
setting : str, required.
A text file describing the dataset, each line per video sample.
There are three items in each line: (1) video path; (2) video length and (3) video label.
train : bool, default True.
Whether to load the training or validation set.
test_mode : bool, default False.
Whether to perform evaluation on the test set.
        Usually a three-crop or ten-crop evaluation strategy is involved.
name_pattern : str, default None.
The naming pattern of the decoded video frames.
For example, img_00012.jpg.
video_ext : str, default 'mp4'.
        If video_loader is set to True, please specify the video format accordingly.
is_color : bool, default True.
Whether the loaded image is color or grayscale.
modality : str, default 'rgb'.
Input modalities, we support only rgb video frames for now.
Will add support for rgb difference image and optical flow image later.
num_segments : int, default 1.
Number of segments to evenly divide the video into clips.
A useful technique to obtain global video-level information.
        Limin Wang, et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.
num_crop : int, default 1.
Number of crops for each image. default is 1.
Common choices are three crops and ten crops during evaluation.
new_length : int, default 1.
The length of input video clip. Default is a single image, but it can be multiple video frames.
For example, new_length=16 means we will extract a video clip of consecutive 16 frames.
new_step : int, default 1.
Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.
new_step=2 means we will extract a video clip of every other frame.
temporal_jitter : bool, default False.
Whether to temporally jitter if new_step > 1.
video_loader : bool, default False.
Whether to use video loader to load data.
use_decord : bool, default True.
Whether to use Decord video loader to load data. Otherwise use mmcv video loader.
transform : function, default None.
A function that takes data and label and transforms them.
data_aug : str, default 'v1'.
        Different types of automatic data augmentation. Supports v1, v2, v3 and v4.
lazy_init : bool, default False.
If set to True, build a dataset instance without loading any dataset.
"""
def __init__(self,
root,
setting,
train=True,
test_mode=False,
name_pattern='img_%05d.jpg',
video_ext='mp4',
is_color=True,
modality='rgb',
num_segments=1,
num_crop=1,
new_length=1,
new_step=1,
transform=None,
temporal_jitter=False,
video_loader=False,
use_decord=False,
lazy_init=False):
super(VideoMAE, self).__init__()
self.root = root
self.setting = setting
self.train = train
self.test_mode = test_mode
self.is_color = is_color
self.modality = modality
self.num_segments = num_segments
self.num_crop = num_crop
self.new_length = new_length
self.new_step = new_step
self.skip_length = self.new_length * self.new_step
self.temporal_jitter = temporal_jitter
self.name_pattern = name_pattern
self.video_loader = video_loader
self.video_ext = video_ext
self.use_decord = use_decord
self.transform = transform
self.lazy_init = lazy_init
if not self.lazy_init:
self.clips = self._make_dataset(root, setting)
if len(self.clips) == 0:
raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n"
"Check your data directory (opt.data-dir)."))
def __getitem__(self, index):
directory, target = self.clips[index]
if self.video_loader:
if '.' in directory.split('/')[-1]:
# data in the "setting" file already have extension, e.g., demo.mp4
video_name = directory
else:
# data in the "setting" file do not have extension, e.g., demo
# So we need to provide extension (i.e., .mp4) to complete the file name.
video_name = '{}.{}'.format(directory, self.video_ext)
decord_vr = decord.VideoReader(video_name, num_threads=1)
duration = len(decord_vr)
segment_indices, skip_offsets = self._sample_train_indices(duration)
images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets)
process_data, mask = self.transform((images, None)) # T*C,H,W
process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0,1) # T*C,H,W -> T,C,H,W -> C,T,H,W
return (process_data, mask)
def __len__(self):
return len(self.clips)
def _make_dataset(self, directory, setting):
if not os.path.exists(setting):
raise(RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " % (setting)))
clips = []
with open(setting) as split_f:
data = split_f.readlines()
for line in data:
line_info = line.split(' ')
# line format: video_path, video_duration, video_label
if len(line_info) < 2:
                    raise(RuntimeError('Video input format is not correct, missing one or more elements. %s' % line))
clip_path = os.path.join(line_info[0])
target = int(line_info[1])
item = (clip_path, target)
clips.append(item)
return clips
def _sample_train_indices(self, num_frames):
average_duration = (num_frames - self.skip_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)),
average_duration)
offsets = offsets + np.random.randint(average_duration,
size=self.num_segments)
elif num_frames > max(self.num_segments, self.skip_length):
offsets = np.sort(np.random.randint(
num_frames - self.skip_length + 1,
size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
if self.temporal_jitter:
skip_offsets = np.random.randint(
self.new_step, size=self.skip_length // self.new_step)
else:
skip_offsets = np.zeros(
self.skip_length // self.new_step, dtype=int)
return offsets + 1, skip_offsets
def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets):
sampled_list = []
frame_id_list = []
for seg_ind in indices:
offset = int(seg_ind)
for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
if offset + skip_offsets[i] <= duration:
frame_id = offset + skip_offsets[i] - 1
else:
frame_id = offset - 1
frame_id_list.append(frame_id)
if offset + self.new_step < duration:
offset += self.new_step
try:
video_data = video_reader.get_batch(frame_id_list).asnumpy()
sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)]
        except Exception:
            raise RuntimeError('Error occurred in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration))
return sampled_list
|
def exec_proj(name):
__import__(name)
|
# Scrapy settings for SWE_Project project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from shutil import which
from dotenv import load_dotenv
import os
load_dotenv(".env")
SELENIUM_DRIVER_NAME = 'firefox'
#SELENIUM_DRIVER_EXECUTABLE_PATH = which('geckodriver')
H_USER = os.getenv('HUB_USER')
H_PASS = os.getenv('HUB_PASSWORD')
SELENIUM_COMMAND_EXECUTOR = 'http://{}:{}@45.79.131.228:4444'.format(H_USER, H_PASS)
SELENIUM_DRIVER_ARGUMENTS=[]
#SELENIUM_DRIVER_ARGUMENTS=['-headless'] # '--headless' if using chrome instead of firefox
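# The hub credentials above are read from a local ".env" file via python-dotenv.
# A hypothetical ".env" might look like (values are placeholders):
#   HUB_USER=someuser
#   HUB_PASSWORD=s3cret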
BOT_NAME = 'SWE_Project'
SPIDER_MODULES = ['SWE_Project.spiders']
NEWSPIDER_MODULE = 'SWE_Project.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Leopard Bot'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 2
# Ensure that requests are downloaded at random time intervals so as to reduce the chance of bot detection.
RANDOMIZE_DOWNLOAD_DELAY = True
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 4
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy_selenium.SeleniumMiddleware': 800,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
SPLASH_URL = 'https://xvd9vz4b-splash.scrapinghub.com/'
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'SWE_Project.pipelines.SweProjectPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
#!/usr/bin/env python
from AudioPython import *
from AudioPython.dsp import *
import sys
def test_pink_noise():
channels = ((pink_noise(amplitude=0.01),),)
samples = compute_samples(channels)
for i in range(1000):
yield_raw(samples)
if __name__ == "__main__":
test_pink_noise()
|
data_dir = '/home/cn/data/sample_tick/'
import os
import shutil # higher level file operations - more choices for error handling
os.path.join('usr', 'bin', 'spam') # join path
cur_dir = os.getcwd() # current working dir
os.chdir('/tmp'); os.getcwd() # move around
os.chdir('/home/cn/program/python/sandbox');
os.getcwd()
if not os.path.exists('/tmp/blah'):
os.mkdir('/tmp/blah')
os.rmdir('/tmp/blah') # only work if the dir is empty
shutil.rmtree('/tmp/blah', ignore_errors=True) # works for most dir - shutils is more adaptable
## ABS and REL paths
os.path.abspath('.')
os.path.isabs('.')
os.path.relpath('/tmp/blah')
## deal with names - split names etc.
os.path.basename(os.path.join(os.getcwd(), 'test_file.py'))
os.path.dirname(os.path.join(os.getcwd(), 'test_file.py'))
os.path.split(os.path.join(os.getcwd(), 'test_file.py'))
# zip, unzip, tar, untar etc.
shutil.disk_usage('.')
# create a new file
if not os.path.exists('/tmp/to_arc'):
os.mkdir('/tmp/to_arc')
to_arc = '/tmp/to_arc/test_arc.txt'
with open(to_arc, 'a') as fh: # touch behavior - will throw if no immediate dir available
os.utime(to_arc, times=None)
fh.writelines('\n'.join(['ha', 'asdfjalsdjadf'])) # writelines does NOT add new lines. Genius!
shutil.get_archive_formats() # all supported formats - depending on other tools in the os
# make archive needs a dir to archive so you need to move everything into that dir first
# syntax is quite tricky
shutil.make_archive('/tmp/test_arc.txt', base_dir='to_arc', root_dir='/tmp', format='gztar') # zip or tar work too
shutil.unpack_archive(('/tmp/test_arc.txt.tar.gz'), extract_dir='/tmp/unpack/crazy')
for root, dirs, files in os.walk('/tmp/unpack/crazy'): ## hmm - need to review os.walk()
print(files)
# finding directory contents
base_dir = os.environ['HOME'] + '/data/sample_tick'
# first way:
kk = os.listdir(base_dir) # list things in that directory only - level 1
for name in kk:
name = os.path.join(base_dir, name)
print( name, ", is dir:", os.path.isdir(name), ", is file:", os.path.isfile(name))
# second way:
for cur_dir, subdirs, filenames in os.walk(base_dir):
""" per iteration, list all subdirs and filenames under cur_dir, then go deeper into subdirs in the
next iterations. It basically does a tree_walk
"""
print( 'the current dir is %s' % cur_dir)
for subdir in subdirs:
print('\tthe current subdir is %s' % subdir)
for filename in filenames:
print('\tthe current filename is %s' % filename)
# TODO: could use regex to detect if a file is a .gz or .csv file and then do some stuff with it
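# A minimal sketch of the TODO above (assumes base_dir defined earlier still exists):
import re
ext_pat = re.compile(r'\.(gz|csv)$', re.IGNORECASE)
for cur_dir, subdirs, filenames in os.walk(base_dir):
    for filename in filenames:
        if ext_pat.search(filename):
            print('matched .gz/.csv file:', os.path.join(cur_dir, filename))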
|
# -*- coding: utf-8 -*-
# @Time : 2018/11/4 15:32
# @Author : QuietWoods
# @FileName: evaluate.py
# @Software: PyCharm
""" evaluation scripts"""
import re
import os
from os.path import join
import logging
import tempfile
import subprocess as sp
from cytoolz import curry
from pyrouge import Rouge155
from pyrouge.utils import log
try:
_ROUGE_PATH = os.environ['ROUGE']
except KeyError:
print('Warning: ROUGE is not configured')
_ROUGE_PATH = None
def eval_rouge(dec_dir, ref_dir,
cmd='-c 95 -r 1000 -n 2 -m', system_id=1):
""" evaluate by original Perl implementation"""
# silence pyrouge logging
assert _ROUGE_PATH is not None
log.get_global_console_logger().setLevel(logging.WARNING)
with tempfile.TemporaryDirectory() as tmp_dir:
Rouge155.convert_summaries_to_rouge_format(
dec_dir, join(tmp_dir, 'dec'))
Rouge155.convert_summaries_to_rouge_format(
ref_dir, join(tmp_dir, 'ref'))
Rouge155.write_config_static(
join(tmp_dir, 'dec'), "",
join(tmp_dir, 'ref'), "",
join(tmp_dir, 'settings.xml'), system_id
)
cmd = (join(_ROUGE_PATH, 'ROUGE-1.5.5.pl')
+ ' -e {} '.format(join(_ROUGE_PATH, 'data'))
+ cmd
+ ' -a {}'.format(join(tmp_dir, 'settings.xml')))
output = sp.check_output(cmd.split(' '), universal_newlines=True)
return output
try:
_METEOR_PATH = os.environ['METEOR']
except KeyError:
print('Warning: METEOR is not configured')
_METEOR_PATH = None
def eval_meteor(dec_file, ref_file):
""" METEOR evaluation"""
assert _METEOR_PATH is not None
cmd = 'java -Xmx2G -jar {} {} {} -l en -norm'.format(
_METEOR_PATH, dec_file, ref_file)
output = sp.check_output(cmd.split(' '), universal_newlines=True)
return output
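# Minimal usage sketch (added; directory and file paths are placeholders). Both helpers shell out
# to external tools, so the ROUGE and METEOR environment variables must point at valid installs.
if __name__ == '__main__':
    print(eval_rouge('outputs/decoded', 'outputs/reference'))
    print(eval_meteor('outputs/decoded.txt', 'outputs/reference.txt'))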
|
import requests
import config
import re
def login_to_vk(username, password):
session = requests.Session()
first_page = session.get(config.VK_MAIN_PAGE).text
ip_h = re.compile(r'ip_h=(.+?)&').search(first_page).group(1)
lg_h = re.compile(r'lg_h=(.+?)&').search(first_page).group(1)
data_to_send = config.VK_POST_DATA.format(ip_h, lg_h, username, password)
session.post(config.VK_POST_PAGE, data=data_to_send)
return session
def login_to_vk2(username, password):
session = requests.Session()
first_page = session.get(config.VK_MAIN_PAGE).text
submit_to = re.compile(r'method="post" action="(.+?)"').search(first_page).group(1)
data_to_send = config.VK_POST_DATA2.format(username, password)
session.post(submit_to, data=data_to_send)
print(session.get(config.VK_FEED_PAGE).text)
return session
|
"""using the different database object in the settings.
So the Spanglish app uses a different database / server
than other apps.
"""
import logging
logger = logging.getLogger('spanglish')
APP_LABEL = 'spanglish'
DB = 'spanglish'
class DBRouter(object):
"""A router to control Q and Devops db operations."""
def db_for_read(self, model, **hints):
"""Attempt to read auth models go to auth_db."""
if model._meta.app_label == APP_LABEL:
return DB
return None
def db_for_write(self, model, **hints):
"""Attempt to write spanglish models go to Spanglish db."""
if model._meta.app_label == APP_LABEL:
return DB
return None
def allow_relation(self, obj1, obj2, **hints):
"""Allow relations if a model in the spanglish app is involved."""
if obj1._meta.app_label == APP_LABEL or \
obj2._meta.app_label == APP_LABEL:
return True
return None
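# To activate this router, reference it from the project's settings via Django's standard
# DATABASE_ROUTERS setting; the dotted path below is a placeholder that depends on where
# this module lives in the project:
#
#   DATABASE_ROUTERS = ['spanglish.db_router.DBRouter']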
|
import os
import re
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='rtorrent_magnet')
pat = re.compile('xt=urn:btih:([^&/]+)')
class PluginRtorrentMagnet:
"""
    Process Magnet URIs into rtorrent compatible torrent files
    Magnet URIs will look something like this:
magnet:?xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Name
rTorrent would expect to see something like meta-URL_Escaped_Torrent_Name.torrent
The torrent file must also contain the text:
d10:magnet-uri88:xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Namee
This plugin will check if a download URL is a magnet link, and then create the appropriate torrent file.
Example:
rtorrent_magnet: ~/torrents/
"""
schema = {'type': 'string', 'format': 'path'}
def write_torrent_file(self, task, entry, path):
path = os.path.join(path, 'meta-%s.torrent' % entry['title'])
path = os.path.expanduser(path)
if task.options.test:
logger.info('Would write: {}', path)
else:
logger.info('Writing rTorrent Magnet File: {}', path)
with open(path, 'w') as f:
f.write('d10:magnet-uri%d:%se' % (len(entry['url']), entry['url']))
entry['output'] = path
# Run after download plugin to only pick up entries it did not already handle
@plugin.priority(0)
def on_task_output(self, task, config):
for entry in task.accepted:
if 'output' in entry:
logger.debug(
'Ignoring, {} already has an output file: {}', entry['title'], entry['output']
)
continue
for url in entry.get('urls', [entry['url']]):
if url.startswith('magnet:'):
logger.debug('Magnet URI detected for url {} ({})', url, entry['title'])
if pat.search(url):
self.write_torrent_file(task, entry, entry.get('path', config))
break
else:
logger.warning('Unrecognized Magnet URI Format: {}', url)
@event('plugin.register')
def register_plugin():
plugin.register(PluginRtorrentMagnet, 'rtorrent_magnet', api_ver=2)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 13 02:12:00 2021
@author: J
"""
import time
import cv2
import numpy as np
class ObjectDetection:
def __init__(self):
self.MODEL = cv2.dnn.readNet(
'models/yolov3-tiny.weights',
'models/yolov3-tiny.cfg'
)
self.CLASSES = []
with open("models/coco.names", "r") as f:
self.CLASSES = [line.strip() for line in f.readlines()]
self.OUTPUT_LAYERS = [self.MODEL.getLayerNames()[i[0] - 1] for i in self.MODEL.getUnconnectedOutLayers()]
self.COLORS = np.random.uniform(0, 255, size=(len(self.CLASSES), 3))
self.COLORS /= (np.sum(self.COLORS**2, axis=1)**0.5/255)[np.newaxis].T
def detectObj(self, snap):
height, width, channels = snap.shape
blob = cv2.dnn.blobFromImage(snap, 1/255, (416, 416), swapRB=True, crop=False)
self.MODEL.setInput(blob)
outs = self.MODEL.forward(self.OUTPUT_LAYERS)
        # Showing information on the screen
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
# Object detected
center_x = int(detection[0]*width)
center_y = int(detection[1]*height)
w = int(detection[2]*width)
h = int(detection[3]*height)
# Rectangle coordinates
x = int(center_x - w/2)
y = int(center_y - h/2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(self.CLASSES[class_ids[i]])
color = self.COLORS[i]
cv2.rectangle(snap, (x, y), (x + w, y + h), color, 2)
cv2.putText(snap, label, (x, y - 5), font, 2, color, 2)
return snap
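# Note (added): the "models/" paths used above are resolved relative to the process working
# directory; the yolov3-tiny weights/cfg files and coco.names are assumed to be present there.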
class VideoStreaming(object):
def __init__(self):
super(VideoStreaming, self).__init__()
self.VIDEO = cv2.VideoCapture(0)
cv2.destroyAllWindows()
self.MODEL = ObjectDetection()
self._preview = True
self._flipH = False
self._detect = False
self._exposure = self.VIDEO.get(cv2.CAP_PROP_EXPOSURE)
self._contrast = self.VIDEO.get(cv2.CAP_PROP_CONTRAST)
@property
def preview(self):
return self._preview
@preview.setter
def preview(self, value):
self._preview = bool(value)
@property
def flipH(self):
return self._flipH
@flipH.setter
def flipH(self, value):
self._flipH = bool(value)
@property
def detect(self):
return self._detect
@detect.setter
def detect(self, value):
self._detect = bool(value)
@property
def exposure(self):
return self._exposure
@exposure.setter
def exposure(self, value):
self._exposure = value
self.VIDEO.set(cv2.CAP_PROP_EXPOSURE, self._exposure)
@property
def contrast(self):
return self._contrast
@contrast.setter
def contrast(self, value):
self._contrast = value
self.VIDEO.set(cv2.CAP_PROP_CONTRAST, self._contrast)
def show(self):
while(self.VIDEO.isOpened()):
ret, snap = self.VIDEO.read()
if self.flipH:
snap = cv2.flip(snap, 1)
if ret == True:
if self._preview:
# snap = cv2.resize(snap, (0, 0), fx=0.5, fy=0.5)
if self.detect:
snap = self.MODEL.detectObj(snap)
else:
snap = np.zeros((
int(self.VIDEO.get(cv2.CAP_PROP_FRAME_HEIGHT)),
int(self.VIDEO.get(cv2.CAP_PROP_FRAME_WIDTH))
), np.uint8)
label = 'camera disabled'
H, W = snap.shape
font = cv2.FONT_HERSHEY_PLAIN
color = (255,255,255)
cv2.putText(snap, label, (W//2 - 100, H//2), font, 2, color, 2)
frame = cv2.imencode('.jpg', snap)[1].tobytes()
cv2.destroyAllWindows()
yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
time.sleep(0.01)
else:
break
print('off')
|
"""
Implement weak defense model for Athena on top of IBM ART.
It wraps a Keras model into a weak defense in the Athena ensemble.
@author: Ying Meng (y(dot)meng201011(at)gmail(dot)com)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import six
from art.classifiers.classifier import Classifier, ClassifierNeuralNetwork, ClassifierGradients
from models.image_processor import transform
logger = logging.getLogger(__name__)
class WeakDefense(ClassifierNeuralNetwork, ClassifierGradients, Classifier):
def __init__(self, model, trans_configs, use_logits=False, channel_index=3,
clip_values=(0., 1.), input_layer=0, output_layer=0, ):
super(WeakDefense, self).__init__(clip_values=clip_values,
preprocessing_defences=None,
postprocessing_defences=None,
preprocessing=(0, 1),
channel_index=channel_index, )
self._model = model
self._trans_configs = trans_configs
self._channel_index = channel_index
self._input_layer = input_layer
self._output_layer = output_layer
if "<class 'tensorflow" in str(type(model)):
self.is_tensorflow = True
elif "<class 'keras" in str(type(model)):
self.is_tensorflow = False
else:
raise TypeError("Type of model not recognized:" + str(type(model)))
self._initialize_params(model, use_logits, input_layer, output_layer)
def loss_gradient(self, x, y, **kwargs):
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:return: Array of gradients of the same shape as `x`.
:rtype: `np.ndarray`
"""
# Check shape of `x` because of custom function for `_loss_gradients`
if self._input_shape != x.shape[1:]:
raise ValueError(
"Error when checking x: expected x to have shape {} but got array with shape {}".format(
self._input_shape, x.shape[1:]
)
)
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)
# Adjust the shape of y for loss functions that do not take labels in one-hot encoding
if self._reduce_labels:
y_preprocessed = np.argmax(y_preprocessed, axis=1)
# Compute gradients
gradients = self._loss_gradients([x_preprocessed, y_preprocessed])[0]
gradients = self._apply_preprocessing_gradient(x, gradients)
assert gradients.shape == x_preprocessed.shape
return gradients
def class_gradient(self, x, label=None, **kwargs):
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:type x: `np.ndarray`
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
output is computed for all samples. If multiple values as provided, the first dimension should
match the batch size of `x`, and each value will be used as target for its corresponding sample in
`x`. If `None`, then gradients for all classes will be computed for each sample.
:type label: `int` or `list`
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified.
:rtype: `np.ndarray`
"""
# Check value of label for computing gradients
if not (
label is None
or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes()))
or (
isinstance(label, np.ndarray)
and len(label.shape) == 1
and (label < self.nb_classes()).all()
and label.shape[0] == x.shape[0]
)
):
raise ValueError("Label %s is out of range." % str(label))
# Check shape of `x` because of custom function for `_loss_gradients`
if self._input_shape != x.shape[1:]:
raise ValueError(
"Error when checking x: expected x to have shape {} but got array with shape {}".format(
self._input_shape, x.shape[1:]
)
)
self._init_class_gradients(label=label)
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
if label is None:
# Compute the gradients w.r.t. all classes
gradients = np.swapaxes(np.array(self._class_gradients([x_preprocessed])), 0, 1)
elif isinstance(label, (int, np.integer)):
# Compute the gradients only w.r.t. the provided label
gradients = np.swapaxes(np.array(self._class_gradients_idx[label]([x_preprocessed])), 0, 1)
assert gradients.shape == (x_preprocessed.shape[0], 1) + self.input_shape
else:
# For each sample, compute the gradients w.r.t. the indicated target class (possibly distinct)
unique_label = list(np.unique(label))
gradients = np.array([self._class_gradients_idx[l]([x_preprocessed]) for l in unique_label])
gradients = np.swapaxes(np.squeeze(gradients, axis=1), 0, 1)
lst = [unique_label.index(i) for i in label]
gradients = np.expand_dims(gradients[np.arange(len(gradients)), lst], axis=1)
gradients = self._apply_preprocessing_gradient(x, gradients)
return gradients
def predict(self, x, batch_size=128, **kwargs):
"""
Perform prediction for a batch of inputs.
:param x: Test set.
:type x: `np.ndarray`
:param batch_size: Size of batches.
:type batch_size: `int`
:return: Array of predictions of shape `(nb_inputs, nb_classes)`.
:rtype: `np.ndarray`
"""
from art.config import ART_NUMPY_DTYPE
# Apply transformation
x_preprocessed = transform(x, self._trans_configs)
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x_preprocessed, y=None, fit=False)
# Run predictions with batching
predictions = np.zeros((x_preprocessed.shape[0], self.nb_classes()), dtype=ART_NUMPY_DTYPE)
for batch_index in range(int(np.ceil(x_preprocessed.shape[0] / float(batch_size)))):
begin, end = batch_index * batch_size, min((batch_index + 1) * batch_size, x_preprocessed.shape[0])
predictions[begin:end] = self._model.predict([x_preprocessed[begin:end]])
# Apply postprocessing
predictions = self._apply_postprocessing(preds=predictions, fit=False)
return predictions
def fit(self, x, y, batch_size=128, nb_epochs=20, **kwargs):
"""
Fit the classifier on the training set `(x, y)`.
:param x: Training data.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:param batch_size: Size of batches.
:type batch_size: `int`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param kwargs: Dictionary of framework-specific arguments. These should be parameters supported by the
`fit_generator` function in Keras and will be passed to this function as such. Including the number of
               epochs or the number of steps per epoch as part of this argument will result in an error.
:type kwargs: `dict`
:return: `None`
"""
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)
# Adjust the shape of y for loss functions that do not take labels in one-hot encoding
if self._reduce_labels:
y_preprocessed = np.argmax(y_preprocessed, axis=1)
gen = generator_fit(x_preprocessed, y_preprocessed, batch_size)
steps_per_epoch = max(int(x_preprocessed.shape[0] / batch_size), 1)
self._model.fit_generator(gen, steps_per_epoch=steps_per_epoch, epochs=nb_epochs, **kwargs)
def fit_generator(self, generator, nb_epochs=20, **kwargs):
"""
Fit the classifier using the generator that yields batches as specified.
:param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
                          training in Keras, it will be used directly.
:type generator: :class:`.DataGenerator`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param kwargs: Dictionary of framework-specific arguments. These should be parameters supported by the
`fit_generator` function in Keras and will be passed to this function as such. Including the number of
               epochs as part of this argument will result in an error.
:type kwargs: `dict`
:return: `None`
"""
from art.data_generators import KerasDataGenerator
# Try to use the generator as a Keras native generator, otherwise use it through the `DataGenerator` interface
if isinstance(generator, KerasDataGenerator) and \
(self.preprocessing_defences is None or self.preprocessing_defences == []) and \
self.preprocessing == (0, 1):
try:
self._model.fit_generator(generator.iterator, epochs=nb_epochs, **kwargs)
except ValueError:
logger.info('Unable to use data generator as Keras generator. Now treating as framework-independent.')
super(WeakDefense, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)
else:
super(WeakDefense, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)
@property
def layer_names(self):
"""
Return the hidden layers in the model, if applicable.
:return: The hidden layers in the model, input and output layers excluded.
:rtype: `list`
.. warning:: `layer_names` tries to infer the internal structure of the model.
This feature comes with no guarantees on the correctness of the result.
The intended order of the layers tries to match their order in the model, but this is not
guaranteed either.
"""
return self._layer_names
def get_activations(self, x, layer, batch_size):
"""
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
`nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
calling `layer_names`.
:param x: Input for computing the activations.
:type x: `np.ndarray`
:param layer: Layer for computing the activations
:type layer: `int` or `str`
:param batch_size: Size of batches.
:type batch_size: `int`
:return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
:rtype: `np.ndarray`
"""
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow.keras.backend as k
else:
import keras.backend as k
from art.config import ART_NUMPY_DTYPE
if isinstance(layer, six.string_types):
if layer not in self._layer_names:
raise ValueError("Layer name %s is not part of the graph." % layer)
layer_name = layer
elif isinstance(layer, int):
if layer < 0 or layer >= len(self._layer_names):
raise ValueError(
"Layer index %d is outside of range (0 to %d included)." % (layer, len(self._layer_names) - 1)
)
layer_name = self._layer_names[layer]
else:
raise TypeError("Layer must be of type `str` or `int`.")
layer_output = self._model.get_layer(layer_name).output
output_func = k.function([self._input], [layer_output])
if x.shape == self.input_shape:
x_expanded = np.expand_dims(x, 0)
else:
x_expanded = x
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False)
# Determine shape of expected output and prepare array
output_shape = output_func([x_preprocessed[0][None, ...]])[0].shape
activations = np.zeros((x_preprocessed.shape[0],) + output_shape[1:], dtype=ART_NUMPY_DTYPE)
# Get activations with batching
for batch_index in range(int(np.ceil(x_preprocessed.shape[0] / float(batch_size)))):
begin, end = batch_index * batch_size, min((batch_index + 1) * batch_size, x_preprocessed.shape[0])
activations[begin:end] = output_func([x_preprocessed[begin:end]])[0]
return activations
def set_learning_phase(self, train):
"""
Set the learning phase for the backend framework.
:param train: True to set the learning phase to training, False to set it to prediction.
:type train: `bool`
"""
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow.keras.backend as k
else:
import keras.backend as k
if isinstance(train, bool):
self._learning_phase = train
k.set_learning_phase(int(train))
def nb_classes(self):
"""
Return the number of output classes.
:return: Number of classes in the data.
:rtype: `int`
"""
return self._nb_classes
def save(self, filename, path=None):
"""
Save a model to file in the format specific to the backend framework. For Keras, .h5 format is used.
:param filename: Name of the file where to store the model.
:type filename: `str`
:param path: Path of the folder where to store the model. If no path is specified, the model will be stored in
the default data location of the library `ART_DATA_PATH`.
:type path: `str`
:return: None
"""
import os
if path is None:
from art.config import ART_DATA_PATH
full_path = os.path.join(ART_DATA_PATH, filename)
else:
full_path = os.path.join(path, filename)
folder = os.path.split(full_path)[0]
if not os.path.exists(folder):
os.makedirs(folder)
self._model.save(str(full_path))
logger.info("Model saved in path: %s.", full_path)
def _init_class_gradients(self, label=None):
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow.keras.backend as k
else:
import keras.backend as k
if len(self._output.shape) == 2:
nb_outputs = self._output.shape[1]
else:
raise ValueError("Unexpected output shape for classification in Keras model.")
if label is None:
logger.debug("Computing class gradients for all %i classes.", self.nb_classes())
if not hasattr(self, "_class_gradients"):
class_gradients = [k.gradients(self._predictions_op[:, i], self._input)[0] for i in range(nb_outputs)]
self._class_gradients = k.function([self._input], class_gradients)
else:
if isinstance(label, int):
unique_labels = [label]
else:
unique_labels = np.unique(label)
logger.debug("Computing class gradients for classes %s.", str(unique_labels))
if not hasattr(self, "_class_gradients_idx"):
self._class_gradients_idx = [None for _ in range(nb_outputs)]
for current_label in unique_labels:
if self._class_gradients_idx[current_label] is None:
class_gradients = [k.gradients(self._predictions_op[:, current_label], self._input)[0]]
self._class_gradients_idx[current_label] = k.function([self._input], class_gradients)
def _initialize_params(self, model, use_logits, input_layer, output_layer, synthesis=True, num_synthesis=10):
"""
Initialize most parameters of the classifier. This is a convenience function called by `__init__` and
`__setstate__` to avoid code duplication.
:param model: Keras model
:type model: `keras.models.Model`
:param use_logits: True if the output of the model are logits.
:type use_logits: `bool`
:param input_layer: Which layer to consider as the Input when the model has multiple input layers.
:type input_layer: `int`
:param output_layer: Which layer to consider as the Output when the model has multiple output layers.
:type output_layer: `int`
"""
# pylint: disable=E0401
if self.is_tensorflow:
import tensorflow as tf
if tf.executing_eagerly():
raise ValueError("TensorFlow is executing eagerly. Please disable eager execution.")
import tensorflow.keras as keras
import tensorflow.keras.backend as k
else:
import keras
import keras.backend as k
if hasattr(model, "inputs"):
self._input_layer = input_layer
self._input = model.inputs[input_layer]
else:
self._input = model.input
self._input_layer = 0
if hasattr(model, "outputs"):
self._output = model.outputs[output_layer]
self._output_layer = output_layer
else:
self._output = model.output
self._output_layer = 0
_, self._nb_classes = k.int_shape(self._output)
self._input_shape = k.int_shape(self._input)[1:]
logger.debug(
"Inferred %i classes and %s as input shape for Keras classifier.", self.nb_classes(), str(self.input_shape)
)
self._use_logits = use_logits
# Get loss function
if not hasattr(self._model, "loss"):
logger.warning("Keras model has no loss set. Classifier tries to use `k.sparse_categorical_crossentropy`.")
loss_function = k.sparse_categorical_crossentropy
else:
if isinstance(self._model.loss, six.string_types):
loss_function = getattr(k, self._model.loss)
elif "__name__" in dir(self._model.loss) and self._model.loss.__name__ in [
"categorical_hinge",
"categorical_crossentropy",
"sparse_categorical_crossentropy",
"binary_crossentropy",
"kullback_leibler_divergence",
]:
if self._model.loss.__name__ in ["categorical_hinge", "kullback_leibler_divergence"]:
loss_function = getattr(keras.losses, self._model.loss.__name__)
else:
loss_function = getattr(keras.backend, self._model.loss.__name__)
elif isinstance(
self._model.loss,
(
keras.losses.CategoricalHinge,
keras.losses.CategoricalCrossentropy,
keras.losses.SparseCategoricalCrossentropy,
keras.losses.BinaryCrossentropy,
keras.losses.KLDivergence,
),
):
loss_function = self._model.loss
else:
loss_function = getattr(k, self._model.loss.__name__)
# Check if loss function is an instance of loss function generator, the try is required because some of the
# modules are not available in older Keras versions
try:
flag_is_instance = isinstance(
loss_function,
(
keras.losses.CategoricalHinge,
keras.losses.CategoricalCrossentropy,
keras.losses.BinaryCrossentropy,
keras.losses.KLDivergence,
),
)
except AttributeError:
flag_is_instance = False
# Check if the labels have to be reduced to index labels and create placeholder for labels
if (
"__name__" in dir(loss_function)
and loss_function.__name__
in ["categorical_hinge", "categorical_crossentropy", "binary_crossentropy", "kullback_leibler_divergence"]
) or (self.is_tensorflow and flag_is_instance):
self._reduce_labels = False
label_ph = k.placeholder(shape=self._output.shape)
elif (
"__name__" in dir(loss_function) and loss_function.__name__ in ["sparse_categorical_crossentropy"]
) or isinstance(loss_function, keras.losses.SparseCategoricalCrossentropy):
self._reduce_labels = True
label_ph = k.placeholder(shape=[None,])
else:
raise ValueError("Loss function not recognised.")
# Define the loss using the loss function
if "__name__" in dir(loss_function,) and loss_function.__name__ in [
"categorical_crossentropy",
"sparse_categorical_crossentropy",
"binary_crossentropy",
]:
loss_ = loss_function(label_ph, self._output, from_logits=self._use_logits)
elif "__name__" in dir(loss_function) and loss_function.__name__ in [
"categorical_hinge",
"kullback_leibler_divergence",
]:
loss_ = loss_function(label_ph, self._output)
elif isinstance(
loss_function,
(
keras.losses.CategoricalHinge,
keras.losses.CategoricalCrossentropy,
keras.losses.SparseCategoricalCrossentropy,
keras.losses.KLDivergence,
keras.losses.BinaryCrossentropy,
),
):
loss_ = loss_function(label_ph, self._output)
loss_gradients = k.gradients(loss_, self._input)
if k.backend() == "tensorflow":
loss_gradients = loss_gradients[0]
elif k.backend() == "cntk":
raise NotImplementedError("Only TensorFlow and Theano support is provided for Keras.")
# Set loss, gradients and prediction functions
self._predictions_op = self._output
self._loss = loss_
self._loss_gradients = k.function([self._input, label_ph], [loss_gradients])
# Get the internal layer
self._layer_names = self._get_layers()
def _get_layers(self):
"""
Return the hidden layers in the model, if applicable.
:return: The hidden layers in the model, input and output layers excluded.
:rtype: `list`
"""
# pylint: disable=E0401
if self.is_tensorflow:
from tensorflow.keras.layers import InputLayer
else:
from keras.engine.topology import InputLayer
layer_names = [layer.name for layer in self._model.layers[:-1] if not isinstance(layer, InputLayer)]
logger.info("Inferred %i hidden layers on Keras classifier.", len(layer_names))
return layer_names
def __getstate__(self):
"""
        Use to ensure `WeakDefense` can be pickled.
:return: State dictionary with instance parameters.
:rtype: `dict`
"""
import time
state = self.__dict__.copy()
# Remove the unpicklable entries
del state["_model"]
del state["_input"]
del state["_output"]
del state["_predictions_op"]
del state["_loss"]
del state["_loss_gradients"]
del state["_layer_names"]
model_name = str(time.time()) + ".h5"
state["model_name"] = model_name
self.save(model_name)
return state
def __setstate__(self, state):
"""
        Use to ensure `WeakDefense` can be unpickled.
:param state: State dictionary with instance parameters to restore.
:type state: `dict`
"""
self.__dict__.update(state)
# Load and update all functionality related to Keras
# pylint: disable=E0401
import os
from art.config import ART_DATA_PATH
if self.is_tensorflow:
from tensorflow.keras.models import load_model
else:
from keras.models import load_model
full_path = os.path.join(ART_DATA_PATH, state["model_name"])
model = load_model(str(full_path))
self._model = model
self._initialize_params(model, state["_use_logits"], state["_input_layer"], state["_output_layer"])
def __repr__(self):
repr_ = (
"%s(model=%r, use_logits=%r, channel_index=%r, clip_values=%r, preprocessing_defences=%r, "
"postprocessing_defences=%r, preprocessing=%r, input_layer=%r, output_layer=%r)"
% (
self.__module__ + "." + self.__class__.__name__,
self._model,
self._use_logits,
self.channel_index,
self.clip_values,
self.preprocessing_defences,
self.postprocessing_defences,
self.preprocessing,
self._input_layer,
self._output_layer,
)
)
return repr_
def generator_fit(x, y, batch_size=128):
"""
Minimal data generator for randomly batching large datasets.
:param x: The data sample to batch.
:type x: `np.ndarray`
:param y: The labels for `x`. The first dimension has to match the first dimension of `x`.
:type y: `np.ndarray`
:param batch_size: The size of the batches to produce.
:type batch_size: `int`
    :return: An endless generator yielding batches of `batch_size` random samples from `(x, y)`
:rtype: `tuple(np.ndarray, np.ndarray)`
"""
while True:
indices = np.random.randint(x.shape[0], size=batch_size)
yield x[indices], y[indices]
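# A minimal usage sketch (illustrative, not part of the original module): the generator
# above yields random batches forever, which is why `fit` passes an explicit
# `steps_per_epoch` to Keras. The toy arrays below are made up for demonstration.
if __name__ == "__main__":
    x_demo = np.random.rand(256, 4).astype(np.float32)
    y_demo = np.eye(2)[np.random.randint(0, 2, size=256)]
    batches = generator_fit(x_demo, y_demo, batch_size=32)
    x_batch, y_batch = next(batches)
    assert x_batch.shape == (32, 4) and y_batch.shape == (32, 2)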
|
import os
from flask import send_file
from slerp.logger import logging
from slerp.validator import Number, Blank
from entity.models import Document
log = logging.getLogger(__name__)
class DocumentService(object):
def __init__(self):
super(DocumentService, self).__init__()
@Blank(['filename', 'mimetype', 'folder', 'original_filename'])
def add_document(self, domain):
if 'secure' in domain and domain['secure'] == 'N':
domain['secure'] = False
else:
domain['secure'] = True
document = Document(domain)
document.save()
return {'payload': document.to_dict()}
@Number(['id'])
def get_document(self, domain):
document = Document.query.filter_by(id=domain['id']).first()
filename = os.path.join(document.folder,
document.filename if 'thumbnails' not in domain else document.thumbnails)
return send_file(filename, mimetype=document.mimetype if 'thumbnails' not in domain else 'image/png')
@Number(['id'])
def find_document_by_id(self, domain):
document = Document.query.filter_by(id=domain['id']).first()
return {'payload': document.to_dict()}
@Number(['id'])
def delete_document_by_id(self, domain):
document = Document.query.filter_by(id=domain['id']).first()
os.remove(os.path.join(document.folder, document.filename))
document.delete()
return {'payload': {'success': True}}
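# A minimal usage sketch (illustrative, not part of the original module; it assumes a
# Flask application context and a configured database, and every field value below is
# made up for demonstration):
if __name__ == "__main__":
    service = DocumentService()
    result = service.add_document({
        'filename': 'a1b2c3d4.pdf',
        'mimetype': 'application/pdf',
        'folder': '/data/uploads',
        'original_filename': 'report.pdf',
        'secure': 'N',  # stored on the document as secure=False
    })
    log.info(result['payload'])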
|
#pylint: disable=W0703, R0912,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
"""The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
import logging
from pylons import request
from agent.lib import contextutils, manifestutil
LOG = logging.getLogger(__name__)
CTXNAMES = ['guid', 'service', 'thread_timeout', 'thread_progress_timeout']
class BaseController(WSGIController):
""" base controller class """
def __call__(self, environ, start_response):
"""Invoke the Controller"""
# WSGIController.__call__ dispatches to the Controller method
# the request is routed to. This routing information is
# available in environ['pylons.routes_dict']
# before setting anything new, first reset the old values from previous request if any
contextutils.resetcontext(self)
#LOG.debug(environ)
if 'service' in environ['pylons.routes_dict']:
servicename = environ['pylons.routes_dict']['service']
            # if the service is not registered, the agent will not try to replace the name
if servicename is not None and servicename.count('.') == 2:
servicename = manifestutil.expandServiceName(servicename)
LOG.info('service name expanded %s ' % servicename)
environ['pylons.routes_dict']['service'] = servicename
contextutils.injectcontext(self, {'service': servicename})
# get correlationid into context
if 'X-CORRELATIONID' in request.headers and request.headers['X-CORRELATIONID'] is not None:
contextutils.injectcontext(self, {'guid': request.headers['X-CORRELATIONID']})
else:
contextutils.injectcontext(self, {'guid': ''})
# get timeouts and inject into context
if 'X-THREAD_TIMEOUT' in request.headers and request.headers['X-THREAD_TIMEOUT'] is not None:
            contextutils.injectcontext(self, {'thread_timeout': request.headers['X-THREAD_TIMEOUT']})
# get progress timeouts and inject into context
if 'X-THREAD_PROGRESS_TIMEOUT' in request.headers and request.headers['X-THREAD_PROGRESS_TIMEOUT'] is not None:
contextutils.injectcontext(self, {'thread_progress_timeout': request.headers['X-THREAD_PROGRESS_TIMEOUT']})
# get remote address from request
remoteAddr = request.environ.get("X_FORWARDED_FOR", request.environ.get("REMOTE_ADDR"))
contextutils.injectcontext(self, {'remote_addr': remoteAddr})
reqChecksum = '%s,%s,%s' % (request.method, request.url, request.body)
contextutils.injectcontext(self, {'reqChecksum': reqChecksum})
return WSGIController.__call__(self, environ, start_response)
def injectJobCtx(self, target):
''' inject both guid and callback into an object
@param target: target
'''
contextutils.copycontexts(self, target, CTXNAMES)
|
""" main
This module contains a collection of YANG definitions
for sanity package.
This module contains definitions
for the following management objects\:
Copyright (c) 2013\-2014 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class MainA(object):
"""
.. attribute:: main_aug1_c
**type**\: :py:class:`MainAug1_C <ydk.models.main.MainA.MainAug1_C>`
.. attribute:: main_aug2_c
**type**\: :py:class:`MainAug2_C <ydk.models.main.MainA.MainAug2_C>`
.. attribute:: main_aug2_d
**type**\: :py:class:`MainAug2_D <ydk.models.main.MainA.MainAug2_D>`
.. attribute:: main_aug3_c
**type**\: :py:class:`MainAug3_C <ydk.models.main.MainA.MainAug3_C>`
.. attribute:: main_aug3_d
**type**\: :py:class:`MainAug3_D <ydk.models.main.MainA.MainAug3_D>`
.. attribute:: one
blah
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'main'
_revision = '2015-11-17'
def __init__(self):
self.main_aug1_c = MainA.MainAug1_C()
self.main_aug1_c.parent = self
self.main_aug2_c = MainA.MainAug2_C()
self.main_aug2_c.parent = self
self.main_aug2_d = MainA.MainAug2_D()
self.main_aug2_d.parent = self
self.main_aug3_c = MainA.MainAug3_C()
self.main_aug3_c.parent = self
self.main_aug3_d = MainA.MainAug3_D()
self.main_aug3_d.parent = self
self.one = None
class MainAug1_C(object):
"""
.. attribute:: two
blah
**type**\: str
"""
_prefix = 'aug1'
_revision = '2015-11-17'
def __init__(self):
self.parent = None
self.two = None
@property
def _common_path(self):
return '/main:main-A/main-aug1:main-aug1_C'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.two is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models._meta import _main as meta
return meta._meta_table['MainA.MainAug1_C']['meta_info']
class MainAug2_C(object):
"""
.. attribute:: three
blah
**type**\: int
**range:** \-32768..32767
"""
_prefix = 'aug2'
_revision = '2015-11-17'
def __init__(self):
self.parent = None
self.three = None
@property
def _common_path(self):
return '/main:main-A/main-aug2:main-aug2_C'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.three is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models._meta import _main as meta
return meta._meta_table['MainA.MainAug2_C']['meta_info']
class MainAug2_D(object):
"""
.. attribute:: poo
blah
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'aug2'
_revision = '2015-11-17'
def __init__(self):
self.parent = None
self.poo = None
@property
def _common_path(self):
return '/main:main-A/main-aug2:main-aug2_D'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.poo is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models._meta import _main as meta
return meta._meta_table['MainA.MainAug2_D']['meta_info']
class MainAug3_C(object):
"""
.. attribute:: meh
blah
**type**\: int
**range:** \-128..127
"""
_prefix = 'aug3'
_revision = '2015-11-17'
def __init__(self):
self.parent = None
self.meh = None
@property
def _common_path(self):
return '/main:main-A/main-aug3:main-aug3_C'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.meh is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models._meta import _main as meta
return meta._meta_table['MainA.MainAug3_C']['meta_info']
class MainAug3_D(object):
"""
.. attribute:: buh
blah
**type**\: str
"""
_prefix = 'aug3'
_revision = '2015-11-17'
def __init__(self):
self.parent = None
self.buh = None
@property
def _common_path(self):
return '/main:main-A/main-aug3:main-aug3_D'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.buh is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models._meta import _main as meta
return meta._meta_table['MainA.MainAug3_D']['meta_info']
@property
def _common_path(self):
return '/main:main-A'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.main_aug1_c is not None and self.main_aug1_c._has_data():
return True
if self.main_aug2_c is not None and self.main_aug2_c._has_data():
return True
if self.main_aug2_d is not None and self.main_aug2_d._has_data():
return True
if self.main_aug3_c is not None and self.main_aug3_c._has_data():
return True
if self.main_aug3_d is not None and self.main_aug3_d._has_data():
return True
if self.one is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models._meta import _main as meta
return meta._meta_table['MainA']['meta_info']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from datetime import datetime
import click
from loguru import logger
from colorama import init, Fore
VERSION = Fore.YELLOW + "2.0.0" + Fore.RESET
VERSION_NAME = Fore.GREEN + "Brazen Beaver" + Fore.RESET
VERSION_ART = f"""
___
.=" "=._.---.
." c ' Y'`p
/ , `. w_/
| '-. / /
_,..._| )_-\ \_=.\\
`-....-'`------)))`=-'"`'" v{VERSION} {VERSION_NAME}
"""
@click.command()
@click.option(
"-s",
"--start",
required=True,
type=click.DateTime(["%m-%d-%Y"]),
help="Starting date (MM-dd-yyyy)",
)
@click.option(
"-e",
"--end",
required=True,
type=click.DateTime(["%m-%d-%Y"]),
help="Ending date (MM-dd-yyyy)",
)
@click.option("-d", "--delete", default=False, is_flag=True, help="Dry run (no delete)")
@click.option(
"-p",
"--path",
default=".",
type=click.Path(file_okay=False, writable=True, exists=True),
help="Path to download files to",
)
@click.version_option(version=VERSION, message=f"%(version)s {VERSION_NAME}")
def main(
start: datetime,
end: datetime,
delete: bool,
path: str,
):
print(VERSION_ART)
if start >= end:
logger.critical(f"Start date is after end date. Cant manipulate space time")
sys.exit(1)
if __name__ == "__main__":
init()
main()
|
import sys
import hashtable
# defaults
N = 1000
m = 10
func = "f2"
filename = "plot.png"
if len(sys.argv) == 5:
N = int(sys.argv[1])
m = int(sys.argv[2])
func = sys.argv[3]
filename = sys.argv[4]
# get our hashfunc
if func == "f1":
h = hashtable.h_mod(m)
elif func == "f2":
h = hashtable.h_mul(0.2,m)
elif func == "f3":
h = hashtable.h_mul(0.618034,m)
else:
h = hashtable.h_mul(0.8,m)
htable = hashtable.HashTable(m,h)
import random
for i in range(N):
    x = hashtable.htentry(random.randint(0, sys.maxsize))
htable.insert(x)
largest = 0
lengths = []
for i in range(m):
length = len(htable.T[i])
lengths.append(length)
largest = length if length > largest else largest
print "slot %d had %d collisions" % (i,length)
print "Largest collision was %d" % largest
import matplotlib.pyplot as plt
from pylab import savefig
plt.bar(range(m),lengths)
plt.title("Collisions per slot using %s N=%d m=%d" % (func,N,m))
plt.ylabel("Collisions")
plt.xlabel("Slot")
savefig(filename, bbox_inches='tight')
|
__version__ = '1.0.1'
from platformer import PlatformerApp
if __name__ == "__main__":
PlatformerApp().run()
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from library.models import PressReview, PressReviewForm
class PressReviewAdmin(admin.ModelAdmin):
form = PressReviewForm
list_display = ('date', 'link',)
search_fields = ('date',)
admin.site.register(PressReview, PressReviewAdmin)
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from traits.api import HasTraits, Str, Float, List, Any, Color, Property
from traitsui.api import View, Item, Group, HGroup, TableEditor, Handler, RangeEditor
from traitsui.table_column import ObjectColumn
# =============standard library imports ========================
import os
import glob
import six.moves.cPickle as pickle
import copy
# =============local library imports ==========================
from pychron.paths import paths
def get_user_views():
return glob.glob(os.path.join(paths.hidden_dir, "userview*"))
class ViewControllerHandler(Handler):
def closed(self, info, is_ok):
""" """
# delete any previous view
# if they exist they will be rewritten below
for uvi in get_user_views():
os.remove(uvi)
obj = info.object.views
for i, v in enumerate(obj):
name = "userview{}".format(i)
            with open(os.path.join(paths.hidden_dir, name), "wb") as f:
pickle.dump(v, f)
super(ViewControllerHandler, self).closed(info, is_ok)
class UserView(HasTraits):
""" """
x = Float
y = Float
z = Float
rx = Float
ry = Float
rz = Float
scene_graph = Any(transient=True)
rmin = Float(0)
rmax = Float(360)
xmin = Float(-50)
xmax = Float(10)
ymin = Float(-50)
ymax = Float(10)
zoom = Property(depends_on="_zoom")
_zoom = Float(1)
zmin = Float(1)
zmax = Float(100)
name = Str
key = Str
background_color = Color
def _get_zoom(self):
return self._zoom / 0.02
def _set_zoom(self, v):
self._zoom = v * 0.02
def _anytrait_changed(self, name, old, new):
""" """
if self.scene_graph is not None:
self.scene_graph.reset_view()
self.scene_graph.root[0].translate = [self.x, self.y, self.z]
m, rot = self.scene_graph.calc_rotation_matrix(self.rx, self.ry, self.rz)
self.scene_graph.canvas.thisrotation = rot
self.scene_graph.root[0].matrix = m
self.scene_graph.root[0].scale = (self._zoom,) * 3 # (self.zoom * 0.02,)*3
try:
self.scene_graph.root[1].matrix = m
except IndexError:
pass
color = [c / 255.0 for c in self.background_color]
self.scene_graph.canvas.set_background_color(color)
self.scene_graph.canvas.Refresh()
class ViewController(HasTraits):
""" """
views = List
scene_graph = Any # (transient = True)
# def __init__(self, *args, **kw):
# super(ViewController, self).__init__(*args, **kw)
def _views_default(self):
return self.views_factory()
# def _bu_fired(self):
# '''
# '''
# self.views.append(UserView(name = 'view1', key = 'v', scene_graph = self.scene_graph))
# def _views_default(self):
# '''
# '''
def views_factory(self):
""" """
# if os.path.exists(picklepath):
uvfs = get_user_views()
if uvfs:
px = []
for pa in uvfs:
                with open(pa, "rb") as f:
try:
pi = pickle.load(f)
pi.scene_graph = self.scene_graph
px.append(pi)
except ImportError:
pass
return px
else:
return (
[]
) # UserView(name = 'home', key = 'h', scene_graph = self.scene_graph)]
def _scene_graph_changed(self):
for v in self.views:
v.scene_graph = self.scene_graph
def row_factory(self):
""" """
if len(self.views):
v = copy.copy(self.views[-1])
v.scene_graph = self.scene_graph
v.name = "userview{}".format(len(self.views) + 1)
else:
v = UserView(scene_graph=self.scene_graph, name="userview1")
self.scene_graph.canvas.user_views.append(v)
def _table_editor_factory(self):
""" """
col = [ObjectColumn(name="name"), ObjectColumn(name="key")]
return TableEditor(
columns=col,
auto_size=False,
orientation="vertical",
show_toolbar=True,
row_factory=self.row_factory,
deletable=True,
edit_view=View(
Group(
HGroup("name", "key"),
Item(
"x",
editor=RangeEditor(
low_name="xmin", high_name="xmax", mode="slider"
),
),
Item(
"y",
editor=RangeEditor(
low_name="xmin", high_name="xmax", mode="slider"
),
),
Item(
"z",
editor=RangeEditor(
low_name="xmin", high_name="xmax", mode="slider"
),
),
Item(
"rx",
editor=RangeEditor(
low_name="rmin", high_name="rmax", mode="slider"
),
),
Item(
"ry",
editor=RangeEditor(
low_name="rmin", high_name="rmax", mode="slider"
),
),
Item(
"rz",
editor=RangeEditor(
low_name="rmin", high_name="rmax", mode="slider"
),
),
Item(
"zoom",
editor=RangeEditor(
low_name="zmin", high_name="zmax", mode="slider"
),
),
Group(
"background_color"
# , style = 'custom'
),
show_border=True,
),
resizable=True,
),
)
def traits_view(self):
""" """
return View(
Item(
"views",
height=75,
editor=self._table_editor_factory(),
show_label=False,
),
resizable=True,
width=375,
height=675,
handler=ViewControllerHandler,
title="User Canvas Views",
)
if __name__ == "__main__":
vm = ViewController()
vm.configure_traits()
|
# --------------------------
# UFSC - CTC - INE - INE5603
# Calculations exercise
# --------------------------
# Class responsible for interacting with the user
from view.menu import Menu
from view.paineis.painel_media3 import PainelMedia3
from view.paineis.painel_soma3 import PainelSoma3
from view.paineis.painel_par import PainelPar
from view.paineis.painel_menor3 import PainelMenor3
from view.paineis.painel_maior_que import PainelMaiorQue
from view.paineis.painel_divisivel_por import PainelDivisivelPor
from view.paineis.painel_multiplica import PainelMultiplica
from view.paineis.painel_divide import PainelDivide
from view.paineis.painel_bissexto import PainelBissexto
from view.paineis.painel_mdc import PainelMDC
from view.paineis.painel_soma_divisores import PainelSomaDivisores
from view.paineis.painel_amigos import PainelAmigos
from view.paineis.painel_primo import PainelPrimo
from view.paineis.painel_composto import PainelComposto
class InterfaceComUsuario:
def __init__(self):
opcoes = {
0 : ('Sair do Programa', None),
1 : ('Média de três números', PainelMedia3),
2 : ('Soma de três números', PainelSoma3),
3 : ('Menor de três números', PainelMenor3),
4 : ('Número Par', PainelPar),
5 : ('Maior que', PainelMaiorQue),
6 : ('Divisível por', PainelDivisivelPor),
7 : ('Multiplica', PainelMultiplica),
8 : ('Divisão Inteira', PainelDivide),
9 : ('Ano Bissexto', PainelBissexto),
10 : ('Máximo Divisor Comum', PainelMDC),
11 : ('Soma dos Divisores', PainelSomaDivisores),
12 : ('Números Amigos', PainelAmigos),
13 : ('Número Primo', PainelPrimo),
14 : ('Número Composto', PainelComposto)
}
menu = Menu('Exercício Cálculos', opcoes)
self._menu = menu
def interaja(self):
opcao = self._menu.pergunte()
while opcao is not None:
try:
opcao().execute()
except Exception:
print('** Erro na execução!!')
input('Tecle <enter> para continuar.')
opcao = self._menu.pergunte()
|
import sys
print("example_venv_base_simple.py", "execution")
path = sys.base_prefix if hasattr(sys, "base_prefix") else sys.prefix
print(path, sys.prefix)
|
"""
Создать класс TrafficLight (светофор).
определить у него один атрибут color (цвет) и метод running (запуск);
атрибут реализовать как приватный;
в рамках метода реализовать переключение светофора в режимы: красный, жёлтый, зелёный;
продолжительность первого состояния (красный) составляет 7 секунд, второго (жёлтый) — 2 секунды, третьего (зелёный) —
на ваше усмотрение;
переключение между режимами должно осуществляться только в указанном порядке (красный, жёлтый, зелёный);
проверить работу примера, создав экземпляр и вызвав описанный метод.
"""
from time import sleep
class TrafficLight:
__color = ''
__colors = ['red', 'yellow', 'green']
def change_color(self, i):
self.__color = i
print(self.__color)
def running(self):
for clr in self.__colors:
if clr == 'red':
self.change_color(clr)
sleep(7)
elif clr == 'yellow':
self.change_color(clr)
sleep(2)
else:
self.change_color(clr)
sleep(8)
traffic_light = TrafficLight()
traffic_light.running()
|
# --------------------------------------------------------
# Deep Iterative Matching Network
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Gu Wang, Yi Li
# --------------------------------------------------------
"""
generate rendered from rendered poses
generate (observed rendered) pair list file for training (or test)
"""
from __future__ import print_function, division
import numpy as np
import os
import sys
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(1, os.path.join(cur_path, ".."))
from lib.utils.mkdir_if_missing import mkdir_if_missing
from lib.render_glumpy.render_py import Render_Py
import lib.pair_matching.RT_transform as se3
import cv2
import random
random.seed(2333)
np.random.seed(2333)
from tqdm import tqdm
LM6d_occ_dsm_root = os.path.join(
cur_path, "..", "data/LINEMOD_6D/LM6d_converted/LM6d_occ_dsm"
)
image_set_dir = os.path.join(LM6d_occ_dsm_root, "image_set")
observed_set_dir = os.path.join(LM6d_occ_dsm_root, "image_set/observed")
idx2class = {
1: "ape",
5: "can",
6: "cat",
8: "driller",
9: "duck",
10: "eggbox",
11: "glue",
12: "holepuncher",
}
classes = idx2class.values()
classes = sorted(classes)
def class2idx(class_name, idx2class=idx2class):
for k, v in idx2class.items():
if v == class_name:
return k
rendered_pose_dir = os.path.join(LM6d_occ_dsm_root, "ds_rendered_poses")
# output dir
rendered_root_dir = os.path.join(LM6d_occ_dsm_root, "data/rendered")
mkdir_if_missing(rendered_root_dir)
# config for renderer
width = 640
height = 480
K = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]])
ZNEAR = 0.25
ZFAR = 6.0
depth_factor = 1000
# output_path
version = "v1"
def main():
gen_images = True
for class_idx, class_name in enumerate(tqdm(classes)):
train_pair = []
print("start ", class_name)
if class_name in ["__back_ground__"]:
continue
if gen_images:
# init render machine
model_dir = os.path.join(
cur_path,
"../data/LINEMOD_6D/LM6d_converted/models/{}".format(class_name),
)
render_machine = Render_Py(model_dir, K, width, height, ZNEAR, ZFAR)
for set_type in ["NDtrain"]:
# observed index list
observed_list_path = os.path.join(
observed_set_dir, "NDtrain_observed_{}.txt".format(class_name)
)
with open(observed_list_path, "r") as f:
observed_list = [x.strip() for x in f.readlines()]
# rendered poses
rendered_pose_path = os.path.join(
rendered_pose_dir,
"LM6d_occ_dsm_{}_NDtrain_rendered_pose_{}.txt".format(
version, class_name
),
)
with open(rendered_pose_path, "r") as f:
str_rendered_pose_list = [x.strip().split(" ") for x in f.readlines()]
rendered_pose_list = np.array(
[[float(x) for x in each_pose] for each_pose in str_rendered_pose_list]
)
rendered_per_observed = 1
assert len(rendered_pose_list) == 1 * len(observed_list), "{} vs {}".format(
len(rendered_pose_list), len(observed_list)
)
for idx, observed_index in enumerate(tqdm(observed_list)):
video_name, observed_prefix = observed_index.split("/") # ./prefix
rendered_dir = os.path.join(rendered_root_dir, video_name)
mkdir_if_missing(rendered_dir)
rendered_dir = os.path.join(rendered_dir, class_name)
mkdir_if_missing(rendered_dir)
for inner_idx in range(rendered_per_observed):
if gen_images:
image_file = os.path.join(
rendered_dir,
"{}_{}-color.png".format(observed_prefix, inner_idx),
)
depth_file = os.path.join(
rendered_dir,
"{}_{}-depth.png".format(observed_prefix, inner_idx),
)
# if os.path.exists(image_file) and os.path.exists(depth_file):
# continue
rendered_idx = idx * rendered_per_observed + inner_idx
pose_rendered_q = rendered_pose_list[rendered_idx]
rgb_gl, depth_gl = render_machine.render(
pose_rendered_q[:4], pose_rendered_q[4:]
)
rgb_gl = rgb_gl.astype("uint8")
depth_gl = (depth_gl * depth_factor).astype(np.uint16)
cv2.imwrite(image_file, rgb_gl)
cv2.imwrite(depth_file, depth_gl)
pose_rendered_file = os.path.join(
rendered_dir,
"{}_{}-pose.txt".format(observed_prefix, inner_idx),
)
text_file = open(pose_rendered_file, "w")
text_file.write("{}\n".format(class2idx(class_name)))
pose_rendered_m = np.zeros((3, 4))
pose_rendered_m[:, :3] = se3.quat2mat(pose_rendered_q[:4])
pose_rendered_m[:, 3] = pose_rendered_q[4:]
pose_ori_m = pose_rendered_m
pose_str = "{} {} {} {}\n{} {} {} {}\n{} {} {} {}".format(
pose_ori_m[0, 0],
pose_ori_m[0, 1],
pose_ori_m[0, 2],
pose_ori_m[0, 3],
pose_ori_m[1, 0],
pose_ori_m[1, 1],
pose_ori_m[1, 2],
pose_ori_m[1, 3],
pose_ori_m[2, 0],
pose_ori_m[2, 1],
pose_ori_m[2, 2],
pose_ori_m[2, 3],
)
text_file.write(pose_str)
train_pair.append(
"{} {}/{}_{}".format(
observed_index, class_name, observed_prefix, inner_idx
)
)
pair_set_file = os.path.join(
image_set_dir, "train_{}.txt".format(class_name)
)
train_pair = sorted(train_pair)
with open(pair_set_file, "w") as text_file:
for x in train_pair:
text_file.write("{}\n".format(x))
print(class_name, " done")
def check_observed_rendered():
from lib.utils.utils import read_img
import matplotlib.pyplot as plt
observed_dir = os.path.join(LM6d_occ_dsm_root, "data/observed")
for class_idx, class_name in enumerate(tqdm(classes)):
if class_name != "driller":
continue
print(class_name)
observed_list_path = os.path.join(
observed_set_dir, "NDtrain_observed_{}.txt".format(class_name)
)
with open(observed_list_path, "r") as f:
observed_list = [x.strip() for x in f.readlines()]
for idx, observed_index in enumerate(observed_list):
print(observed_index)
prefix = observed_index.split("/")[1]
color_observed = read_img(
os.path.join(observed_dir, prefix + "-color.png"), 3
)
color_rendered = read_img(
os.path.join(rendered_root_dir, class_name, prefix + "_0-color.png"), 3
)
fig = plt.figure()
plt.axis("off")
plt.subplot(1, 2, 1)
plt.imshow(color_observed[:, :, [2, 1, 0]])
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(color_rendered[:, :, [2, 1, 0]])
plt.axis("off")
plt.show()
if __name__ == "__main__":
main()
# check_observed_rendered()
|
# -*- coding: UTF-8 -*-
class PandasticSearchException(RuntimeError):
def __init__(self, msg):
super(PandasticSearchException, self).__init__(msg)
class NoSuchDependencyException(PandasticSearchException):
pass
class ServerDefinedException(PandasticSearchException):
pass
class ParseResultException(PandasticSearchException):
pass
class DataFrameException(PandasticSearchException):
pass
|
import debug_toolbar
from django.conf.urls import url, include
urlpatterns = [url(r"^", include(debug_toolbar.urls))]
|
kgVegetable = float(input())
kgFruit = float(input())
sumVegetable = int(input())
sumFruit = int(input())
priceVegetable = kgVegetable * sumVegetable
priceFruit = kgFruit * sumFruit
bgl = priceVegetable + priceFruit
eur = bgl / 1.94  # convert the total from Bulgarian leva to euros at a rate of 1.94 BGN per EUR
print(priceVegetable)
print(priceFruit)
print(eur)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-24 18:11
from __future__ import unicode_literals
from __future__ import absolute_import
from django.core.management import call_command
from django.db import migrations
from corehq.privileges import REPORT_BUILDER_5
def _grandfather_reportbuilder_5_pro(apps, schema_editor):
call_command(
'cchq_prbac_grandfather_privs',
REPORT_BUILDER_5,
skip_edition='Community,Standard',
noinput=True,
)
class Migration(migrations.Migration):
dependencies = [
('accounting', '0015_grandfather_login_as'),
]
operations = [
migrations.RunPython(_grandfather_reportbuilder_5_pro),
]
|
import unittest
import test_utils
from gcp_census.bigquery.bigquery_table_metadata import BigQueryTableMetadata
from gcp_census.bigquery.transformers.partition_metadata_v1_0 import \
PartitionMetadataV1_0
class TestPartitionMetadataV1_0(unittest.TestCase):
def test_transforming_table_without_labels(self):
# given
table = {
'kind': 'bigquery#table',
'etag': '\'smpMas70-D1-zV2oEH0ud6qY21c/MTQ2ODQxNDY2MDU3Mg\'',
'id': 'dev-manager:crm_raw.account_1_0_0$20150603',
'tableReference': {
'projectId': 'dev-manager',
'datasetId': 'crm_raw',
'tableId': 'account_1_0_0$20150603'
},
"description": "secs\n\njhbhgvhgv\n\nlorem",
'numBytes': '421940',
'numLongTermBytes': '421940',
'numRows': '1445',
'creationTime': '1468414660572',
'lastModifiedTime': '1468414660572',
'type': 'TABLE',
'location': 'US'
}
# when
data = PartitionMetadataV1_0(BigQueryTableMetadata(table)).transform()
# then
self.assertEqual('account_1_0_0', data['tableId'])
self.assertEqual('20150603', data['partitionId'])
def test_should_ignore_timepartitioning_field(self):
# given
table = test_utils.create_minimal_table_dict()
table['timePartitioning'] = {
'type': 'DAY',
'expirationMs': '259200000',
'field': 'transaction_date'
}
# when
data = PartitionMetadataV1_0(BigQueryTableMetadata(table)).transform()
# then
self.assertEqual('DAY', data['timePartitioning']['type'])
self.assertEqual('259200000', data['timePartitioning']['expirationMs'])
self.assertFalse('field' in data['timePartitioning'])
def test_should_parse_timepartitioning_without_expiration_ms(self):
# given
table = test_utils.create_minimal_table_dict()
table['timePartitioning'] = {
'type': 'DAY',
}
# when
data = PartitionMetadataV1_0(BigQueryTableMetadata(table)).transform()
# then
self.assertEqual('DAY', data['timePartitioning']['type'])
self.assertFalse('expirationMs' in data['timePartitioning'])
|
'''Trains a simple sentiment analysis model.
'''
from __future__ import print_function
import numpy as np
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import pickle
batch_size = 100
num_classes = 2
epochs = 10
N_X = 423 # len(train_x[0])
layer1_size = 200
# the data, shuffled and split between train and test sets
x_train, y_train, x_test, y_test = pickle.load( open('tmp/sentiment_set.pickle', 'rb' ) )
x_train = x_train.toarray()
x_test = x_test.toarray()
x_train /= np.max(x_train)
x_test /= np.max(x_test)
print(x_train.shape, y_train.shape, 'train samples,', type(x_train[0][0]), ' ', type(y_train[0][0]))
print(x_test.shape, y_test.shape, 'test samples,', type(x_test[0][0]), ' ', type(y_train[0][0]))
# convert class vectors to binary class matrices. Our input is already one-hot encoded, so there is no need to do it again.
# y_train = keras.utils.to_categorical(y_train, num_classes)
# y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(layer1_size, activation='relu', input_shape=(N_X,)))
model.add(Dropout(0.2))
# Already overfitting, no need to add this extra layer
# model.add(Dense(layer1_size, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test)
)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
# Copyright 2021, Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic to generate response data for a date request."""
from datetime import timedelta
from mycroft.util.format import nice_date, nice_duration
from mycroft.util.time import now_local
from .util import extract_datetime_from_utterance
class Response:
"""Logic to generate response data for a date request."""
def __init__(self):
self.date_time = None
self.dialog_name = None
self.dialog_data = None
@property
def speakable_date(self) -> str:
"""Syntactic sugar to give context to why we are calling nice_date
Returns:
The date formatted in a string that can be spoken by a TTS engine.
"""
return nice_date(self.date_time)
def build_current_date_response(self):
"""Generate the data needed to respond to a current date request."""
self.date_time = now_local()
self.dialog_name = "date"
self.dialog_data = dict(date=self.speakable_date)
def build_relative_date_response(self, utterance: str):
"""Generate the data needed to respond to a relative date request.
Args:
utterance: The words spoken by the user to initiate the request.
"""
self.date_time = extract_datetime_from_utterance(utterance)
if self.date_time is not None:
duration = self._determine_relative_duration()
if duration.days >= 0:
speakable_duration = nice_duration(duration)
self.dialog_name = "date-relative-future"
else:
speakable_duration = nice_duration(abs(duration))
self.dialog_name = "date-relative-past"
self.dialog_data = dict(
date=self.speakable_date, num_days=speakable_duration
)
def _determine_relative_duration(self) -> timedelta:
"""Determine the number of days from the current date requested by the user.
Returns:
The amount of time away from the current date requested by the user
"""
relative_date = self.date_time.date()
today = now_local().date()
duration = relative_date - today
return duration
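# A minimal usage sketch (illustrative, not part of the original skill; it assumes a
# configured Mycroft environment so that now_local() and nice_date() can resolve the
# locale settings):
if __name__ == "__main__":
    response = Response()
    response.build_current_date_response()
    print(response.dialog_name, response.dialog_data)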
|
from model import Igra, bazen_besed
import random
def izpis_igre(igra):
if not igra.zmaga() and not igra.poraz():
return "Nadaljujmo z igro."
return None
def izpis_zmage(igra):
if igra.zmaga():
return "Zmagali ste!"
return None
def izpis_poraza(igra):
if igra.poraz():
return "Izgubili ste. Poskusite novo igro."
return None
def zahtevaj_vnos():
return str(input("Napiši (na blef ugibano) črko: "))
def pozeni_vmesnik():
print("Pozdravljen v igri VISLICE! Začnimo z igro.")
igra = Igra(random.choice(bazen_besed), [])
print("Geslo je določeno.")
while True:
print(igra.pravilni_del_gesla())
crka = zahtevaj_vnos()
izid_kroga = igra.ugibaj(crka)
|
from dataclasses import replace
from decimal import Decimal
import pytest
from pricehist.price import Price
from pricehist.series import Series
@pytest.fixture
def series():
return Series(
"BASE",
"QUOTE",
"type",
"2021-01-01",
"2021-06-30",
[
Price("2021-01-01", Decimal("1.0123456789")),
Price("2021-01-02", Decimal("2.01234567890123456789")),
Price("2021-01-03", Decimal("3.012345678901234567890123456789")),
],
)
def test_invert(series):
result = series.invert()
assert (series.base, series.quote) == ("BASE", "QUOTE")
assert (result.base, result.quote) == ("QUOTE", "BASE")
def test_rename_base(series):
result = series.rename_base("NEWBASE")
assert series.base == "BASE"
assert result.base == "NEWBASE"
def test_rename_quote(series):
result = series.rename_quote("NEWQUOTE")
assert series.quote == "QUOTE"
assert result.quote == "NEWQUOTE"
def test_quantize_rounds_half_even(series):
subject = replace(
series,
prices=[
Price("2021-01-01", Decimal("1.14")),
Price("2021-01-02", Decimal("2.25")),
Price("2021-01-03", Decimal("3.35")),
Price("2021-01-04", Decimal("4.46")),
],
)
amounts = [p.amount for p in subject.quantize(1).prices]
assert amounts == [
Decimal("1.1"),
Decimal("2.2"),
Decimal("3.4"),
Decimal("4.5"),
]
def test_quantize_does_not_extend(series):
subject = replace(
series,
prices=[
Price("2021-01-01", Decimal("1.14")),
Price("2021-01-02", Decimal("2.25")),
Price("2021-01-03", Decimal("3.35")),
Price("2021-01-04", Decimal("4.46")),
],
)
amounts = [p.amount for p in subject.quantize(3).prices]
assert amounts == [
Decimal("1.14"),
Decimal("2.25"),
Decimal("3.35"),
Decimal("4.46"),
]
def test_quantize_does_not_go_beyond_context_max_prec(series):
subject = replace(
series,
prices=[
Price("2021-01-01", Decimal("1.012345678901234567890123456789")),
],
)
assert subject.prices[0].amount == Decimal("1.012345678901234567890123456789")
result0 = subject.quantize(26)
result1 = subject.quantize(27)
result2 = subject.quantize(35)
assert result0.prices[0].amount == Decimal("1.01234567890123456789012346")
assert result1.prices[0].amount == Decimal("1.012345678901234567890123457")
assert result2.prices[0].amount == Decimal("1.012345678901234567890123457")
|
import z3
from variable import Var
from valuation import Valuation
class Formula:
def __init__(self, valuation=None, disabled=None):
self._domain = set()
self._assertions = []
if valuation is not None:
self.assert_valuation(valuation)
if disabled is not None:
self.assert_all_pairs_absent(disabled)
@property
def domain(self):
return self._domain
def __iter__(self):
for v in self.domain:
yield v
@staticmethod
def _id(var):
return z3.Bool("{}{}".format(var.state,
"!" if var.unique else ""))
@staticmethod
def _states_constraints(states):
conjuncts = []
domain = set()
for q in states:
var = Var(q)
conjuncts.append(z3.Not(Formula._id(var)))
domain.add(var)
return (z3.And(conjuncts), domain)
def assert_some_states_present(self, states):
constraints, domain = Formula._states_constraints(states)
self._assertions.append(z3.Not(constraints))
self._domain |= domain
def assert_all_states_absent(self, states):
constraints, domain = Formula._states_constraints(states)
self._assertions.append(constraints)
self._domain |= domain
def assert_valuation(self, valuation):
for var in valuation:
self._domain.add(var)
if valuation[var]:
self._assertions.append(Formula._id(var))
else:
self._assertions.append(z3.Not(Formula._id(var)))
@staticmethod
def _pairs_constraints(pairs):
conjuncts = []
domain = set()
for pair in pairs:
p, q = tuple(pair)
if p != q:
domain |= {Var(p), Var(q)}
conjuncts.append(z3.Or(z3.Not(Formula._id(Var(p))),
z3.Not(Formula._id(Var(q)))))
else:
domain |= {Var(p), Var(p, True)}
conjuncts.append(z3.Or(z3.Not(Formula._id(Var(p))),
Formula._id(Var(p, True))))
return (z3.And(conjuncts), domain)
def _consistency_constraints(self):
conjuncts = []
for v in self._domain:
if (v.unique):
expr = z3.Implies(Formula._id(v),
Formula._id(v.opposite()))
else:
expr = z3.Implies(z3.Not(Formula._id(v)),
z3.Not(Formula._id(v.opposite())))
conjuncts.append(expr)
return z3.And(conjuncts)
def assert_all_pairs_absent(self, pairs):
constraints, domain = Formula._pairs_constraints(pairs)
self._assertions.append(constraints)
self._domain |= domain
def assert_some_pair_present(self, pairs):
constraints, domain = Formula._pairs_constraints(pairs)
self._assertions.append(z3.Not(constraints))
self._domain |= domain
def tautology_check(self, constraints):
solver = z3.Solver()
solver.add(self._consistency_constraints())
solver.add(self._assertions)
disjuncts = []
for (pos, neg, pre) in constraints:
absent, _ = Formula._pairs_constraints({pre})
pos_conj = z3.And([Formula._id(v) for v in pos])
neg_conj = z3.And([z3.Not(Formula._id(v)) for v in neg])
disjuncts.append(z3.And(pos_conj, neg_conj, z3.Not(absent)))
solver.add(z3.Or(disjuncts))
result = solver.check()
return (result == z3.unsat)
def implies_all_absent_tautology_check(self, pairs):
solver = z3.Solver()
constraints, _ = Formula._pairs_constraints(pairs)
solver.add(self._consistency_constraints())
solver.add(z3.Not(z3.Implies(z3.And(self._assertions), constraints)))
result = solver.check()
return (result == z3.unsat)
def implies_some_present_tautology_check(self, pairs):
solver = z3.Solver()
constraints, _ = Formula._pairs_constraints(pairs)
solver.add(self._consistency_constraints())
solver.add(self._assertions) # (assertions and constraints) equiv. to:
solver.add(constraints) # not(assertions => not constraints)
result = solver.check()
return (result == z3.unsat)
def solutions(self):
solver = z3.Solver()
solver.add(self._consistency_constraints())
solver.add(self._assertions)
sol = []
while (solver.check() == z3.sat):
model = solver.model()
valuation = Valuation()
for var in self:
valuation[var] = z3.is_true(model[Formula._id(var)])
sol.append(valuation)
# Forbid solution in future checks
solver.add(z3.Or([z3.Not(Formula._id(v)) if valuation[v]
else Formula._id(v) for v in valuation]))
return sol
def implies(self, formula):
solver = z3.Solver()
solver.add(self._consistency_constraints())
solver.add(formula._consistency_constraints())
solver.add(z3.Not(z3.Implies(z3.And(self._assertions),
z3.And(formula._assertions))))
result = solver.check()
return (result == z3.unsat)
def __str__(self):
return str(self._assertions)
def __repr__(self):
return str(self)
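# A minimal usage sketch (illustrative, not part of the original module; it relies on
# the Var and Valuation classes exactly as they are used above): require at least one
# of two states to be present, forbid them from being present together, and enumerate
# the satisfying valuations.
if __name__ == "__main__":
    formula = Formula()
    formula.assert_some_states_present({"a", "b"})
    formula.assert_all_pairs_absent({frozenset({"a", "b"})})
    for valuation in formula.solutions():
        print(valuation)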
|
import logging
import fnmatch
from typing import Set, Optional, Union, List
from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR
class RunnerFilter(object):
# NOTE: This needs to be static because different filters may be used at load time versus runtime
# (see note in BaseCheckRegistery.register). The concept of which checks are external is
# logically a "static" concept anyway, so this makes logical sense.
__EXTERNAL_CHECK_IDS: Set[str] = set()
def __init__(
self,
framework: str = "all",
checks: Union[str, List[str], None] = None,
skip_checks: Union[str, List[str], None] = None,
download_external_modules: bool = False,
external_modules_download_path: str = DEFAULT_EXTERNAL_MODULES_DIR,
evaluate_variables: bool = True,
runners: Optional[List[str]] = None,
skip_framework: Optional[str] = None,
excluded_directories: Optional[List[str]] = None
) -> None:
if checks is None:
checks = []
if isinstance(checks, str):
self.checks = checks.split(",")
elif isinstance(checks, list) and len(checks) == 1:
self.checks = checks[0].split(",")
else:
self.checks = checks
if skip_checks is None:
skip_checks = []
if isinstance(skip_checks, str):
self.skip_checks = skip_checks.split(",")
elif isinstance(skip_checks, list) and len(skip_checks) == 1:
self.skip_checks = skip_checks[0].split(",")
else:
self.skip_checks = skip_checks
if skip_framework is None:
self.framework = framework
else:
if isinstance(skip_framework, str):
if framework == "all":
if runners is None:
runners = []
selected_frameworks = list(set(runners) - set(skip_framework.split(",")))
self.framework = ",".join(selected_frameworks)
else:
selected_frameworks = list(set(framework.split(",")) - set(skip_framework.split(",")))
self.framework = ",".join(selected_frameworks)
logging.info(f"Resultant set of frameworks (removing skipped frameworks): {self.framework}")
self.download_external_modules = download_external_modules
self.external_modules_download_path = external_modules_download_path
self.evaluate_variables = evaluate_variables
self.excluded_paths = excluded_directories
def should_run_check(self, check_id: str) -> bool:
if RunnerFilter.is_external_check(check_id):
pass # enabled unless skipped
elif self.checks:
if check_id in self.checks:
return True
else:
return False
if self.skip_checks and any(fnmatch.fnmatch(check_id, pattern) for pattern in self.skip_checks):
return False
return True
@staticmethod
def notify_external_check(check_id: str) -> None:
RunnerFilter.__EXTERNAL_CHECK_IDS.add(check_id)
@staticmethod
def is_external_check(check_id: str) -> bool:
return check_id in RunnerFilter.__EXTERNAL_CHECK_IDS
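# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how the
# checks / skip_checks filters interact. The check IDs are placeholders.
if __name__ == "__main__":
    only_two = RunnerFilter(checks="CKV_AWS_1,CKV_AWS_2")
    assert only_two.should_run_check("CKV_AWS_1")
    assert not only_two.should_run_check("CKV_GCP_1")
    skip_aws = RunnerFilter(skip_checks=["CKV_AWS_*"])
    assert not skip_aws.should_run_check("CKV_AWS_5")  # matches the skip pattern
    assert skip_aws.should_run_check("CKV_GCP_1")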
|
# http://adventofcode.com/2016/day/11
import copy
import itertools
import re
POSSIBLE_ITEMS_IN_ELEVATOR = [1, 2]
ALL_SEEN_STATES = set()
class Generator(object):
def __init__(self, t):
self.type = t
def __repr__(self):
return "%s generator" % self.type
def __eq__(self, other):
return isinstance(other, Generator) and other.type == self.type
class Microchip(object):
def __init__(self, t):
self.type = t
def __repr__(self):
return "%s microchip" % self.type
def __eq__(self, other):
return isinstance(other, Microchip) and other.type == self.type
class State(object):
def __init__(self, floors, current_floor_index, steps_taken):
self.floors = floors
self.steps_taken = steps_taken
self.current_floor_index = current_floor_index
def new_state(self):
return State(
[Floor([i for i in f.items], f.floor_i, f.has_elevator) for f in self.floors],
self.current_floor_index,
self.steps_taken
)
def __eq__(self, other):
return self.floors == other.floors
def finished(self):
return all([f.is_empty() for f in self.floors[:-1]])
class Floor(object):
def __init__(self, items, floor_i, has_elevator=False):
self.items = items
self.floor_i = floor_i
self.has_elevator = has_elevator
def is_empty(self):
return len(self.items) == 0
@property
def matched_types(self):
        return sorted([gen.type for gen in self.generators for chip in self.microchips if gen.type == chip.type])
@property
def matched_pairs(self):
pairs = []
for t in self.matched_types:
            for gen in self.generators:
                for chip in self.microchips:
                    if gen.type == t and chip.type == t:
                        pairs.append([chip, gen])
return sorted(pairs)
@property
def unmatched_generators(self):
return sorted([i for i in self.generators if i.type not in self.matched_types])
@property
def unmatched_microchips(self):
return sorted([i for i in self.microchips if i.type not in self.matched_types])
@property
def generators(self):
return sorted([i for i in self.items if isinstance(i, Generator)])
@property
def microchips(self):
return sorted([i for i in self.items if isinstance(i, Microchip)])
def is_irradiation(self):
unmatched_types = list(
set([i.type for i in self.generators]) ^
set([i.type for i in self.microchips])
)
return any(u == i.type for i in self.microchips for u in unmatched_types) and len(self.generators) > 0
def __eq__(self, other):
if self.floor_i != other.floor_i:
return False
if self.has_elevator != other.has_elevator:
return False
if len(self.matched_types) != len(other.matched_types):
return False
if len(self.unmatched_generators) != len(other.unmatched_generators):
return False
if len(self.unmatched_microchips) != len(other.unmatched_microchips):
return False
return True
def remove_irradiation_states(states):
return [s for s in states if not any(f.is_irradiation() for f in s.floors)]
def possible_combos(floor):
possible_items = (floor.unmatched_generators +
floor.unmatched_microchips +
(floor.matched_pairs[0] if len(floor.matched_types) > 0 else []))
for count in POSSIBLE_ITEMS_IN_ELEVATOR:
for combo in itertools.combinations(possible_items, count):
yield combo
def get_next_states_up(state):
current_floor_i = state.current_floor_index
current_floor = state.floors[current_floor_i]
if current_floor_i == len(state.floors) - 1:
return []
items = copy.copy(current_floor.items)
states = []
for combo in possible_combos(current_floor):
new_state = state.new_state()
new_state.current_floor_index = current_floor_i + 1
# No point moving up to an empty floor with one item
if len(combo) == 1 and len(new_state.floors[current_floor_i +1].items) == 0:
continue
next_floor_items = copy.copy(new_state.floors[current_floor_i + 1].items)
next_floor_items += combo
new_state.floors[current_floor_i + 1] = Floor(next_floor_items, current_floor_i + 1, True)
current_floor_items = list(set(items) - set(combo))
new_state.floors[current_floor_i] = Floor(current_floor_items, current_floor_i, False)
states.append(new_state)
return states
def all_floors_below_empty(state):
floors_below = [f for f in state.floors[:state.current_floor_index]]
return all(f.is_empty() for f in floors_below)
def get_next_states_down(state):
current_floor_i = state.current_floor_index
current_floor = state.floors[current_floor_i]
if current_floor_i == 0 or all_floors_below_empty(state):
return []
states = []
for combo in possible_combos(current_floor):
new_state = state.new_state()
new_state.current_floor_index = current_floor_i - 1
next_floor_items = [i for i in new_state.floors[current_floor_i - 1].items]
next_floor_items += combo
new_state.floors[current_floor_i - 1] = Floor(next_floor_items, current_floor_i - 1, True)
current_floor_items = list(set(current_floor.items) - set(combo))
new_state.floors[current_floor_i] = Floor(current_floor_items, current_floor_i, False)
states.append(new_state)
return states
def remove_waste_of_time_states(states):
global ALL_SEEN_STATES
return [s for s in states if s not in ALL_SEEN_STATES]
def get_next_states(state):
global ALL_SEEN_STATES
next_states = []
states = get_next_states_up(state)
states = remove_irradiation_states(states)
states = remove_waste_of_time_states(states)
next_states += states
states = get_next_states_down(state)
states = remove_irradiation_states(states)
states = remove_waste_of_time_states(states)
next_states += states
for s in next_states:
ALL_SEEN_STATES.add(s)
return next_states
def parse_input(filename):
floors = []
with open(filename) as f:
i = 0
for l in f.readlines():
generators = [Generator(m) for m in re.findall('([^ ]*) generator', l)]
microchips = [Microchip(m) for m in re.findall('([^ ]*)-compatible microchip', l)]
f = Floor(generators + microchips, i)
floors.append(f)
i += 1
floors[0].has_elevator = True
return floors
def branch_to_next_step(game):
    print(game.steps_taken)
states = get_next_states(game)
games = [s for s in states]
for s in states:
s.steps_taken += 1
return games
if __name__ == "__main__":
main_floors = parse_input("input")
main_state = State(main_floors, 0, 0)
main_finished_games = []
main_games = [main_state]
main_seen_steps = [main_state]
while not main_finished_games and len(main_games) > 0:
main_next_games = []
for g in main_games:
main_branches = branch_to_next_step(g)
for main_branch in main_branches:
if main_branch.finished():
main_finished_games.append(main_branch)
if main_branch not in main_seen_steps:
main_next_games.append(main_branch)
main_seen_steps.append(main_branch)
main_games = main_next_games
if main_finished_games:
print "Steps in game: %d" % main_finished_games[0].steps_taken
print "Num games: %d" % len(main_finished_games)
else:
print "no finished games!"
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Isaac Saito, Ze'ev Klapow, Austin Hendrix
from math import floor
from collections import deque
import rospy
from python_qt_binding.QtCore import QPointF, Signal, Slot
from python_qt_binding.QtGui import QColor, QIcon
from python_qt_binding.QtWidgets import QGraphicsPixmapItem, QGraphicsView, \
QGraphicsScene
import rqt_robot_monitor.util_robot_monitor as util
from diagnostic_msgs.msg import DiagnosticStatus
class TimelineView(QGraphicsView):
"""
This class draws a graphical representation of a timeline.
This is ONLY the bar and colored boxes.
When you instantiate this class, do NOT forget to call set_init_data to
set necessary data.
"""
paused = Signal(bool)
position_changed = Signal(int)
redraw = Signal()
def __init__(self, parent=None):
"""Cannot take args other than parent due to loadUi limitation."""
super(TimelineView, self).__init__(parent=parent)
self._timeline_marker = QIcon.fromTheme('system-search')
self._min = 0
self._max = 0
self._xpos_marker = -1
self._timeline_marker_width = 15
self._timeline_marker_height = 15
self._last_marker_at = -1
self.setUpdatesEnabled(True)
self._scene = QGraphicsScene(self)
self.setScene(self._scene)
self._levels = None
self.redraw.connect(self._signal_redraw)
def mouseReleaseEvent(self, event):
"""
:type event: QMouseEvent
"""
xpos = self.pos_from_x(event.x())
self.set_marker_pos(xpos)
def mousePressEvent(self, event):
"""
:type event: QMouseEvent
"""
# Pause the timeline
self.paused.emit(True)
xpos = self.pos_from_x(event.x())
self.set_marker_pos(xpos)
def mouseMoveEvent(self, event):
"""
:type event: QMouseEvent
"""
xpos = self.pos_from_x(event.x())
self.set_marker_pos(xpos)
def pos_from_x(self, x):
"""
Get the index in the timeline from the mouse click position
:param x: Position relative to self widget.
:return: Index
"""
width = self.size().width()
# determine value from mouse click
width_cell = width / float(max(len(self._levels), 1))
position = int(floor(x / width_cell))
if position == len(self._levels) - 1:
return -1
return position
@Slot(int)
def set_marker_pos(self, xpos):
"""
Set marker position from index
:param xpos: Marker index
"""
if self._levels is None:
rospy.logwarn('Called set_marker_pos before set_levels')
return
if xpos == -1:
# stick to the latest when position is -1
self._xpos_marker = -1
# check if we chose latest item
if self._last_marker_at != self._xpos_marker:
# update variable to check for change during next round
self._last_marker_at = self._xpos_marker
# emit change to all timeline_panes
self.position_changed.emit(self._xpos_marker)
self.redraw.emit()
return
self._xpos_marker = self._clamp(xpos, self._min, self._max)
if self._xpos_marker == self._last_marker_at:
# Clicked the same pos as last time.
return
elif self._xpos_marker >= len(self._levels):
# When clicked out-of-region
return
self._last_marker_at = self._xpos_marker
# Set timeline position. This broadcasts the message at that position
# to all of the other viewers
self.position_changed.emit(self._xpos_marker)
self.redraw.emit()
def _clamp(self, val, min, max):
"""
Judge if val is within the range given by min & max.
If not, return either min or max.
:type val: any number format
:type min: any number format
:type max: any number format
:rtype: int
"""
if (val < min):
return min
if (val > max):
return max
return val
@Slot(list)
def set_levels(self, levels):
self._levels = levels
self.redraw.emit()
@Slot()
def _signal_redraw(self):
"""
Gets called either when new msg comes in or when marker is moved by
user.
"""
if self._levels is None:
return
# update the limits
self._min = 0
self._max = len(self._levels) - 1
self._scene.clear()
qsize = self.size()
width_tl = qsize.width()
w = width_tl / float(max(len(self._levels), 1))
is_enabled = self.isEnabled()
for i, level in enumerate(self._levels):
h = self.viewport().height()
# Figure out each cell's color.
qcolor = QColor('grey')
if is_enabled and level is not None:
if level > DiagnosticStatus.ERROR:
# Stale items should be reported as errors unless all stale
level = DiagnosticStatus.ERROR
qcolor = util.level_to_color(level)
# TODO Use this code for adding gradation to the cell color.
# end_color = QColor(0.5 * QColor('red').value(),
# 0.5 * QColor('green').value(),
# 0.5 * QColor('blue').value())
self._scene.addRect(w * i, 0, w, h, QColor('white'), qcolor)
# Getting marker index.
xpos_marker = self._xpos_marker
# If marker is -1 for latest use (number_of_cells -1)
if xpos_marker == -1:
xpos_marker = len(self._levels) - 1
        # Get the horizontal pixel value of the selected cell's center
xpos_marker_in_pixel = (xpos_marker * w +
(w / 2.0) - (self._timeline_marker_width / 2.0))
pos_marker = QPointF(xpos_marker_in_pixel, 0)
        # Need to instantiate the marker every time since it gets deleted
# in every loop by scene.clear()
timeline_marker = self._instantiate_tl_icon()
timeline_marker.setPos(pos_marker)
self._scene.addItem(timeline_marker)
def _instantiate_tl_icon(self):
timeline_marker_icon = QIcon.fromTheme('system-search')
timeline_marker_icon_pixmap = timeline_marker_icon.pixmap(
self._timeline_marker_width,
self._timeline_marker_height)
return QGraphicsPixmapItem(timeline_marker_icon_pixmap)
|
"""Main module."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Union
@dataclass
class Img:
name: str
size: int = 0
@dataclass
class ImgNode:
img: Img
prev: Union[ImgNode, None] = None
next: Union[ImgNode, None] = None
def chain_prev(self, prev_node: ImgNode):
self.prev = prev_node
def chain_next(self, next_node: ImgNode):
self.next = next_node
def unchain_prev(self) -> ImgNode:
node = self.prev
self.prev = None
return node
def unchain_next(self) -> ImgNode:
node = self.next
self.next = None
return node
@dataclass
class ImgManager:
cache_size: int = 0
_head_node: Union[ImgNode, None] = None
_tail_node: Union[ImgNode, None] = None
_img_map: dict = field(default_factory=dict)
_free_cache_available: int = field(init=False)
def __post_init__(self):
self._free_cache_available = self.cache_size
def actual_cache_size(self) -> int:
return self._free_cache_available
def _take_free_cache_storage(self, size: int) -> int:
if self._free_cache_available - size < 0:
raise Exception("attempted to overflow the size of the cache")
self._free_cache_available -= size
return self._free_cache_available
def _recover_cache_storage(self, size: int):
if self._free_cache_available + size > self.cache_size:
raise Exception("attempted to recover unassigned storage")
self._free_cache_available += size
return self._free_cache_available
def _cache_image(self, img: Img):
self._img_map[img.name] = img
def _remove_from_cache(self, img: Img) -> Img:
n_img = self._img_map[img.name]
del self._img_map[img.name]
return n_img
def _image_cached(self, key: str) -> Img:
return self._img_map.get(key, None)
def _insert_image_on_top(self, img: Img):
node = ImgNode(img)
if self._head_node is None:
self._head_node, self._tail_node = node, node
else:
self._head_node.chain_next(node)
node.chain_prev(self._head_node)
self._head_node = node
def _drop_lru_image(self) -> Img:
if self._tail_node is None:
raise Exception("no nodes at all.")
else:
node = self._tail_node
next_node = node.unchain_next()
if next_node:
assert next_node.unchain_prev() == node
self._tail_node = next_node
else:
self._head_node, self._tail_node = None, None
return node.img
def push_image(self, img: Img) -> bool:
while True:
if self._image_cached(img.name):
return True
if img.size > self.cache_size:
return False
if self._free_cache_available - img.size < 0:
lru_image = self._drop_lru_image()
self._remove_from_cache(lru_image)
self._recover_cache_storage(lru_image.size)
else:
self._cache_image(img)
self._take_free_cache_storage(img.size)
self._insert_image_on_top(img)
return True
def get_head_image(self) -> Union[Img, None]:
if self._head_node:
return self._head_node.img
return None
def get_tail_image(self) -> Union[Img, None]:
if self._tail_node:
return self._tail_node.img
return None
def get_free_cache_available(self) -> int:
return self._free_cache_available
def num_of_cached_imgs(self) -> int:
return len(self._img_map)
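# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a cache of 10
# units that evicts the oldest image when a new one does not fit. The image
# names and sizes are made up for the example.
if __name__ == "__main__":
    manager = ImgManager(cache_size=10)
    assert manager.push_image(Img("a", 4))
    assert manager.push_image(Img("b", 4))
    # "c" does not fit, so the least recently inserted image ("a") is dropped
    assert manager.push_image(Img("c", 4))
    assert manager.get_tail_image().name == "b"
    assert manager.get_head_image().name == "c"
    assert manager.num_of_cached_imgs() == 2
    assert manager.get_free_cache_available() == 2
    # an image larger than the whole cache is rejected outright
    assert not manager.push_image(Img("huge", 11))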
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for displaying an overview of Pinpoint."""
from dashboard.pinpoint import request_handler
class MainHandler(request_handler.RequestHandler):
"""Shows the main overview for Pinpoint."""
def get(self):
"""Renders the UI for main overview page."""
self.RenderStaticHtml('main.html')
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageReference(Model):
"""The image reference.
All required parameters must be populated in order to send to Azure.
:param publisher: Required. Publisher of the image.
:type publisher: str
:param offer: Required. Offer of the image.
:type offer: str
:param sku: Required. SKU of the image.
:type sku: str
:param version: Version of the image.
:type version: str
:param virtual_machine_image_id: The ARM resource identifier of the
virtual machine image. Computes nodes of the cluster will be created using
this custom image. This is of the form
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}.
The virtual machine image must be in the same region and subscription as
the cluster. For information about the firewall settings for the Batch
node agent to communicate with the Batch service see
https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration.
Note, you need to provide publisher, offer and sku of the base OS image of
which the custom image has been derived from.
:type virtual_machine_image_id: str
"""
_validation = {
'publisher': {'required': True},
'offer': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'publisher': {'key': 'publisher', 'type': 'str'},
'offer': {'key': 'offer', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'virtual_machine_image_id': {'key': 'virtualMachineImageId', 'type': 'str'},
}
def __init__(self, *, publisher: str, offer: str, sku: str, version: str=None, virtual_machine_image_id: str=None, **kwargs) -> None:
super(ImageReference, self).__init__(**kwargs)
self.publisher = publisher
self.offer = offer
self.sku = sku
self.version = version
self.virtual_machine_image_id = virtual_machine_image_id
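# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client): constructing
# the model with its three required fields; the values are placeholders.
if __name__ == '__main__':
    image = ImageReference(publisher='Canonical', offer='UbuntuServer', sku='16.04-LTS')
    print(image.publisher, image.offer, image.sku, image.version)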
|
import collections
from typing import List, Tuple
class Report:
"""
    A column-based report outputting data from the system.
    Composed of a ReportHeader and a ReportBody, which in turn
    contains multiple ReportLine objects.
"""
def __init__(self, data: List[Tuple], titles: List) -> None:
self.titles = titles
self.data = data
self.header = ReportHeader(titles)
self.body = ReportBody(self.data)
self._create_presentation()
def _create_presentation(self):
self.presentation = []
self.presentation.append(str(self.header))
for line in self.body.data:
self.presentation.append(str(line))
class ReportCell:
"A ReportLine is comprised of a list of ReportCell objects."
def __init__(self, text: str) -> None:
pass
class ReportHeader:
"The top line of a Report."""
def __init__(self, headers: List) -> None:
self.headers = headers
def __str__(self):
return " | ".join(self.headers)
class ReportBody:
"The body of a Report, containing ReportLine objects."
def __init__(self, data: List[Tuple]) -> None:
self.data = data
class ReportLine:
"""
Formats a tuple to create a line suitable for including in a Report.
"""
def __init__(self, report: Report, data: Tuple) -> None:
self.report = report
self.data = data
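# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): with
# ReportLine/ReportCell still stubs, each data tuple is rendered via str().
# The data and titles below are made up for the example.
if __name__ == "__main__":
    report = Report(data=[("widgets", 3), ("gadgets", 5)], titles=["Item", "Qty"])
    print("\n".join(report.presentation))
    # Item | Qty
    # ('widgets', 3)
    # ('gadgets', 5)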
|
import sys
sys.path.append("../")
from ucsdpcb import PcbPlacer, PcbRouter, PcbDB
db = PcbDB.kicadPcbDataBase('../module/PCBBenchmarks/bm9/bm9.routed.kicad_pcb')
db.printNodes()
placer = PcbPlacer.GridBasedPlacer(db)
placer.set_num_iterations(1)
placer.set_iterations_moves(1)
placer.test_placer_flow()
db.printNodes()
router = PcbRouter.GridBasedRouter(db)
router.set_num_ripup_reroute_iteration(1)
router.route_all_net_with_ripup_and_reroute()
db.printKiCad()
|
from common import config
from common import util
class entityObject(object):
"""Read YAML config file to get access info
(device ip, tcp port, user name, keys, etc)
"""
def __init__(self, entities_yml_file=config.cfg.entities_yml_file, *args, **kwargs):
self.entities_yml_file = entities_yml_file
self.entities = util.read_data_file(self.entities_yml_file)
self.users = util.myDict(self.getUsers())
self.devices = util.myDict(self.getDevices())
def getUsers(self):
return self.entities.snmpv3_users
def getDevices(self):
return self.entities.devices
entities = entityObject()
|
# -*- coding: utf-8 -*-
import numpy as np
import copy
from pyJaya.consts import minimaxType
from pyJaya.population import Population
class JayaBase(object):
"""Jaya base class
Args:
numSolutions (int): Number of solutions of population.
listVars (list): Range list.
        functionToEvaluate (function): Function to minimize or maximize.
space (bool): Spaced numbers over a specified interval.
minimaxType (minimaxType, optional): Min or Max. Defaults to [minimize]
listConstraints (list, optional): Constraint list. Defaults to [].
population (Population, optional): Population. Defaults to None.
"""
def __init__(
self, numSolutions, listVars, functionToEvaluate, space=False,
minimaxType=minimaxType['minimize'], listConstraints=[],
population=None):
super(JayaBase, self).__init__()
self.functionToEvaluate = functionToEvaluate
self.numSolutions = numSolutions
self.listVars = listVars
self.cantVars = len(listVars)
self.minimax = minimaxType
self.listConstraints = listConstraints
self.space = space
if population is None:
self.population = self.generatePopulation()
else:
self.population = copy.deepcopy(population)
def generate_rn(self, number_iterations):
"""Generate random numbers
"""
rn = [None] * number_iterations
for iter in range(number_iterations):
rn[iter] = [None] * self.cantVars
for y in range(self.cantVars):
rn[iter][y] = [None] * 2
for j in range(2):
np.random.seed()
rn[iter][y][j] = np.random.rand()
return rn
def generatePopulation(self):
"""Generate population
Returns:
Population: Population generated.
"""
population = Population(self.minimax)
population.generate(
self.numSolutions, self.listVars, self.functionToEvaluate,
self.space, self.listConstraints)
return population
def addConstraint(self, constraintFuntion):
"""Add constraint
Args:
            constraintFuntion (function): Function to add as a constraint.
"""
self.listConstraints.append(constraintFuntion)
def toMaximize(self):
"""Change to maximize funtion.
"""
self.minimax = minimaxType['maximize']
self.population.toMaximize()
def getBestAndWorst(self):
"""Best and worst value and solution
Returns:
dict: Best value, worst value, best solution and worst solution.
"""
return self.population.getBestAndWorst()
def run(self, number_iterations, rn=[]):
"""Client must define it self"""
raise NotImplementedError("Client must define it self")
|
import torch
def gather_elementwise(tensor, idx_tensor):
'''
    For `tensor.shape = tensor_shape + (K,)`
    and `idx_tensor.shape = tensor_shape` with elements in {0,1,...,K-1},
    returns a tensor of shape `tensor_shape` whose entry at each position is
    the element of `tensor` selected along the last axis by `idx_tensor`.
'''
return tensor.gather(-1, idx_tensor[..., None])[..., 0]
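# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module):
# out[i, j] == tensor[i, j, idx_tensor[i, j]].
if __name__ == "__main__":
    t = torch.arange(24.0).reshape(2, 3, 4)
    idx = torch.tensor([[0, 1, 2], [3, 0, 1]])
    out = gather_elementwise(t, idx)
    assert torch.equal(out, torch.tensor([[0.0, 5.0, 10.0], [15.0, 16.0, 21.0]]))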
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.osconfig.agentendpoint_v1beta.types import patch_jobs
__protobuf__ = proto.module(
package='google.cloud.osconfig.agentendpoint.v1beta',
manifest={
'TaskDirective',
'TaskType',
'Task',
'ApplyPatchesTask',
'ApplyPatchesTaskProgress',
'ApplyPatchesTaskOutput',
'ExecStepTask',
'ExecStepTaskProgress',
'ExecStepTaskOutput',
},
)
class TaskDirective(proto.Enum):
r"""Specifies the current agent behavior."""
TASK_DIRECTIVE_UNSPECIFIED = 0
CONTINUE = 1
STOP = 2
class TaskType(proto.Enum):
r"""Specifies the type of task to perform."""
TASK_TYPE_UNSPECIFIED = 0
APPLY_PATCHES = 1
EXEC_STEP_TASK = 2
class Task(proto.Message):
r"""A unit of work to be performed by the agent.
Attributes:
task_id (str):
Unique task id.
task_type (google.cloud.osconfig.agentendpoint_v1beta.types.TaskType):
The type of task to perform.
Task details must include the appropriate message based on
this enum as specified below: APPLY_PATCHES =
ApplyPatchesTask EXEC_STEP = ExecStepTask;
task_directive (google.cloud.osconfig.agentendpoint_v1beta.types.TaskDirective):
Current directive to the agent.
apply_patches_task (google.cloud.osconfig.agentendpoint_v1beta.types.ApplyPatchesTask):
Details about the apply patches task to
perform.
exec_step_task (google.cloud.osconfig.agentendpoint_v1beta.types.ExecStepTask):
Details about the exec step task to perform.
service_labels (Sequence[google.cloud.osconfig.agentendpoint_v1beta.types.Task.ServiceLabelsEntry]):
Labels describing the task. Used for logging
by the agent.
"""
task_id = proto.Field(
proto.STRING,
number=1,
)
task_type = proto.Field(
proto.ENUM,
number=2,
enum='TaskType',
)
task_directive = proto.Field(
proto.ENUM,
number=3,
enum='TaskDirective',
)
apply_patches_task = proto.Field(
proto.MESSAGE,
number=4,
oneof='task_details',
message='ApplyPatchesTask',
)
exec_step_task = proto.Field(
proto.MESSAGE,
number=5,
oneof='task_details',
message='ExecStepTask',
)
service_labels = proto.MapField(
proto.STRING,
proto.STRING,
number=6,
)
class ApplyPatchesTask(proto.Message):
r"""Message which instructs agent to apply patches.
Attributes:
patch_config (google.cloud.osconfig.agentendpoint_v1beta.types.PatchConfig):
Specific information about how patches should
be applied.
dry_run (bool):
If true, the agent will report its status as
it goes through the motions but won't actually
run any updates or perform any reboots.
"""
patch_config = proto.Field(
proto.MESSAGE,
number=1,
message=patch_jobs.PatchConfig,
)
dry_run = proto.Field(
proto.BOOL,
number=3,
)
class ApplyPatchesTaskProgress(proto.Message):
r"""Information reported from the agent about applying patches
execution.
Attributes:
state (google.cloud.osconfig.agentendpoint_v1beta.types.ApplyPatchesTaskProgress.State):
Required. The current state of this patch
execution.
"""
class State(proto.Enum):
r"""The intermediate states of applying patches."""
STATE_UNSPECIFIED = 0
STARTED = 4
DOWNLOADING_PATCHES = 1
APPLYING_PATCHES = 2
REBOOTING = 3
state = proto.Field(
proto.ENUM,
number=1,
enum=State,
)
class ApplyPatchesTaskOutput(proto.Message):
r"""Information reported from the agent about applying patches
execution.
Attributes:
state (google.cloud.osconfig.agentendpoint_v1beta.types.ApplyPatchesTaskOutput.State):
Required. The final state of this task.
"""
class State(proto.Enum):
r"""The final states of applying patches."""
STATE_UNSPECIFIED = 0
SUCCEEDED = 1
SUCCEEDED_REBOOT_REQUIRED = 2
FAILED = 3
state = proto.Field(
proto.ENUM,
number=1,
enum=State,
)
class ExecStepTask(proto.Message):
r"""Message which instructs agent to execute the following
command.
Attributes:
exec_step (google.cloud.osconfig.agentendpoint_v1beta.types.ExecStep):
Details of the exec step to run.
"""
exec_step = proto.Field(
proto.MESSAGE,
number=1,
message=patch_jobs.ExecStep,
)
class ExecStepTaskProgress(proto.Message):
r"""Information reported from the agent about the exec step
execution.
Attributes:
state (google.cloud.osconfig.agentendpoint_v1beta.types.ExecStepTaskProgress.State):
Required. The current state of this exec
step.
"""
class State(proto.Enum):
r"""The intermediate states of exec steps."""
STATE_UNSPECIFIED = 0
STARTED = 1
state = proto.Field(
proto.ENUM,
number=1,
enum=State,
)
class ExecStepTaskOutput(proto.Message):
r"""Information reported from the agent about the exec step
execution.
Attributes:
state (google.cloud.osconfig.agentendpoint_v1beta.types.ExecStepTaskOutput.State):
Required. The final state of the exec step.
exit_code (int):
Required. The exit code received from the
script which ran as part of the exec step.
"""
class State(proto.Enum):
r"""The final states of exec steps."""
STATE_UNSPECIFIED = 0
COMPLETED = 1
TIMED_OUT = 2
CANCELLED = 3
state = proto.Field(
proto.ENUM,
number=1,
enum=State,
)
exit_code = proto.Field(
proto.INT32,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
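# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated module): constructing
# a Task message with proto-plus keyword arguments; the task id is a made-up
# placeholder.
if __name__ == '__main__':
    demo_task = Task(task_id='task-123', task_directive=TaskDirective.CONTINUE)
    print(demo_task.task_id, demo_task.task_directive)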
|
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import random
import caffe
from lib import run_net
from lib import score_util
from datasets.cityscapes import cityscapes
net = caffe.Net('../nets/stage-cityscapes-fcn8s.prototxt',
'../nets/cityscapes-fcn8s-heavy.caffemodel',
caffe.TEST)
CS = cityscapes('/x/cityscapes')
n_cl = len(CS.classes)
split = 'val'
label_frames = CS.list_label_frames(split)
hist_perframe = np.zeros((n_cl, n_cl))
for i, idx in enumerate(label_frames):
if i % 100 == 0:
print('running {}/{}'.format(i, len(label_frames)))
city = idx.split('_')[0]
# idx is city_shot_frame
im = CS.load_image(split, city, idx)
out = run_net.segrun(net, CS.preprocess(im))
label = CS.load_label(split, city, idx)
hist_perframe += score_util.fast_hist(label.flatten(), out.flatten(), n_cl)
accP, cl_accP, mean_iuP, fw_iuP = score_util.get_scores(hist_perframe)
print('Oracle: Per frame')
print('acc\t\t cl acc\t\t mIU\t\t fwIU')
print('{:f}\t {:f}\t {:f}\t {:f}\t'.format(100*accP, 100*cl_accP, 100*mean_iuP, 100*fw_iuP))
hist_baseline = np.zeros((n_cl, n_cl))
for i, idx in enumerate(label_frames):
if i % 100 == 0:
print('running {}/{}'.format(i, len(label_frames)))
city = idx.split('_')[0]
all_frames = CS.collect_frame_sequence(split, idx, 19) # list of Images including labeled frame
label = CS.load_label(split, city, idx) # label for CURRENT frame
choice = random.random() # in [0,1)
if choice < 0.5:
preceding_frame = all_frames[-2] # do previous frame
out = run_net.segrun(net, CS.preprocess(preceding_frame))
hist_baseline += score_util.fast_hist(label.flatten(), out.flatten(), n_cl)
else:
curr_frame = all_frames[-1]
out = run_net.segrun(net, CS.preprocess(curr_frame))
hist_baseline += score_util.fast_hist(label.flatten(), out.flatten(), n_cl)
acc, cl_acc, mean_iu, fw_iu = score_util.get_scores(hist_baseline)
print('Baseline: Full FCN every other frame')
print('acc\t\t cl acc\t\t mIU\t\t fwIU')
print('{:f}\t {:f}\t {:f}\t {:f}\t'.format(100*acc, 100*cl_acc, 100*mean_iu, 100*fw_iu))
hist_altern = np.zeros((n_cl, n_cl))
for i, idx in enumerate(label_frames):
if i % 100 == 0:
print('running {}/{}'.format(i, len(label_frames)))
city = idx.split('_')[0]
all_frames = CS.collect_frame_sequence(split, idx, 19) # list of Images including labeled frame
label = CS.load_label(split, city, idx)
curr_frame = all_frames[-1]
choice = random.random() # in [0,1)
if choice < 0.5:
# Push previous frame through the net
preceding_frame = all_frames[-2] # do previous frame
_ = run_net.segrun(net, CS.preprocess(preceding_frame))
# Update lower layers on current frame and get prediction
out = run_net.clockwork_forward(net, CS.preprocess(curr_frame))
hist_altern += score_util.fast_hist(label.flatten(), out.flatten(), n_cl)
else:
out = run_net.segrun(net, CS.preprocess(curr_frame))
hist_altern += score_util.fast_hist(label.flatten(), out.flatten(), n_cl)
acc, cl_acc, mean_iu, fw_iu = score_util.get_scores(hist_altern)
print('Alternating Clockwork')
print('acc\t\t cl acc\t\t mIU\t\t fwIU')
print('{:f}\t {:f}\t {:f}\t {:f}\t'.format(100 * acc, 100 * cl_acc, 100 * mean_iu, 100 * fw_iu))
# collect all preceding frames in the Cityscapes sequence surrounding each annotated frame
SEQ_LEN = 19
def scoremap_diff(prev_scores, scores):
prev_seg = prev_scores.argmax(axis=0).astype(np.uint8).copy()
curr_seg = scores.argmax(axis=0).astype(np.uint8).copy()
diff = np.array(prev_seg != curr_seg).mean()
return diff
def adaptive_clockwork_cityscapes(thresh):
hist = np.zeros((n_cl, n_cl))
num_frames = 0 # number of frames in total
num_update_frames = 0 # number of frames when clock fires
for idx in CS.list_label_frames('val'):
city = idx.split('_')[0]
# run on sequence of preceding frames, fully processing the first frame
frames = CS.collect_frame_sequence('val', idx, SEQ_LEN)
first_frame, frames = frames[0], frames[1:]
_ = run_net.segrun(net, CS.preprocess(first_frame))
prev_score = net.blobs['score_pool4'].data[0].copy()
num_frames += 1
for f in frames:
num_frames += 1
# Run to pool4 on current frame
run_net.feed_net(net, CS.preprocess(f))
net.forward(start='conv1_1', end='score_pool4')
curr_score = net.blobs['score_pool4'].data[0].copy()
# Decide whether or not to update to fc7
if scoremap_diff(prev_score, curr_score) >= thresh:
net.forward(start='conv5_1', end='upscore2')
prev_score = net.blobs['score_pool4'].data[0].copy()
num_update_frames += 1
# Compute full merge score on the annotated frame (the last frame)
net.forward(start='score_pool4c')
out = net.blobs['score'].data[0].argmax(axis=0).astype(np.uint8)
label = CS.load_label('val', city, idx)
hist += score_util.score_out_gt(out, label, n_cl=n_cl)
acc, cl_acc, mean_iu, fw_iu = score_util.get_scores(hist)
print('Adaptive Clockwork: Threshold', thresh, ' Updated {:d}/{:d} frames ({:2.1f}%)'.format(num_update_frames, num_frames, 100.0*num_update_frames/num_frames))
print('acc\t cl acc\t mIU\t fwIU')
print('{:2.1f}\t {:2.1f}\t {:2.1f}\t {:2.1f}\t'.format(100*acc, 100*cl_acc, 100*mean_iu, 100*fw_iu))
return acc, cl_acc, mean_iu, fw_iu
for thresh in (0.25, 0.35, 0.47):
adaptive_clockwork_cityscapes(thresh)
|
from XiguaUser_pb2 import User as UserPb
class User:
def __init__(self, json=None):
self.ID = 0
self.name = ""
self.brand = ""
self.level = 0
self.type = 0
self.block = False
self.mute = False
if json:
if type(json) == bytes:
self.parsePb(json)
elif type(json) == UserPb:
self.parseUserPb(json)
else:
self.parse(json)
def parseUserPb(self, _user):
self.ID = _user.id
self.name = _user.nickname
self.brand = _user.fansClub.fansClub.title
self.level = _user.fansClub.fansClub.level
def parsePb(self, raw):
_user = UserPb()
_user.ParseFromString(raw)
self.parseUserPb(_user)
def parse(self, json):
if "extra" in json:
if "user" in json["extra"] and json["extra"]["user"] is not None:
self.ID = json["extra"]['user']['user_id']
self.name = json["extra"]['user']['name']
if "im_discipulus_info" in json["extra"] and json["extra"]["im_discipulus_info"] is not None:
self.level = json["extra"]["im_discipulus_info"]["level"]
self.brand = json["extra"]["im_discipulus_info"]["discipulus_group_title"]
if "user_room_auth_status" in json["extra"] and json["extra"]["user_room_auth_status"] is not None:
self.type = json["extra"]["user_room_auth_status"]["user_type"]
self.block = json["extra"]["user_room_auth_status"]["is_block"]
self.mute = json["extra"]["user_room_auth_status"]["is_silence"]
if "user_info" in json and json["user_info"] is not None:
self.ID = json['user_info']['user_id']
self.name = json['user_info']['name']
if "anchor" in json and json["anchor"] is not None:
if "user_info" in json["anchor"] and json["anchor"]['user_info'] is not None:
self.ID = json["anchor"]['user_info']['user_id']
self.name = json["anchor"]['user_info']['name']
if "user_id" in json:
self.ID = json["user_id"]
if "user_name" in json:
self.name = json["user_name"]
if self.type is None:
self.type = 0
if isinstance(self.level, str):
self.level = int(self.level)
def __str__(self):
if self.level == 0:
if self.type == 1:
return "[房管]{}".format(self.name)
elif self.type == 3:
return "[主播]{}".format(self.name)
else:
return "{}".format(self.name)
else:
if self.type != 0:
return "[{}{}]{}".format(self.brand, self.level, self.name)
return "<{}{}>{}".format(self.brand, self.level, self.name)
def __unicode__(self):
return self.__str__()
def __repr__(self):
return "西瓜用户【{}(ID:{})】".format(self.name, self.ID)
|
from arena import Arena
from agent import HAgent, AAgent
from helper import *
from trainer import qtrainer
from environment import Env
import random
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
# init size, num_humans, num_targets, amount of half cover
# np.random.seed(1234)
# init environment
num = [20, 1, 1, 20]
agents = {}
targets = {}
a = Arena(num[0], num[1], num[2])
a, agents = place_soldiers(num[1], a, agents)
a, targets, t_pos = place_targets(num[2], a, targets)
a = place_half_cover(num[3], a)
env = Env(a, agents, targets, num[1])
Q, stat = qtrainer(env, 20, t_pos)
plt.plot(range(20), stat['ep_rewards'])
plt.xlabel('Episodes')
plt.ylabel('Reward')
plt.show()
env = env.env_reset()
pass
|
'''
General information for compound and reaction naming and formatting.
'''
from pathlib import Path
# get the path of the project folder
# (one directory above that of this script)
path = Path(__file__)
script_dir = path.parent
parent = script_dir.parents[1]
# to be implemented
# import pandas as pd
# compound_info = pd.read_csv(parent/'COMPOUND_INFO/compound_properties.csv', index_col = 0)
# load
info_container = []
with open(parent/'COMPOUND_INFO/compound_properties.csv', 'r') as f:
for c, line in enumerate(f):
if c == 0:
header = line.strip('\n').split(',')
else:
spl = line.strip('\n').split(',')
info_container.append(spl)
info_container = [list(i) for i in zip(*info_container)]
props_dict = {}
for n,i in zip(header, info_container):
props_dict[n] = i
colour_assignments = {k:v for k,v in
zip(props_dict['@ SMILES'], props_dict['colour'])}
for c,p in enumerate(props_dict['@@ SMILES']):
if p in colour_assignments:
pass
else:
colour_assignments[p] = props_dict['colour'][c]
for c,p in enumerate(props_dict['Other_names']):
for s in p.split(';'):
if s == '':
pass
elif s in colour_assignments:
pass
else:
colour_assignments[s] = props_dict['colour'][c]
molecular_masses = {k:float(v) for k,v in
zip(props_dict['@ SMILES'], props_dict['Mr_gmol-1'])}
canonical_SMILES = {k:v for k,v in
zip(props_dict['compound_name'], props_dict['@ SMILES'])}
for a,b in zip(props_dict['Other_names'], props_dict['@ SMILES']):
for s in a.split(';'):
if s != '':
canonical_SMILES[s] = b
smiles_to_names = {}
for c,v in enumerate(props_dict['compound_name']):
spl_name = v.split(' ')[0]
smiles_to_names[props_dict['@ SMILES'][c]] = spl_name
class_assignments = {k:v for k,v in
zip(props_dict['@ SMILES'], props_dict['Class'])}
for sm,cls in zip(props_dict['@@ SMILES'], props_dict['Class']):
class_assignments[sm] = cls
reaction_SMARTS = {}
reaction_class_colours = {}
reaction_class_short_names = {}
reaction_class_names = {}
with open(parent/'REACTION_INFO/reaction_SMARTS_templates.tsv', 'r') as f:
for c,line in enumerate(f):
if c==0:
pass
else:
ins = line.strip('\n').split('\t')
reaction_SMARTS[ins[0]] = ins[3]
reaction_class_colours[ins[0]] = ins[4]
reaction_class_colours[ins[6]] = ins[4]
reaction_class_short_names[ins[0]] = ins[5]
reaction_class_names[ins[0]] = ins[6]
with open(parent/'COMPOUND_INFO/compound_numbering.txt', 'r') as f:
lines = f.readlines()
lines = [l.strip('\n') for l in lines]
compound_numbering = {}
for l in lines:
SMILES_num = l.split(',')
compound_numbering[SMILES_num[0]] = SMILES_num[1]
reaction_colours = {}
with open(parent/"REACTION_INFO/reaction_colour_assignments.csv", 'r') as f:
lines = f.readlines()
lines = [l.strip('\n').split(',') for l in lines][1:]
reaction_colours = {l[0]:l[1] for l in lines}
|
from typing import Type
from unittest import mock
import copy
from src.database import settings
from pytest import raises
def generate_setting_cls(validate_return: bool) -> Type[settings.Setting]:
class Foo(settings.Setting):
set_was_called = False
@staticmethod
def _validate_input(*args) -> bool:
return validate_return
@staticmethod
def get_value(*args) -> None:
pass
@staticmethod
def _set_value_core(*args) -> None:
Foo.set_was_called = True
return Foo
class TestGenerateSetting:
def test_true(self) -> None:
"""
Test code needs testing :P
"""
foo = generate_setting_cls(True)
assert foo()._validate_input(None, None, None) is True # type: ignore
def test_false(self) -> None:
"""
Test code needs testing :P
"""
foo = generate_setting_cls(False)
assert foo()._validate_input(None, None, None) is False # type: ignore
class TestValidate:
def test_validate_pass(self) -> None:
Foo = generate_setting_cls(True)
bar = Foo()
bar.set_value(None, None, None) # type: ignore
assert Foo.set_was_called, "Foo.set_value_core was not called" # type: ignore
def test_validate_fail(self) -> None:
Foo = generate_setting_cls(False)
bar = Foo()
with raises(ValueError):
bar.set_value(None, None, None) # type: ignore
assert not Foo.set_was_called, "Foo.set_value_core was called" # type: ignore
class TestConvertor:
def setup(self):
self.cls = copy.deepcopy(settings.ConverterSetting)
self.cls._set_value_core = mock.Mock()
self.cls.get_value = mock.Mock()
self.cls.__abstractmethods__ = set()
    def test_convertor_success(self):
self.cls._converter = mock.Mock(return_value="abc")
Foo = self.cls()
Foo.set_value(None, None, None) # type: ignore
Foo._set_value_core.assert_called_once_with(None, None, "abc")
def test_convertor_fail(self):
# results in ValueError
self.cls._converter = lambda s, v: max([])
Foo = self.cls()
with raises(ValueError):
Foo.set_value(None, None, None) # type: ignore
Foo._set_value_core.assert_not_called()
|
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Software License Agreement (BSD License 2.0)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Isaac Saito
from __future__ import division
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtGui import QBrush, QStandardItem
from rqt_py_common.data_items import ReadonlyItem
from rqt_reconfigure import logging
from rqt_reconfigure.param_client_widget import ParamClientWidget
class TreenodeQstdItem(ReadonlyItem):
"""
Extending ReadonlyItem.
    The display content of this item shouldn't be modified.
"""
NODE_FULLPATH = 1
def __init__(self, context, *args):
"""
Tree node initialization.
:param args[0]: str (will become 1st arg of QStandardItem)
:param args[1]: integer value that indicates whether this class
                        is a node that has a GRN (Graph Resource Names, see
http://www.ros.org/wiki/Names). This can be None
"""
grn_current_treenode = args[0]
self._raw_param_name = grn_current_treenode
self._list_treenode_names = self._raw_param_name.split('/')[1:]
self._toplevel_treenode_name = self._list_treenode_names[0]
super(TreenodeQstdItem, self).__init__(grn_current_treenode)
self._context = context
self._param_client = None
# ParamClientWidget
self._param_client_widget = None
def reset(self):
self._param_client_widget = None
if self._param_client is not None:
self._param_client.close()
del self._param_client
self._param_client = None
def get_param_client_widget(self):
"""
Get the param_client_widget.
@rtype: ParamClientWidget (QWidget)
@return: None if param_client is not yet generated.
@raise ROSException:
"""
if not self._param_client_widget:
logging.debug('In get_param_client_widget 4')
self._param_client_widget = ParamClientWidget(
self._context, self._raw_param_name
)
"""
Creating the ParamClientWidget transfers ownership of the
_param_client to it. If it is destroyed from Qt, we need to
clear our reference to it and stop the param server thread we
had.
"""
self._param_client_widget.destroyed.connect(self.reset)
logging.debug('In get_param_client_widget 5')
return self._param_client_widget
def enable_param_items(self):
"""
Create QStdItem per parameter and addColumn them to myself.
:rtype: None if _param_client is not initiated.
"""
if not self._param_client_widget:
return None
param_names = self._param_client_widget.get_treenode_names()
param_names_items = []
brush = QBrush(Qt.lightGray)
for param_name in param_names:
item = ReadonlyItem(param_name)
item.setBackground(brush)
param_names_items.append(item)
logging.debug('enable_param_items len of param_names={}'.format(
len(param_names_items)
))
self.appendColumn(param_names_items)
def get_raw_param_name(self):
return self._raw_param_name
def get_treenode_names(self):
"""
Get tree node names.
:rtype: List of string. Null if param
"""
return self._list_treenode_names
def get_node_name(self):
"""
Get the node name.
:return: A value of single tree node (ie. NOT the fullpath node name).
Ex. suppose fullpath name is /top/sub/subsub/subsubsub and you
are at 2nd from top, the return value is subsub.
"""
return self._toplevel_treenode_name
def type(self): # noqa: A003
return QStandardItem.UserType
|
# Generated by Django 3.2 on 2022-03-11 15:44
import datetime
import django.db.models.deletion
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
("workspaces", "0002_alter_notionworkspace_icon_url"),
("tasks", "0009_alter_recurringtask_start_time"),
]
operations = [
migrations.AddField(
model_name="recurringtask",
name="workspace",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="tasks",
to="workspaces.notionworkspace",
),
),
migrations.AlterField(
model_name="recurringtask",
name="start_time",
field=models.DateTimeField(
default=datetime.datetime(2022, 3, 12, 15, 44, 39, 656257, tzinfo=utc)
),
),
]
|
import Python_API #Initializes Runtime
from jnius import autoclass
from Python_API.DataTypes.LongType import LongType
input_handler_send_proxy = autoclass("org.wso2.siddhi.InputHandlerSendProxy")
class InputHandler(object):
def __init__(self):
raise NotImplementedError("Initialize InputHandler using ExecutionPlanRuntime")
def __new__(cls):
bare_instance = object.__new__(cls)
bare_instance.input_handler_proxy = None
return bare_instance
def send(self,data):
'''
Sends data to stream
:param data:
:return:
'''
        #NOTE: Directly passing a list of data to org.wso2.siddhi.core.stream.input.InputHandler is not possible since
# Pyjnius creates the input array based on datatype of first element
#TODO: Try to improve the logic here by reducing the number of JNI Calls
i1 = input_handler_send_proxy(len(data))
for d in data:
if type(d) is float:
i1.putFloat(d)
elif type(d) is LongType:
i1.putLong(int(d))
elif type(d) is int:
i1.putInt(d)
elif type(d) is str:
i1.putString(d)
else:
print(type(d))
i1.send(self.input_handler_proxy)
@classmethod
def _fromInputHandlerProxy(cls, input_handler_proxy):
'''
Internal Constructor to wrap around JAVA Class InputHandler
:param input_handler_proxy:
:return:
'''
instance = cls.__new__(cls)
instance.input_handler_proxy = input_handler_proxy
return instance
|
"""
Helper functions for working with the NGA flatfile.
"""
# stdlib imports
import os
import pkg_resources
import logging
# third party imports
import numpy as np
import pandas as pd
from obspy.geodetics.base import gps2dist_azimuth
def get_nga_record_sequence_no(st, eq_name, distance_tolerance=50):
"""
Returns the associate NGA record sequence number for a given StationStream.
Args:
st (gmprocess.stationstream.StationStream):
Station stream to get record sequence number for.
eq_name (str):
Earthquake name for finding NGA record sequence numbers. Must
match a value in the 'Earthquake Name' column of the file
gmprocess/data/nga_w2_selected.csv.
distance_tolerance (float):
Distance tolerance (in meters) between StationStream location
coordinates and the NGA location coordinates.
Default is 50 meters.
Returns:
int: Matching record sequence number from NGA flatfile. Returns
numpy.nan if record sequence number is not found.
"""
df_nga = pd.read_csv(pkg_resources.resource_filename(
'gmprocess', os.path.join('data', 'nga_w2_selected.csv')))
nga_event = df_nga.loc[df_nga['Earthquake Name'] == eq_name]
lat = st[0].stats.coordinates.latitude
lon = st[0].stats.coordinates.longitude
matched_records_nos = []
for record_idx, record in nga_event.iterrows():
dist = gps2dist_azimuth(
lat, lon, record['Station Latitude'],
record['Station Longitude'])[0]
if dist < distance_tolerance:
matched_records_nos.append(record['Record Sequence Number'])
if len(matched_records_nos) > 1:
logging.warning('Found multiple matching records.')
return np.nan
elif len(matched_records_nos) < 1:
logging.warning('Did not find any matching records.')
return np.nan
else:
return matched_records_nos[0]
|
print("""
089) Crie um programa que leia nome e duas notas de vários alunos e guarde
tudo em uma lista composta. No final, mostre um boletim contendo a média de
cada um e permita que o usuário possa mostrar as notas de cada aluno
individualmente.
""")
alunos = []
indice = 0
titulo = ' Sistema de Cadastro e Visualização de Notas '
print('-'*len(titulo))
print(f'{titulo}')
print('-'*len(titulo))
### Fill the list of students with each name, two grades and the average between them
while True:
    ### On each iteration a list with two elements is appended to "alunos".
    ### The first element is an empty string, where the student's name
    ### will be stored.
    ### The second element is a new list containing three zeros.
    ### These zeros correspond, respectively, to the student's two grades
    ### and to the arithmetic mean between them
alunos.append(['', [0, 0, 0]])
nome = input(f'Aluno {indice+1:02d}: ').strip()
while True:
nota01 = float(input('Nota 01: ').strip())
if nota01 >= 0 and nota01 <= 10:
break
print('Informe uma nota válida. Valores entre 0 e 10.')
while True:
nota02 = float(input('Nota 02: ').strip())
if nota02 >= 0 and nota02 <= 10:
break
print('Informe uma nota válida. Valores entre 0 e 10.')
media = (nota01+nota02)/2
    ### Each index into "alunos" with the "indice" variable corresponds to
    ### one student. From there, the first index zero ("[0]") corresponds
    ### to the student's name
alunos[indice][0] = nome
    ### Here the first index one ("[1]") accesses the list holding each
    ### student's grades and average. These, in turn, are accessed,
    ### respectively, by the final indices zero ("[0]"), one ("[1]")
    ### and two ("[2]")
alunos[indice][1][0] = nota01
alunos[indice][1][1] = nota02
alunos[indice][1][2] = media
while True:
continuar = input('Deseja continuar? (s/n) ').strip().lower()
if continuar == 's' or continuar == 'n':
break
print('Digite S/s ou N/n para prosseguir ou suspender, \
respectivamente.')
if continuar == 'n':
break
    ### The reason for incrementing the "indice" variable only at the end,
    ### right after the continuation test, is that it only needs to be
    ### changed when the loop is not interrupted
indice += 1
### Display the report card with a sequence number, name and average for each student
print('-'*len(titulo))
print('{:^{tamanho}}'.format('Boletins', tamanho=len(titulo)))
print('-'*len(titulo))
print('{:<3} |{:^30}| {}'.format('Número', 'Aluno', 'Média'))
print('-'*len(titulo))
for posicao, item in enumerate(alunos):
    ### Here the "posicao" variable holds the index of each list that
    ### contains the student's name, grades and average. Those lists, in
    ### turn, are referenced on each iteration by "item". Keep in mind
    ### that this information is stored positionally, namely:
    ### - item[0]: ...... Name
    ### - item[1][0]: ... First grade
    ### - item[1][1]: ... Second grade
    ### - item[1][2]: ... Average
print(f'{posicao+1:06d} |{item[0]:.<30.25}|{item[1][2]:>5,.2f}')
print('-'*len(titulo))
### Display each student's grades individually
while True:
mensagemDeSaida = 'Ver boletim (999 para sair): '
boletimParaApresentar = int(input(mensagemDeSaida).strip())
if boletimParaApresentar == 999:
print('Programa finalizado!')
break
else:
        ### Note that, even though the same variable names from the
        ### list-building loop are reused here, there is no problem:
        ### they are simply rebound to new values and the two uses do
        ### not conflict
nome = alunos[boletimParaApresentar-1][0]
nota01 = alunos[boletimParaApresentar-1][1][0]
nota02 = alunos[boletimParaApresentar-1][1][1]
print(f'As notas de "{nome}" foram {nota01} e {nota02}')
|
import warnings
from itertools import chain
from typing import Tuple, List, Dict, Set
from .model import CleanAccount
from ...load.balances import Account, Snapshot
from ...load.transactions import Transaction
from ...log import logger
try:
# private configuration
# default_account is just a string with matches the to_institution for an account. if CSV is missing account info,
# or manual transactions don't have an account associated with it, it uses that (probably just my typical
# checking account/cash)
from .cleandata_conf import accounts_conf, default_account # type: ignore[import]
except ImportError:
warnings.warn("Could not import cleandata_conf.py")
# no extra personal config, so just use defaults
accounts_conf = lambda: [] # type: ignore[return-value, assignment]
default_account: str = "<NO ACCOUNT>" # type: ignore[no-redef]
# what clean_data uses to get CleanAccount information, to clean the data
# define a function in ./cleandata_conf.py called accounts_conf which
# returns an iterable of CleanAccount namedtuples, to fix the names of account/transactions
def get_configuration() -> List[CleanAccount]:
return list(accounts_conf())
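# A minimal sketch of what ./cleandata_conf.py could look like (hypothetical account
# names; the CleanAccount field names are inferred from how they are used below):
#
#   from .model import CleanAccount
#
#   def accounts_conf():
#       return [
#           CleanAccount(
#               from_institution="BANK OF X 1234",
#               from_account="CHK 9876",
#               from_account_type="checking",
#               to_institution="Bank of X",
#               to_account="Checking",
#               to_account_type="checking",
#           )
#       ]
#
#   default_account = "Checking"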
def clean_data(
balances: List[Snapshot], transactions: List[Transaction]
) -> Tuple[List[Snapshot], List[Transaction]]:
cleaners: List[CleanAccount] = get_configuration()
# create O(1) access, use non-nullable fields
cleaner_map: Dict[Tuple[str, str, str], CleanAccount] = {
(cl.from_institution, cl.from_account, cl.from_account_type): cl
for cl in cleaners
}
cleaned_balances: List[Snapshot] = []
# clean data for accounts on accounts
for snapshot in balances:
cleaned_accounts = []
for acc in snapshot.accounts:
# if this should be replaced
key = (acc.institution, acc.account, acc.account_type)
if key in cleaner_map:
cl: CleanAccount = cleaner_map[key]
cleaned_accounts.append(
Account(
institution=cl.to_institution, # replace metadata
account=cl.to_account,
account_type=cl.to_account_type,
current=acc.current,
available=acc.available,
limit=acc.limit,
currency=acc.currency,
)
)
else:
cleaned_accounts.append(acc)
cleaned_balances.append(Snapshot(at=snapshot.at, accounts=cleaned_accounts))
# replace account names on transactions
replace_account: Dict[str, str] = {
cl.from_account: cl.to_account for cl in cleaners
}
for tr in transactions:
if tr.account.strip() and tr.account in replace_account:
tr.account = replace_account[tr.account]
# validate data
# get names of all the accounts, including any manual ones
account_names: Set[str] = set(
chain(*[[acc.account for acc in cl.accounts] for cl in cleaned_balances]) # type: ignore[arg-type]
)
# make sure every transaction is attached to an account
# could just define a 'cash' institution if you wanted to keep track of money in wallet
for tr in transactions:
if tr.account not in account_names:
logger.debug("Using default account name for {}...".format(tr))
tr.account = default_account
# warnings.warn(
# "Could not find balance information for {} (in {}) in accounts!: {}".format(
# tr.account, tr, account_names
# )
# )
return (cleaned_balances, transactions)
|
from __future__ import annotations
from typing import List, Union, TYPE_CHECKING, Optional, Callable, Coroutine, Any, Dict
from pydantic import constr, conint
if TYPE_CHECKING:
from roid import CommandType
from roid.app import SlashCommands
from roid.components import Component, ButtonStyle, SelectOption, ComponentContext
from roid.command import Command, CommandContext, CommandGroup, AutoCompleteHandler
from roid.checks import CommandCheck
from roid.response import ResponsePayload
from roid.interactions import Interaction
class CallDeferredAttr:
def __init__(self, attr: str, *args, **kwargs):
self.attr = attr
self.args = args
self.kwargs = kwargs
def __call__(self, caller):
getattr(caller, self.attr)(*self.args, **self.kwargs)
return caller
class DeferredAppItem:
def __init__(
self,
target_name: str,
call_pipeline: List[Union[dict, list, CallDeferredAttr]],
):
self._initialised = None
self._target_name = target_name
self._call_pipeline = call_pipeline
def __call__(self, app: SlashCommands):
if self._initialised is not None:
raise TypeError("deferred object already initialised")
caller = getattr(app, self._target_name)
for params in self._call_pipeline:
if isinstance(params, dict):
caller = caller(**params)
elif isinstance(params, CallDeferredAttr):
caller = params(caller)
else:
caller = caller(*params)
self._initialised = caller
return caller
class DeferredComponent(DeferredAppItem):
"""A identifier type for deferring components."""
def error(
self,
func: Callable[[Interaction, Exception], Coroutine[Any, Any, ResponsePayload]],
):
"""
Maps the given error handling coroutine function to the commands general
error handler.
This will override the existing error callback.
Args:
func:
                The function callback itself. This can be either a coroutine function
                or a regular sync function (sync functions will be run in a new
                thread).
"""
if self._initialised is not None:
self._initialised.error(func)
self._call_pipeline.append(CallDeferredAttr("error", func))
return func
def disabled(self) -> CommandContext:
"""Returns a disabled version of this button."""
if self._initialised is None:
raise TypeError(f"component not initialised")
return self._initialised.disabled()
class DeferredButton(DeferredComponent):
"""A deferred component which is already set to target the button method."""
def __init__(
self,
callback,
label: str,
style: ButtonStyle,
*,
custom_id: Optional[str] = None,
disabled: bool = False,
emoji: str = None,
url: Optional[str] = None,
oneshot: bool = False,
):
call_pipeline = [
dict(
label=label,
style=style,
custom_id=custom_id,
disabled=disabled,
emoji=emoji,
url=url,
oneshot=oneshot,
),
[callback],
]
super().__init__("button", call_pipeline)
def __call__(self, app: SlashCommands) -> Component:
return super().__call__(app)
class DeferredSelect(DeferredComponent):
"""A deferred component which is already set to target the select method."""
def __init__(
self,
callback,
custom_id: Optional[str] = None,
disabled: bool = False,
placeholder: str = "Select an option.",
min_values: int = 1,
max_values: int = 1,
oneshot: bool = False,
):
call_pipeline = [
dict(
placeholder=placeholder,
custom_id=custom_id,
min_values=min_values,
max_values=max_values,
disabled=disabled,
oneshot=oneshot,
),
[callback],
]
super().__init__("select", call_pipeline)
def __call__(self, app: SlashCommands) -> Component:
return super().__call__(app)
def with_options(self, options: List[SelectOption]) -> ComponentContext:
"""
Takes a general select component and populates it with the given options.
NOTE: This is only valid if the component is a select type.
WARNING: If this is not done the general select will be rejected.
Args:
options:
A list of select options for the user to choose from.
Returns:
The populated context of the component.
"""
if self._initialised is None:
raise TypeError(f"component not initialised")
return self._initialised.with_options(options)
class DeferredCommand(DeferredAppItem):
def __init__(
self,
callback,
name: str,
description: Optional[str] = None,
guild_id: Optional[int] = None,
guild_ids: Optional[List[int]] = None,
type: Optional[CommandType] = None,
default_permissions: bool = False,
defer_register: bool = False,
):
"""
A command like structure that creates a build pipeline
when it's initialised by the app.
This is useful for code organisation as it allows you to avoid circular imports
in the structure.
This has some limitations in the sense that only the public command fields are
available and register() must be initialised first or a TypeError will be raised.
todo attrs docs
"""
attrs = dict(
name=name,
description=description,
guild_id=guild_id,
guild_ids=guild_ids,
default_permissions=default_permissions,
defer_register=defer_register,
)
if type:
attrs["type"] = type
super().__init__(
"command",
[
attrs,
[callback],
],
)
self._initialised: Optional[Command] = None
@property
def ctx(self) -> CommandContext:
"""
Gets the general command context data.
This is naive of any guild ids registered for this command.
"""
return self._initialised.ctx
def add_check(self, check: CommandCheck, *, at: int = -1):
"""
Adds a check object to the command's check pipeline.
        Checks are run in the order they are added and can directly
modify the interaction data passed to the following checks.
Args:
check:
The check object itself.
at:
The desired index to insert the check at.
If the index is beyond the current length of the pipeline
the check is appended to the end.
"""
if self._initialised is not None:
self._initialised.add_check(check, at=at)
self._call_pipeline.append(CallDeferredAttr("add_check", check=check, at=at))
def register(self, app: SlashCommands):
"""
Register the command with the given app.
If any guild ids are given these are registered as specific
guild commands rather than as a global command.
Args:
app:
The slash commands app which the commands
should be registered to.
"""
if self._initialised is not None:
return self._initialised.register(app)
raise TypeError(f"deferred command is not initialised yet.")
def error(
self,
func: Callable[[Interaction, Exception], Coroutine[Any, Any, ResponsePayload]],
):
"""
Maps the given error handling coroutine function to the commands general
error handler.
This will override the existing error callback.
Args:
func:
                The function callback itself. This can be either a coroutine function
                or a regular sync function (sync functions will be run in a new
                thread).
"""
if self._initialised is not None:
self._initialised.error(func)
self._call_pipeline.append(CallDeferredAttr("error", func))
return func
def autocomplete(
self,
func: Optional[AutoCompleteHandler.Callback] = None,
*,
for_="_AUTO_COMPLETE_DEFAULT",
):
"""
Add a callback for auto complete interaction
requests for all or a specific option.
This decorator can be used either as a generic @command.autocomplete
or pass a option target via @command.autocomplete(for_="my_option_name").
Args:
func:
The callback for the autocomplete interaction.
This is only required when adding a general handler for all options.
for_:
                An optional name to target a specific option.
If this is given the callback will only be invoked if the value is
focused.
The callback required will also just be given the raw `value: str`
keyword opposed to a set of kwargs.
"""
if self._initialised is not None:
return self._initialised.autocomplete(func, for_=for_)
if func is not None:
self._call_pipeline.append(CallDeferredAttr("autocomplete", func))
return func
def wrapper(func_):
self._call_pipeline.append(
CallDeferredAttr("autocomplete", func_, for_=for_)
)
return func_
return wrapper
def __call__(self, *args, **kwargs):
if self._initialised is not None:
return self._initialised.__call__(*args, **kwargs)
return super().__call__(*args, **kwargs)
class DeferredGroupCommand(DeferredCommand):
def __init__(
self,
callback,
name: str,
):
super().__init__(callback, name, description="Not Used")
def register(self, app: SlashCommands):
raise TypeError("group commands cannot be individually registered.")
class DeferredCommandGroup(DeferredAppItem):
def __init__(
self,
name: str,
description: str = None,
*,
guild_id: int = None,
guild_ids: List[int] = None,
default_permissions: bool = True,
defer_register: bool = False,
group_name: str = "command",
group_description: str = "Select a sub command to run.",
):
"""
Registers a command group with the given app.
The description is required.
        If it is missing, a `ValueError` is raised.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
description:
The description of the command. This can only be applied to
`CommandType.CHAT_INPUT` commands.
guild_id:
The optional guild id if this is a guild specific command.
guild_ids:
An optional list of id's to register this command with multiple guilds.
default_permissions:
Whether the command is enabled by default when the app is added to a guild.
defer_register:
Whether or not to automatically register the command / update the command
if needed.
If set to `False` this will not be automatically registered / updated.
group_name:
The name of the parameter to label the sub commands group select as.
group_description:
The description of the select option for the sub commands.
"""
attrs = dict(
name=name,
description=description,
guild_id=guild_id,
guild_ids=guild_ids,
default_permissions=default_permissions,
defer_register=defer_register,
group_name=group_name,
group_description=group_description,
)
super().__init__(
"group",
[attrs],
)
self._commands: Dict[str, DeferredGroupCommand] = {}
self._initialised: Optional[CommandGroup] = None
def __call__(self, *args, **kwargs):
if self._initialised is not None:
return self._initialised.__call__(*args, **kwargs)
self._call_pipeline[0]["existing_commands"] = self._commands
        return super().__call__(*args, **kwargs)
@property
def ctx(self) -> CommandContext:
"""
Gets the general command context data.
This is naive of any guild ids registered for this command.
"""
return self._initialised.ctx
def command(self, name: str):
"""
Registers a command with the given app.
The command type is always `CommandType.CHAT_INPUT`.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
"""
def wrapper(func):
if self._initialised is not None:
return self._initialised.command(name=name)(func)
cmd = DeferredGroupCommand(
callback=func,
name=name,
)
self._commands[name] = cmd
return cmd
return wrapper
def add_check(self, check: CommandCheck, *, at: int = -1):
"""
Adds a check object to the command's check pipeline.
        Checks are run in the order they are added and can directly
modify the interaction data passed to the following checks.
Args:
check:
The check object itself.
at:
The desired index to insert the check at.
If the index is beyond the current length of the pipeline
the check is appended to the end.
"""
if self._initialised is not None:
self._initialised.add_check(check, at=at)
self._call_pipeline.append(CallDeferredAttr("add_check", check=check, at=at))
def register(self, app: SlashCommands):
"""
Register the command with the given app.
If any guild ids are given these are registered as specific
guild commands rather than as a global command.
Args:
app:
The slash commands app which the commands
should be registered to.
"""
if self._initialised is not None:
return self._initialised.register(app)
raise TypeError(f"deferred command is not initialised yet.")
def error(
self,
func: Callable[[Interaction, Exception], Coroutine[Any, Any, ResponsePayload]],
):
"""
Maps the given error handling coroutine function to the commands general
error handler.
This will override the existing error callback.
Args:
func:
                The function callback itself. This can be either a coroutine function
                or a regular sync function (sync functions will be run in a new
                thread).
"""
if self._initialised is not None:
self._initialised.error(func)
self._call_pipeline.append(CallDeferredAttr("error", func))
return func
class CommandsBlueprint:
def __init__(self):
self._commands: List[DeferredCommand] = []
self._components: List[DeferredComponent] = []
def group(
self,
name: str,
description: str,
*,
guild_id: int = None,
guild_ids: List[int] = None,
default_permissions: bool = True,
defer_register: bool = False,
group_name: str = "command",
group_description: str = "Select a sub command to run.",
):
"""
Registers a command with the given app.
If the command type is either `CommandType.MESSAGE` or `CommandType.USER`
there cannot be any description however, if the command type
is `CommandType.CHAT_INPUT` then description is required.
If either of those conditions are broken a `ValueError` is raised.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
description:
The description of the command. This can only be applied to
`CommandType.CHAT_INPUT` commands.
guild_id:
The optional guild id if this is a guild specific command.
guild_ids:
An optional list of id's to register this command with multiple guilds.
default_permissions:
Whether the command is enabled by default when the app is added to a guild.
defer_register:
Whether or not to automatically register the command / update the command
if needed.
If set to `False` this will not be automatically registered / updated.
group_name:
The name of the parameter to label the sub commands group select as.
group_description:
The description of the select option for the sub commands.
"""
cmd = DeferredCommandGroup(
name=name,
description=description,
guild_id=guild_id,
guild_ids=guild_ids,
default_permissions=default_permissions,
defer_register=not defer_register,
group_name=group_name,
group_description=group_description,
)
self._commands.append(cmd) # noqa
return cmd
def command(
self,
name: str,
description: Optional[str] = None,
default_permissions: bool = True,
guild_id: Optional[int] = None,
guild_ids: Optional[List[int]] = None,
type: Optional[CommandType] = None,
defer_register: bool = True,
):
"""
Registers a command with the given app.
If the command type is either `CommandType.MESSAGE` or `CommandType.USER`
there cannot be any description however, if the command type
is `CommandType.CHAT_INPUT` then description is required.
If either of those conditions are broken a `ValueError` is raised.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
description:
The description of the command. This can only be applied to
`CommandType.CHAT_INPUT` commands.
type:
The type of command. This determines if it's a chat input command,
user context menu command or message context menu command.
defaults to `CommandType.CHAT_INPUT`
guild_id:
The optional guild id if this is a guild specific command.
guild_ids:
An optional list of id's to register this command with multiple guilds.
default_permissions:
Whether the command is enabled by default when the app is added to a guild.
defer_register:
Whether or not to automatically register the command / update the command
if needed.
If set to `False` this will not be automatically registered / updated.
"""
def wrapper(func):
cmd = DeferredCommand(
callback=func,
name=name,
description=description,
default_permissions=default_permissions,
guild_id=guild_id,
guild_ids=guild_ids,
type=type,
defer_register=defer_register,
)
self._commands.append(cmd)
return cmd
return wrapper
def button(
self,
label: str,
style: ButtonStyle,
*,
custom_id: Optional[str] = None,
disabled: bool = False,
emoji: str = None,
url: Optional[str] = None,
oneshot: bool = False,
):
"""
Attaches a button component to the given command.
Args:
style:
The set button style. This can be any set style however url styles
require the url kwarg and generally would be better off using
the hyperlink helper decorator.
custom_id:
The custom button identifier. If you plan on having long running
                persistent buttons that don't require context from their parent command;
e.g. reaction roles. You probably want to set this.
disabled:
If the button should start disabled or not.
label:
The button label / text shown on the button.
emoji:
The set emoji for the button. This should be a custom emoji
not a unicode emoji (use the `label` field for that.)
url:
The hyperlink url, if this is set the function body is not invoked
on click along with the `emoji` and `style` field being ignored.
oneshot:
If set to True this will remove the context from the store as soon
as it's invoked for the first time. This allows you to essentially
create one shot buttons which are invalidated after the first use.
"""
def wrapper(func):
cmd = DeferredButton(
callback=func,
label=label,
style=style,
custom_id=custom_id,
disabled=disabled,
emoji=emoji,
url=url,
oneshot=oneshot,
)
self._components.append(cmd)
return cmd
return wrapper
def select(
self,
*,
custom_id: Optional[
constr(strip_whitespace=True, regex="a-zA-Z0-9", min_length=1)
] = None,
disabled: bool = False,
placeholder: str = "Select an option.",
min_values: conint(ge=0, le=25) = 1,
max_values: conint(ge=0, le=25) = 1,
oneshot: bool = False,
):
"""
A select menu component.
        This will occupy an entire action row, so any components sharing the row
will be rejected (done on a first come first served basis.)
Args:
custom_id:
The custom button identifier. If you plan on having long running
                persistent buttons that don't require context from their parent command;
e.g. reaction roles. You probably want to set this.
disabled:
If the button should start disabled or not.
placeholder:
The placeholder text the user sees while the menu is not focused.
min_values:
The minimum number of values the user must select.
max_values:
The maximum number of values the user can select.
oneshot:
If set to True this will remove the context from the store as soon
as it's invoked for the first time. This allows you to essentially
create one shot buttons which are invalidated after the first use.
"""
def wrapper(func):
cmd = DeferredSelect(
callback=func,
custom_id=custom_id,
disabled=disabled,
oneshot=oneshot,
placeholder=placeholder,
min_values=min_values,
max_values=max_values,
)
self._components.append(cmd)
return cmd
return wrapper
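if __name__ == "__main__":
    # A minimal usage sketch (hypothetical command; the callback signature is
    # illustrative only). A blueprint only records deferred items; each one is turned
    # into a real Command/Component when the SlashCommands app later calls it
    # (see DeferredAppItem.__call__ above).
    blueprint = CommandsBlueprint()

    @blueprint.command(name="ping", description="Replies with pong.")
    async def ping(interaction):
        ...

    # `ping` is now a DeferredCommand; during app setup something like `ping(app)`
    # would build the underlying command via the recorded call pipeline.
    print(blueprint._commands)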
|
from .. import base
def getname(obj):
try:
return obj.name
except AttributeError:
return obj
class User(base.Resource):
HUMAN_ID = True
def __repr__(self):
return '<User: %s>' % getattr(self, 'name', 'unknown-name')
def delete(self):
self.manager.delete(self)
    def update(self, password=None):
        self.manager.update(self, password=password)
class UserManager(base.BootingManagerWithFind):
resource_class = User
def get(self, user):
return self._get("/users/%s" % getname(user), "user")
def list(self):
return self._list("/users", "users")
def create(self, name="", password=None, **kwargs):
body = {
"user": {}
}
if name:
body["user"]["name"] = name
if password:
body["user"]["password"] = password
return self._create("/users", body, "user")
def update(self, user, password=None):
body = {
"user": {
}
}
if password:
body['user']['password'] = password
return self._update("/users/%s" % getname(user), body, "")
def delete(self, user):
return self._delete("/users/%s" % getname(user))
|
from yattag import Doc
class MessageFormatter:
def __init__(self):
pass
def format_message(self, message):
"""
appends messageML tags to plain text and returns a dictionary:
{message : messageML object}
"""
        doc, tag, text, line = Doc().ttl()
        with tag('messageML'):
            text(message)
        return dict(message=doc.getvalue())
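if __name__ == "__main__":
    # Example based on yattag's output: the plain text is wrapped in <messageML> tags.
    print(MessageFormatter().format_message("hello"))
    # -> {'message': '<messageML>hello</messageML>'}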
|
from dict_hash import sha256
from .utils import create_dict
import os
def test_dict_hash():
path = sha256(create_dict())
assert os.path.exists(path)
os.remove(path)
|
from .transformer import Transformer
from .captioning_model import CaptioningModel
|
# Copyright (c) 2015, 2016, 2020 Florian Wagner
#
# This file is part of Monet.
"""Module containing the `GeneSet` class."""
import hashlib
from typing import List, Iterable
class GeneSet:
"""A gene set.
A gene set is just what the name implies: A set of genes. Usually, gene
sets are used to group genes that share a certain property (e.g., genes
that perform related functions, or genes that are frequently co-expressed).
The genes in the gene set are not ordered.
GeneSet instances are hashable and should therefore be considered to be
immutable.
Parameters
----------
id: str
See :attr:`id` attribute.
name: str
See :attr:`name` attribute.
genes: set, list or tuple of str
See :attr:`genes` attribute.
source: str, optional
See :attr:`source` attribute. (None)
collection: str, optional
See :attr:`collection` attribute. (None)
description: str, optional
See :attr:`description` attribute. (None)
Attributes
----------
    id: str
The (unique) ID of the gene set.
name: str
The name of the gene set.
genes: set of str
The list of genes in the gene set.
source: None or str
The source / origin of the gene set (e.g., "MSigDB")
collection: None or str
The collection that the gene set belongs to (e.g., "c4" for gene sets
from MSigDB).
description: None or str
The description of the gene set.
"""
def __init__(self, id: str, name: str, genes: Iterable[str],
source: str = None, collection: str = None,
description: str = None):
self._id = id
self._name = name
self._genes = frozenset(genes)
self._source = source
self._collection = collection
self._description = description
@property
def _gene_str(self):
return ', '.join('"%s"' % g for g in sorted(self._genes))
@property
def _source_str(self):
return '"%s"' % self._source \
if self._source is not None else 'None'
@property
def _coll_str(self):
return '"%s"' % self._collection \
if self._collection is not None else 'None'
@property
def _desc_str(self):
return '"%s"' % self._description \
if self._description is not None else 'None'
def __repr__(self):
        return ('<%s instance (id="%s", name="%s", genes=[%s], source=%s, '
                'collection=%s, description=%s)>'
                % (self.__class__.__name__,
                   self._id, self._name, self._gene_str,
                   self._source_str, self._coll_str, self._desc_str))
def __str__(self):
return ('<%s "%s" (id=%s, source=%s, collection=%s, size=%d'
% (self.__class__.__name__, self._name,
self._id, self._source_str, self._coll_str, self.size))
def __eq__(self, other):
if self is other:
return True
elif type(self) is type(other):
return repr(self) == repr(other)
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
@property
def _data(self):
data_str = ';'.join([
str(repr(var)) for var in
[self._id, self._name, self._genes,
self._source, self._collection, self._description]
])
data = data_str.encode('UTF-8')
return data
def __hash__(self):
return hash(self._data)
@property
def hash(self):
"""MD5 hash value for the gene set."""
return str(hashlib.md5(self._data).hexdigest())
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def genes(self):
return self._genes
@property
def source(self):
return self._source
@property
def collection(self):
return self._collection
@property
def description(self):
return self._description
@property
def size(self):
"""The size of the gene set (i.e., the number of genes in it)."""
return len(self._genes)
def to_list(self) -> List[str]:
"""Converts the GeneSet object to a flat list of strings.
Note: see also :meth:`from_list`.
Parameters
----------
Returns
-------
list of str
The data from the GeneSet object as a flat list.
"""
src = self._source or ''
coll = self._collection or ''
desc = self._description or ''
l = [self._id, src, coll, self._name,
','.join(sorted(self._genes)), desc]
return l
@classmethod
def from_list(cls, l: Iterable[str]):
"""Generate an GeneSet object from a list of strings.
Note: See also :meth:`to_list`.
Parameters
----------
l: list or tuple of str
A list of strings representing gene set ID, name, genes,
source, collection, and description. The genes must be
comma-separated. See also :meth:`to_list`.
Returns
-------
`genometools.basic.GeneSet`
The gene set.
"""
id_ = l[0]
name = l[3]
genes = l[4].split(',')
src = l[1] or None
coll = l[2] or None
desc = l[5] or None
return cls(id_, name, genes, src, coll, desc)
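if __name__ == "__main__":
    # A small usage sketch (hypothetical gene set) showing the to_list/from_list
    # round trip described in the docstrings above.
    gs = GeneSet(id='GS0001', name='example set', genes=['TP53', 'BRCA1'],
                 source='MSigDB', collection='c4')
    flat = gs.to_list()
    # -> ['GS0001', 'MSigDB', 'c4', 'example set', 'BRCA1,TP53', '']
    assert GeneSet.from_list(flat) == gs
    assert gs.size == 2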
|
from acrolib.cost_functions import ( # pylint: disable=no-name-in-module
norm_l1,
norm_l2,
sum_squared,
norm_infinity,
weighted_sum_squared,
)
import numpy as np
from numpy.testing import assert_almost_equal
from .slow_cost_functions import (
cost_function_l1_norm,
cost_function_l2_norm,
cost_function_sum_squared,
cost_function_linfinity_norm,
cost_function_weighted_sum_squared,
)
np.random.seed(42)
class TestCythonFunctions:
def test_norm_l1(self):
A = np.random.rand(5, 3)
B = np.random.rand(6, 3)
C_desired = cost_function_l1_norm(A, B)
C = norm_l1(A, B)
assert C.shape == C_desired.shape
assert_almost_equal(C, C_desired)
def test_norm_l2(self):
A = np.random.rand(5, 3)
B = np.random.rand(6, 3)
C_desired = cost_function_l2_norm(A, B)
C = norm_l2(A, B)
assert C.shape == C_desired.shape
assert_almost_equal(C, C_desired)
def test_sum_squared(self):
A = np.random.rand(5, 3)
B = np.random.rand(6, 3)
C_desired = cost_function_sum_squared(A, B)
C = sum_squared(A, B)
assert C.shape == C_desired.shape
assert_almost_equal(C, C_desired)
def test_weighted_sum_squared(self):
A = np.random.rand(5, 3)
B = np.random.rand(6, 3)
w = np.random.rand(3)
C_desired = cost_function_weighted_sum_squared(A, B, w)
C = weighted_sum_squared(A, B, w)
assert C.shape == C_desired.shape
assert_almost_equal(C, C_desired)
def test_norm_infinity(self):
A = np.random.rand(5, 3)
B = np.random.rand(6, 3)
C_desired = cost_function_linfinity_norm(A, B)
C = norm_infinity(A, B)
assert C.shape == C_desired.shape
assert_almost_equal(C, C_desired)
|
import numpy
from numpy import sin, cos
from taylorpoly import UTPS
def f(x):
return sin(x[0] + cos(x[1])*x[0]) + x[1]*x[0]
x = [UTPS([3,1,0],P=2), UTPS([7,0,1],P=2)]
y = f(x)
print('normal function evaluation y_0 = f(x_0) = ', y.data[0])
print('gradient evaluation g(x_0) = ', y.data[1:])
|
"""Test functions for pem.fluid.ecl module
"""
import pytest
from pytest import approx
import numpy as np
import digirock.fluids.ecl as fluid_ecl
from inspect import getmembers, isfunction
@pytest.fixture
def tol():
return {
"rel": 0.05, # relative testing tolerance in percent
"abs": 0.00001, # absolute testing tolerance
}
@pytest.mark.parametrize(
"pres, extrap, ans",
[
(325, "const", 1.4615),
(325, "pchip", 1.4615),
(np.r_[325, 375], "const", np.r_[1.4615, 1.4505]),
(np.r_[325, 375], "pchip", np.r_[1.4615, 1.4505]),
],
)
def test_oil_fvf_table(test_data, pres, ans, extrap, tol):
tab = np.loadtxt(test_data / "PVT_BO.inc")
assert np.allclose(
fluid_ecl.oil_fvf_table(tab[:, 0], tab[:, 1], pres, extrap=extrap),
ans,
rtol=tol["rel"],
)
def test_oil_fvf_table_bad_pchip(test_data):
tab = np.loadtxt(test_data / "PVT_BO.inc")
# test bad extrap
with pytest.raises(ValueError):
assert fluid_ecl.oil_fvf_table(
tab[:, 0], tab[:, 1], 235, extrap="Unknown Extrap"
)
@pytest.mark.parametrize("api,ans", ((20, 0.933993399339934), (45, 0.8016997167138812)))
def test_e100_oil_density(api, ans, tol):
assert fluid_ecl.e100_oil_density(api) == approx(ans)
assert np.allclose(
fluid_ecl.e100_oil_density(np.r_[api, api]), np.r_[ans, ans], atol=tol["abs"]
)
|
# -------------------------------------------------------------------------------------------------
# system
import time
import sys
# -------------------------------------------------------------------------------------------------
# Common
from PyQuantum.Common.Assert import *
# -------------------------------------------------------------------------------------------------
import logging
logging.basicConfig(filename="logs/rank.log", level=logging.DEBUG)
# logging.basicConfig(filename="SparseMatrix/rank.log", level=logging.INFO)
class SparseMatrix:
from PyQuantum.Common.SparseMatrix.Print import Print, print_row, print_rows
from PyQuantum.Common.SparseMatrix.Print import to_csv
from PyQuantum.Common.SparseMatrix.RowsHandler import swap_rows
from PyQuantum.Common.SparseMatrix.RowsHandler import add_row, remove_row, sub_row
from PyQuantum.Common.SparseMatrix.RowsHandler import mult_row, divide_row
from PyQuantum.Common.SparseMatrix.ItemHandler import check_zero
from PyQuantum.Common.SparseMatrix.ItemHandler import add_item, sub_item
from PyQuantum.Common.SparseMatrix.ItemHandler import mult_item, div_item
from PyQuantum.Common.SparseMatrix.ItemHandler import remove, remove_by_jpos, remove_from_heap
def __init__(self, m=0, n=0, orient='row', heap_usage=False):
Assert(m >= 0, "m < 0", cf())
Assert(n >= 0, "n < 0", cf())
self.m = m
self.n = n
self.count = 0
self.row = dict()
self.col = dict()
self.items = dict()
self.ind = dict()
self.orient = orient
self.heap_usage = heap_usage
self.heap = set()
def empty(self):
self.m = 0
self.n = 0
self.count = 0
self.row = dict()
self.col = dict()
self.items = dict()
self.ind = dict()
if self.heap_usage:
self.heap = set()
def add(self, ind, value):
Assert(len(ind) == 2, "len(ind) != 2", cf())
i = ind[0]
j = ind[1]
Assert(i >= 0, "i < 0", cf())
Assert(j >= 0, "j < 0", cf())
if self.n is None:
self.n = j+1
else:
self.n = max(self.n, j+1)
if self.m is None:
self.m = i+1
else:
self.m = max(self.m, i+1)
if value == 0:
return
if self.orient == 'row':
if not (i in self.items):
self.ind[i] = []
self.items[i] = []
# --------------------------------------
inserted = False
for pos, v in enumerate(self.ind[i]):
if j < v:
self.ind[i].insert(pos, j)
self.items[i].insert(pos, value)
inserted = True
break
elif j == v:
self.items[i][pos] = value
inserted = True
break
if not inserted:
self.ind[i].append(j)
self.items[i].append(value)
# --------------------------------------
if i not in self.row:
count = 1
else:
count = self.row[i]['count'] + 1
self.row[i] = {
'ind': self.ind[i],
'items': self.items[i],
'count': count
}
else:
if not (j in self.items):
self.ind[j] = []
self.items[j] = []
self.ind[j].append(i)
self.items[j].append(value)
            self.col[j] = {
'ind': self.ind[j],
'items': self.items[j]
}
if self.heap_usage:
self.heap.add(((i, j), value))
self.count += 1
return
def sub_rows(self, i1, i2, jj):
i1_set = set(self.row[i1]['ind'])
i2_set = set(self.row[i2]['ind'])
inter = i1_set & i2_set
diff = i2_set - i1_set
for c in inter:
j_1 = self.row[i1]['ind'].index(c)
j_2 = self.row[i2]['ind'].index(c)
v_2 = self.row[i2]['items'][j_2] * self.row[i1]['items'][jj]
self.sub_item(i1, j_1, v_2, autoremove=False)
for c in diff:
j_2 = c
ind_2 = self.row[i2]['ind'].index(c)
v_2 = self.row[i2]['items'][ind_2]
self.add((i1, j_2), -v_2)
for j_pos in range(len(self.row[i1]['items']))[::-1]:
self.check_zero(i1, None, j_pos)
# if i1 in self.row.keys():
# for j_pos, j in enumerate(self.row[i1]['items']):
# # print(self.row[i1]['items'])
# Assert(self.row[i1]['items'][j_pos] != 0,
# 'nil' + str(self.row[i1]['items']), cf())
if i1 in self.row.keys():
self.row[i1]['count'] = len(self.row[i1]['items'])
size = len(self.row[i1]['items'])
count = self.row[i1]['count']
Assert(len(self.row[i1]['items']) == self.row[i1]
['count'], 'count != size(items):' + str(count) + "!=" + str(size) + str(self.row[i1]['items']), cf())
def get_rows_by_count(self):
rows_by_count = dict()
for k, v in self.row.items():
cnt = v['count']
if cnt not in rows_by_count.keys():
rows_by_count[cnt] = []
rows_by_count[cnt].append(k)
return rows_by_count
def rank(self):
rows_by_count = self.get_rows_by_count()
divs = 0
subs = 0
I = SparseMatrix()
p = 0
# for k in sorted(rows_by_count.keys()):
# print(k, rows_by_count[k])
# for t in rows_by_count[k]:
# print(self.row[t])
while len(rows_by_count) != 0 and self.count != 0:
p += 1
# if p > 100:
# return -1, self, I
for cnt in sorted(rows_by_count.keys()):
for row_i in rows_by_count[cnt]:
if row_i not in self.row:
continue
row = self.row[row_i]
k = row['items'][0]
ind = row['ind'][0]
logging.debug("Divide row " + str(row_i) + ' ' +
str(self.row[row_i]) + " on " + str(k))
if k == 0:
# self.Print(mode='full')
self.info()
if k != 1:
self.divide_row(row_i, k)
divs += 1
# logging.debug(self.Print(mode='full'))
# print('divide ', k)
# print('*'*100)
# self.Print(mode='full')
logging.debug(self.Print(mode='full'))
ind = self.row[row_i]['ind'][0]
for i1 in list(self.row.keys()):
if i1 != row_i and (ind in self.row[i1]['ind']):
ind_j = self.row[i1]['ind'].index(ind)
j_ind = self.row[row_i]['ind'][0]
logging.debug("Sub rows " + str(row_i) + ' -= ' +
str(i1) + " * " + str(ind_j))
self.sub_rows(
i1, row_i, ind_j)
subs += 1
print('sub ', k)
print('*'*100)
self.Print(mode='full')
for s in sorted(rows_by_count.keys()):
for r in rows_by_count[s]:
if r in self.row.keys() and max(self.row[r]['ind']) <= ind:
for j_ind, j in enumerate(self.row[r]['ind']):
I.add((r, j),
self.row[r]['items'][j_ind])
self.remove_row(r)
rows_by_count[cnt].remove(row_i)
# time.sleep(2)
if rows_by_count[cnt] == []:
del rows_by_count[cnt]
# print('divs = ', divs, ', subs = ', subs, sep='')
return (len(self.row)+len(I.row)), self, I
def info(self):
print('-'*100)
print('m:', self.m)
print('n:', self.n)
print()
print('count:', self.count)
if self.count != 0:
print()
if self.orient == 'row':
for i in sorted(self.row.keys()):
print('row ', i, ':', sep='')
print('\tindex:', self.row[i]['ind'])
print('\titems:', self.row[i]['items'])
print()
print('\tcount:', self.row[i]['count'])
elif self.orient == 'col':
for i in sorted(self.col.keys()):
print('col ', i, ':', sep='')
print('\tindex:', self.col[i]['ind'])
print('\titems:', self.col[i]['items'])
print('\tcount:', self.row[i]['count'])
print('-'*100)
print()
return
def __add__(self, k):
if isinstance(k, float):
for i in self.row.keys():
for t in range(len(self.row[i]['items'])):
self.row[i]['items'][t] += k
elif isinstance(k, SparseMatrix):
other = k
ind_A = set()
ind_B = set()
items_A = set()
items_B = set()
all_A = dict()
all_B = dict()
for k, v in self.row.items():
for kj, j in enumerate(v['ind']):
ind_A.add((k, j))
items_A.add(v['items'][kj])
all_A[(k, j)] = v['items'][kj]
for k, v in other.row.items():
for kj, j in enumerate(v['ind']):
ind_B.add((k, j))
items_B.add(v['items'][kj])
all_B[(k, j)] = v['items'][kj]
diff = ind_A ^ ind_B
intersection = ind_A & ind_B
C = SparseMatrix(
m=max(self.m, other.m),
n=max(self.n, other.n),
orient='row'
)
for i in diff:
if i in all_A.keys():
C.add((i[0], i[1]), all_A[(i[0], i[1])])
elif i in all_B.keys():
C.add((i[0], i[1]), all_B[(i[0], i[1])])
for i in intersection:
C.add((i[0], i[1]), all_A[(i[0], i[1])] + all_B[(i[0], i[1])])
return C
return self
def __sub__(self, k):
if isinstance(k, float):
return self.__add__(-k)
elif isinstance(k, SparseMatrix):
return self.__add__(k * -1)
def __mul__(self, k):
for i in self.row.keys():
for t in range(len(self.row[i]['items'])):
self.row[i]['items'][t] *= k
return self
def __truediv__(self, k):
Assert(k != 0, "k == 0", cf())
return self.__mul__(1.0 / k)
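if __name__ == "__main__":
    # A minimal usage sketch (hypothetical values): build a small row-oriented matrix
    # with add() and inspect it; rank() would then reduce it using the row operations
    # defined above.
    M = SparseMatrix(orient='row')
    M.add((0, 0), 1.0)
    M.add((0, 1), 2.0)
    M.add((1, 0), 2.0)
    M.add((1, 1), 4.0)
    M.info()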
|
__title__ = "simulation"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "murlux@protonmail.com"
import copy
import math
from datetime import datetime as dt
class OpenedTrade():
"""An object representing an open trade."""
def __init__(self, type, date, pos):
"""Initate the trade.
:param type: Type of trade
:type type: float
:param date: When the trade was opened
:type date: datetime
:return: A trade
:rtype: trade
"""
self.type = type
self.opened_date = date
self.pos = pos
    def __str__(self):
        return "{0}\n{1}".format(self.type, self.opened_date)
class ClosedTrade(OpenedTrade):
"""An object representing a closed trade."""
def __init__(self, type, opened_date, closed_date, shares, entry, exit, pos):
"""Initate the trade.
:param type: Type of trade
:type type: float
:param date: When the trade was closed
:type date: datetime
:param shares: Number of shares
:type shares: float
:param entry: Entry price
:type entry: float
:param exit: Exit price
:type exit: float
:return: A trade
:rtype: trade
"""
super().__init__(type, opened_date, pos)
self.closed_date = closed_date
self.shares = float(shares)
self.entry = float(entry)
self.exit = float(exit)
    def __str__(self):
        return "{0}\n{1}\n{2}\n{3}\n{4}\n{5}".format(self.type,
                                                     self.opened_date,
                                                     self.closed_date,
                                                     self.shares,
                                                     self.entry,
                                                     self.exit)
class Position:
"""A parent object representing a position."""
def __init__(self, no, pair, tf, entry_price, shares, exit_price, stop_loss):
"""Open the position.
:param no: A unique position id number
:type no: float
:param pair: Market pair operating
:type pair: MarketPair
:param tf: Operating timeframe
:type tf: str
:param entry_price: Entry price at which shares are longed/shorted
:type entry_price: float
:param shares: Number of shares to long/short
:type shares: float
:param exit_price: Price at which to take profit
:type exit_price: float
:param stop_loss: Price at which to cut losses
:type stop_loss: float
:return: A position
:rtype: position
"""
self.no = no
self.type = "None"
self.pair = pair
self.tf = tf
self.date = dt.now()
self.entry_price = float(entry_price)
self.shares = float(shares)
self.exit_price = float(exit_price)
self.stop_loss = float(stop_loss)
    def __str__(self):
        return "{0}\n{1}\n{2}\n{3}\n{4}\n{5}\n{6}\n{7}".format(self.type,
                                                               self.pair,
                                                               self.tf,
                                                               self.date,
                                                               self.shares,
                                                               self.entry_price,
                                                               self.exit_price,
                                                               self.stop_loss)
def show(self):
print("No. {0}".format(self.no))
print("Pair: {0}".format(self.pair))
print("Type: {0}".format(self.type))
print("Entry: {0}".format(self.entry_price))
print("Shares: {0}".format(self.shares))
print("Exit: {0}".format(self.exit_price))
print("Stop: {0}\n".format(self.stop_loss))
def _dict(self):
return {
'id': str(self.no),
'pair': str(self.pair),
'date': str(self.date),
'tf': str(self.tf),
'type': str(self.type),
'entry_price': str(self.entry_price),
'amount': str(self.shares),
'exit_price': str(self.exit_price),
'stop_loss': str(self.stop_loss)
}
class LongPosition(Position):
"""A child object representing a long position."""
def __init__(self, no, pair, tf, entry_price, shares, exit_price=math.inf, stop_loss=0):
"""Open the position.
:param no: A unique position id number
:type no: float
:param pair: Market pair operating
:type pair: MarketPair
:param tf: Operating timeframe
:type tf: str
:param entry_price: Entry price at which shares are longed
:type entry_price: float
:param shares: Number of shares to long
:type shares: float
:param exit_price: Price at which to take profit
:type exit_price: float
:param stop_loss: Price at which to cut losses
:type stop_loss: float
:return: A long position
:rtype: LongPosition
"""
if exit_price is False: exit_price = math.inf
if stop_loss is False: stop_loss = 0
super().__init__(no, pair, tf, entry_price, shares, exit_price, stop_loss)
self.type = 'long'
def close(self, percent, current_price):
"""Close the position.
:param percent: Percent of position size to close
:type percent: float
:param current_price: Closing price
:type current_price: float
:return: Amount of capital gained from closing position
:rtype: float
"""
shares = self.shares
self.shares *= 1.0 - percent
return shares * percent * current_price
def stop_hit(self, current_price):
if current_price <= self.stop_loss:
return(True)
class ShortPosition(Position):
"""A child object representing a short position."""
def __init__(self, no, pair, tf, entry_price, shares, exit_price=0, stop_loss=math.inf):
"""Open the position.
:param no: A unique position id number
:type no: int
:param pair: Market pair operating
:type pair: MarketPair
:param tf: Operating timeframe
:type tf: str
:param entry_price: Entry price at which shares are shorted
:type entry_price: float
:param shares: Number of shares to short
:type shares: float
:param exit_price: Price at which to take profit
:type exit_price: float
:param stop_loss: Price at which to cut losses
:type stop_loss: float
:return: A short position
:rtype: ShortPosition
"""
if exit_price is False: exit_price = 0
if stop_loss is False: stop_loss = math.inf
super().__init__(no, pair, tf, entry_price, shares, exit_price, stop_loss)
self.type = 'short'
def close(self, percent, current_price):
"""Close the position.
:param percent: Percent of position size to close
:type percent: float
:param current_price: Closing price
:type current_price: float
:return: Amount of capital gained from closing position
:rtype: float
"""
entry = self.shares * percent * self.entry_price
exit = self.shares * percent * current_price
self.shares *= 1.0 - percent
if entry - exit + entry <= 0:
return 0
else:
return entry - exit + entry
def stop_hit(self, current_price):
if current_price >= self.stop_loss:
return(True)
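if __name__ == "__main__":
    # A minimal usage sketch (hypothetical numbers; `pair` would normally be a
    # MarketPair instance, a plain string stands in here).
    pos = LongPosition(no=1, pair="BTC/USDT", tf="1h", entry_price=100.0, shares=2.0,
                       exit_price=150.0, stop_loss=80.0)
    print(pos.close(0.5, current_price=120.0))  # 2.0 * 0.5 * 120.0 == 120.0
    print(pos.stop_hit(75.0))                   # True: price fell through the stop loss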
|
import Config as cfg
import utils.AlphaVantageUtils as av
import utils.PostgresUtils as pg
def update_price(ticker, interval, size):
count_duplicate = 0
count_create = 0
df_prices = av.get_prices(cfg.AV_APIKEY, ticker, interval, size)
for i in range(0, len(df_prices)):
if not pg.is_price_duplicate(ticker, interval, df_prices[pg._COL_DATETIME][i]):
print(f'{ticker} ({interval}) price on {str(df_prices[pg._COL_DATETIME][i])} created : {df_prices.iloc[i, :].values}')
pg.create_prices([df_prices.iloc[i, :].values])
count_create += 1
else:
print(f'Duplicate {ticker} ({interval}) price on {str(df_prices[pg._COL_DATETIME][i])}')
count_duplicate += 1
return count_create, count_duplicate
# update_price(av._TIC_MICROSOFT, av._INT_DAILY, av._SIZE_COMPACT)
|
# Matrix sparseness judgement, convergence judgement, distance product by matrix multiplication, the association law for the distance product,
# scale-free characteristics of social-network degree distributions, and the precision limit of floating-point operations on modern hardware.
# 1. DPMM: distance product by MM
# 2. Association law of distance product
# 3. Diameter Limit: scale-free network
# 4. Precision Limit: n^(epochs) < max_float for intermediate value:float32, float64
# 5. Convergence judgement.
# 6. Sparseness judgement
import math
import cupy
import cupyx
import numpy
import scipy
from cupy import cusparse
from scipy.sparse import csr_matrix
class Device:
def __init__(self, config):
self.use = config.device
if 'threshold' in config.__dict__.keys():
self.THRESHOLD = config.threshold
if config.device == 'cpu':
self.device = numpy
self.csr_matrix = scipy.sparse.csr_matrix
else:
self.device = cupy
self.csr_matrix = cupyx.scipy.sparse.csr_matrix
# 1. DPMM: distance product by MM
# 2. Association law of distance product
# 3. Diameter Limit: scale-free network
# 4. Precision Limit: n^(epochs) < max_float for intermediate value:float32, float64
# 5. Convergence judgement.
class APSP(Device):
def __init__(self, matrix, config):
super().__init__(config)
if self.use == 'cpu':
            self.adj_matrix = matrix
else:
# load into gpu
self.adj_matrix = cupy.array(matrix)
self.e_max = self.device.max(self.adj_matrix)
self.g_diameter = config.diameter
self.use_dynamic = config.converge_check
self.epsilon = config.lr
print('shape:', self.adj_matrix.shape, 'element_max', self.e_max, 'diameter:', self.g_diameter, 'use_dynamic:', self.use_dynamic)
def stat(self, op):
stat = [len(op[self.device.where(op <= i)]) for i in (1, self.g_diameter, self.e_max)]
print('stat:',(stat[0], stat[1]-stat[0], stat[2]-stat[1]))
return stat
def max(self, op):
print('mmax')
index_op = self.device.where(op < self.e_max)
op_min = self.device.min(op[index_op])
op_max = self.device.max(op[index_op])
print('minv is ', op_min , 'maxv is ', op_max)
return op_max
def exponent(self, op, base, current_maxv):
print('exp')
index_op = self.device.where(op < self.e_max)
rindex_op = self.device.where(op >= self.e_max)
print('expi:',len(op[index_op]),'ri:',len(op[rindex_op]))
op[index_op] = current_maxv - op[index_op]
op[index_op] = self.device.power(base + 1, op[index_op])
op[rindex_op] = 0
print('exp:',op)
return op
def logarithm(self, op, base, current_maxv):
print('log')
index_zero = self.device.where(op>0)
rindex_zero = self.device.where(op==0)
print('logi:', len(op[index_zero]), 'ri:', len(op[rindex_zero]))
op[index_zero] = 2 * current_maxv - self.device.log(op[index_zero]) // self.device.log(base + 1)
op[rindex_zero] = self.e_max
print('log',op)
return op
# 1. DPMM algorithm
def dp(self, op):
print('dp')
m = op.shape[0]
op_max = self.max(op)
op = self.exponent(op, m, op_max)
op = self.device.matmul(op,op)
op = self.logarithm(op, m, op_max)
print('dp:',op)
return op
# 1. DPMM
# 2. Association law for DP.
# 5. Convergence judgement.
def apsp(self, g_diameter=9):
print('apsp')
adj = self.adj_matrix
counter = math.ceil(math.log(g_diameter, 2))
print('LOOP N:',counter)
# 2. Association law
for i in range(counter):
print('loop index:', i)
print('apsp,a:', adj)
# 1. DPMM
wr = self.dp(adj.copy())
print('apsp,b:', wr)
post = self.device.minimum(adj, wr)
print('apsp,c:', adj)
# 5. Convergence judgement
if self.use_dynamic:
print('checking diff:')
equalsum = self.device.sum(self.device.equal(adj, post))
print('equals:', equalsum, "/", self.device.size(adj)," ({}%)".format(equalsum*100.0/ self.device.size(adj)))
if equalsum > (1.0 - self.epsilon) * self.device.size(adj):
print('LOOP EXIT by dynamic decision. at LOOP:', i)
break
adj = post
print('apsp:', adj)
return adj
def apsp_iter(self, g_diameter=9):
print('apsp')
adj = self.adj_matrix
counter = math.ceil(math.log(g_diameter, 2))
print('LOOP N:',counter)
for i in range(counter):
print('loop index:', i)
print('apsp,a:', adj)
wr = self.dp(adj.copy())
print('apsp,b:', wr)
post = self.device.minimum(adj, wr)
print('apsp,c:', adj)
if self.use_dynamic and self.device.all(self.device.equal(adj, post)):
yield adj
print('LOOP EXIT by dynamic decision.')
break
adj = post
yield post
print('apsp:FIN')
# 1. DPMM: distance product by MM
# 2. Association law of distance product
# 3. Diameter Limit: scale-free network
# 4. Precision Limit: n^(epochs) < max_float for intermediate value:float32, float64
# 5. Convergence judgement.
# 6. Sparseness judgement
class APSPPowerLawBound(Device):
def __init__(self,matrix, config):
super().__init__(config)
if self.use == 'cpu':
self.adj_matrix = matrix
else:
# load into gpu
self.adj_matrix = cupy.array(matrix)
self.e_max = self.device.max(self.adj_matrix)
self.g_diameter = config.diameter
self.use_dynamic = config.converge_check
self.use_sparse = config.sparse_check
self.epsilon = config.lr
print('shape:', self.adj_matrix.shape, 'element_max', self.e_max, 'diameter:', self.g_diameter, 'use_dynamic:', self.use_dynamic)
def stat(self, op):
stat = [len(op[self.device.where(op <= i)]) for i in (1, self.g_diameter, self.e_max)]
print('stat:',(stat[0], stat[1]-stat[0], stat[2]-stat[1]))
return stat
def max(self, op):
print('mmax')
index_op = self.device.where(op < self.e_max)
op_min = self.device.min(op[index_op])
op_max = self.device.max(op[index_op])
print('minv is ', op_min , 'maxv is ', op_max)
return op_max
def exponent(self, op, base, current_maxv):
print('exp')
index_op = self.device.where(op < self.e_max)
rindex_op = self.device.where(op >= self.e_max)
print('expi:',len(index_op[0]),'ri:',len(rindex_op[0]))
op[index_op] = current_maxv - op[index_op]
op[index_op] = self.device.power(base + 1, op[index_op])
op[rindex_op] = 0
print('exp:',op)
if self.use_sparse:
self.density = float(len(index_op[0]))/float(len(index_op[0])+len(rindex_op[0]))
return op
def logarithm(self, op, base, current_maxv):
print('log')
index_zero = self.device.where(op>0)
rindex_zero = self.device.where(op==0)
print('logi:', len(index_zero[0]), 'ri:', len(rindex_zero[0]))
op[index_zero] = 2 * current_maxv - self.device.log(op[index_zero]) // self.device.log(base + 1)
op[rindex_zero] = self.e_max
print('log',op)
return op
# 1. DPMM
# 6. Sparseness judgement
def dp(self, op):
print('dp')
m = op.shape[0]
op_max = self.max(op)
op = self.exponent(op, m, op_max)
if self.use_sparse:
print('dense', self.density)
# check op is dense or not, within THRESHOLD such as 10% sparse, then decide to use MM or SPMM.
if self.use_sparse and self.density < self.THRESHOLD:
sop = self.csr_matrix(op)
print('sparse nnz:', sop.nnz)
            if self.use == 'cpu':  # on CPU, use the @ matmul operator (Python 3.5+)
                sop = sop @ sop
            else:  # on GPU, use cusparse.csrgemm
                sop = cusparse.csrgemm(sop, sop)
print('sparse nnz2:', sop.nnz)
op = sop.todense()
else:
op = self.device.matmul(op, op)
op = self.logarithm(op, m, op_max)
print('dp:',op)
return op
# 1. DPMM
# 2. Association law for DP.
# 5. Convergence judgement.
# 6. Sparseness judgement.
def apsp(self, g_diameter=9):
print('apsp')
adj = self.adj_matrix
counter = math.ceil(math.log(g_diameter, 2))
print('LOOP N:',counter)
for i in range(counter):
print('loop index:', i)
print('apsp,a:', adj)
wr = self.dp(adj.copy())
print('apsp,b:', wr)
post = self.device.minimum(adj, wr)
print('apsp,c:', adj)
if self.use_dynamic:
print('checking diff:')
equalsum = self.device.sum(self.device.equal(adj, post))
print('equals:', equalsum, "/", self.device.size(adj)," ({}%)".format(equalsum*100.0/ self.device.size(adj)))
if equalsum > (1.0 - self.epsilon) * self.device.size(adj):
print('LOOP EXIT by dynamic decision. at LOOP:', i)
break
adj = post
print('apsp:', adj)
return adj
def apsp_iter(self, g_diameter=9):
print('apsp')
adj = self.adj_matrix
counter = math.ceil(math.log(g_diameter, 2))
print('LOOP N:',counter)
for i in range(counter):
print('loop index:', i)
print('apsp,a:', adj)
wr = self.dp(adj.copy())
print('apsp,b:', wr)
post = self.device.minimum(adj, wr)
print('apsp,c:', adj)
if self.use_dynamic and self.device.all(self.device.equal(adj, post)):
yield adj
print('LOOP EXIT by dynamic decision.')
break
adj = post
yield post
print('apsp:FIN')
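if __name__ == "__main__":
    # A minimal usage sketch for APSP above (hypothetical graph). Any object exposing
    # the attributes read in __init__ works as the config; a SimpleNamespace stands in
    # here. Entries equal to the matrix maximum act as "no edge" sentinels, so 99 plays
    # the role of infinity below.
    from types import SimpleNamespace
    INF = 99.0
    adj = numpy.array([[0.0, 1.0, INF],
                       [1.0, 0.0, 1.0],
                       [INF, 1.0, 0.0]])
    cfg = SimpleNamespace(device='cpu', diameter=3, converge_check=False, lr=0.0)
    dist = APSP(adj, cfg).apsp(g_diameter=3)
    print(dist)  # dist[0][2] should come out as 2.0 (shortest path 0 -> 1 -> 2)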
|