n = int(input())
print(pow(3, n, 2**31 - 1))
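For reference, Python's three-argument pow computes modular exponentiation without ever materializing 3**n. A minimal square-and-multiply sketch of the same idea (the helper name modpow is illustrative, not from the source):

def modpow(base, exp, mod):
    # Square-and-multiply: O(log exp) multiplications, every product reduced mod `mod`.
    result = 1
    base %= mod
    while exp > 0:
        if exp & 1:
            result = (result * base) % mod
        base = (base * base) % mod
        exp >>= 1
    return result

assert modpow(3, 20, 2**31 - 1) == pow(3, 20, 2**31 - 1)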
import pytest
from dagster import InitResourceContext, build_init_resource_context, resource
from dagster.core.errors import DagsterInvariantViolationError


def test_build_no_args():
    context = build_init_resource_context()
    assert isinstance(context, InitResourceContext)

    @resource
    def basic(_):
        return "foo"

    assert basic(context) == "foo"


def test_build_with_resources():
    @resource
    def foo(_):
        return "foo"

    context = build_init_resource_context(resources={"foo": foo, "bar": "bar"})
    assert context.resources.foo == "foo"
    assert context.resources.bar == "bar"

    @resource(required_resource_keys={"foo", "bar"})
    def reqs_resources(context):
        return context.resources.foo + context.resources.bar

    assert reqs_resources(context) == "foobar"


def test_build_with_cm_resource():
    entered = []

    @resource
    def foo(_):
        try:
            yield "foo"
        finally:
            entered.append("true")

    @resource(required_resource_keys={"foo"})
    def reqs_cm_resource(context):
        return context.resources.foo + "bar"

    context = build_init_resource_context(resources={"foo": foo})
    with pytest.raises(DagsterInvariantViolationError):
        context.resources  # pylint: disable=pointless-statement
    del context
    assert entered == ["true"]

    with build_init_resource_context(resources={"foo": foo}) as context:
        assert context.resources.foo == "foo"
        assert reqs_cm_resource(context) == "foobar"

    assert entered == ["true", "true"]
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes of solver."""

import abc
import math

import delta.compat as tf
import tensorflow_addons as tfa
from absl import logging

from delta import utils
from delta.utils import optimizer
from delta.utils.register import registers


# pylint: disable=abstract-method
class ABCSolver(metaclass=abc.ABCMeta):
  """Abstract class of solver."""

  @abc.abstractmethod
  def process_config(self, config):
    """Process the configs."""
    raise NotImplementedError()

  @abc.abstractmethod
  def input_fn(self, mode):
    """Get the input function."""
    raise NotImplementedError()

  @abc.abstractmethod
  def model_fn(self):
    """Get the model function."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_loss_fn(self):
    """Get the loss function."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_learning_rate(self):
    """Get the learning rate."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_optimizer(self):
    """Get the optimizer."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_apply_gradients_op(self):
    """Get the apply-gradients operator."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_train_op(self):
    """Get the training operator."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_saver(self):
    """Get the saver."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_scaffold(self):
    """Get the scaffold."""
    raise NotImplementedError()

  @abc.abstractmethod
  def train(self):
    """Train the model."""
    raise NotImplementedError()

  @abc.abstractmethod
  def eval(self):
    """Evaluate the model."""
    raise NotImplementedError()

  @abc.abstractmethod
  def infer(self):
    """Make an inference."""
    raise NotImplementedError()

  @abc.abstractmethod
  def train_and_eval(self):
    """Train and evaluate."""
    raise NotImplementedError()

  @abc.abstractmethod
  def export_model(self):
    """Export the model to a tensorflow SavedModel."""
    raise NotImplementedError()


class Solver(ABCSolver):
  """Base class of solver."""

  def __init__(self, config):
    super().__init__()
    self._config = self.process_config(config)
    self._task = None

  @property
  def config(self):
    """Get the config."""
    return self._config

  def input_fn(self, mode):
    """Get the input function; returns a Task instance."""
    task_name = self.config['data']['task']["name"]
    self._task = registers.task[task_name](self.config, mode)
    return self._task

  @property
  def task(self):
    """Get the task."""
    return self._task

  def model_fn(self):
    """Return a Model instance."""
    classname = self.config['model']['name']
    logging.info("__name__=%s\tclassname==%s", __name__, classname)
    # Model initialization
    model = registers.model[classname](self.config)
    return model

  def get_loss_fn(self):
    """Get the loss function."""
    return utils.misc.losses(self.config)

  def get_learning_rate(self):
    """Get the learning rate."""
    lrconf = self.config['solver']['optimizer']['learning_rate']
    learning_rate = lrconf['rate']
    learning_type = lrconf['type']

    #pylint: disable=invalid-name
    if learning_type == 'exp_decay':
      lr = tf.train.exponential_decay(
          learning_rate,
          tf.train.get_or_create_global_step(),
          lrconf['decay_steps'],
          lrconf['decay_rate'],
          staircase=True)
    elif learning_type == 'piecewise':
      #boundaries = [15000, 30000]
      #values = [1e-3, 1e-4, 1e-5]
      boundaries = lrconf['boundaries']
      values = lrconf['values']
      assert len(values) == len(
          boundaries) + 1, 'values len must equal boundaries len plus one'
      lr = tf.train.piecewise_constant(
          tf.train.get_or_create_global_step(),
          boundaries=boundaries,
          values=values)
    elif learning_type == 'warmup':
      learning_rate = tf.constant(
          value=learning_rate, shape=[], dtype=tf.float32)
      global_step = tf.train.get_or_create_global_step()
      data_size = self.config['data']['train_data_size']
      num_epochs = self.config["data"]["task"]['epochs']
      batch_size = self.config["data"]["task"]['batch_size']
      num_batch = int(math.ceil(data_size * num_epochs / batch_size))
      # linear decay to zero over the full training run
      learning_rate = tf.train.polynomial_decay(
          learning_rate,
          global_step,
          num_batch,
          end_learning_rate=0.0,
          power=1.0,
          cycle=False)
      global_steps_int = tf.cast(global_step, tf.int32)
      warmup_steps_int = tf.constant(lrconf['num_warmup_steps'], dtype=tf.int32)
      global_steps_float = tf.cast(global_steps_int, tf.float32)
      warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
      warmup_percent_done = global_steps_float / warmup_steps_float
      warmup_learning_rate = learning_rate * warmup_percent_done
      is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
      lr = ((1.0 - is_warmup) * learning_rate +
            is_warmup * warmup_learning_rate)
    elif learning_type == 'const':
      lr = learning_rate
    else:
      raise ValueError(
          "Not support learning rate type: {}".format(learning_type))
    tf.summary.scalar('lr', lr)
    return lr

  #pylint: disable=arguments-differ
  def get_optimizer(self):
    """Get the optimizer."""
    optconf = self.config['solver']['optimizer']
    method = optconf['name']
    learning_rate = self.get_learning_rate()
    if method == 'adadelta':
      opt = tf.train.AdadeltaOptimizer(learning_rate)
    elif method == 'adam':
      opt = tf.train.AdamOptimizer(learning_rate)
    elif method == 'adagrad':
      opt = tf.train.AdagradOptimizer(learning_rate)
    elif method == 'momentum':
      opt = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
    elif method == 'rmsprop':
      opt = tf.train.RMSPropOptimizer(learning_rate)
    elif method == 'gradientdecent':
      opt = tf.train.GradientDescentOptimizer(learning_rate)
    elif method == 'lazyadam':
      opt = tfa.optimizers.LazyAdam(learning_rate)
    elif method == 'weightedadam':
      weight_decay = self.config['solver']['optimizer']['weight_decay']
      opt = tfa.optimizers.AdamW(
          weight_decay=weight_decay, learning_rate=learning_rate)
    elif method == 'yellowfin':
      opt = optimizer.YFOptimizer(learning_rate)
    else:
      raise ValueError("Not support optimizer: {}".format(method))
    return opt

  #pylint: disable=no-self-use
  def clip_gradients(self, grads_and_vars, clip_ratio):
    """Clip the gradients."""
    is_zip_obj = False
    if isinstance(grads_and_vars, zip):
      grads_and_vars = list(grads_and_vars)
      is_zip_obj = True

    with tf.variable_scope('grad'):
      for grad, var in grads_and_vars:
        if grad is not None:
          tf.summary.histogram(var.name[:-2], grad)
        else:
          logging.debug('%s gradient is None' % (var.name))

    # no clipping
    if not clip_ratio:
      if is_zip_obj:
        grads, variables = zip(*grads_and_vars)
        grads_and_vars = zip(grads, variables)
      return grads_and_vars

    gradients, variables = zip(*grads_and_vars)
    clipped, global_norm = tf.clip_by_global_norm(gradients, clip_ratio)
    grad_and_var_clipped = zip(clipped, variables)
    tf.summary.scalar('gradient/global_norm', global_norm)
    return grad_and_var_clipped

  def get_apply_gradients_op(self, loss, global_step=None):
    """Get the apply-gradients operator."""
    opt = self.get_optimizer()
    grads_and_vars = opt.compute_gradients(loss)
    # clip gradients
    optconf = self.config['solver']['optimizer']
    global_norm = optconf['clip_global_norm']
    grads_and_vars = self.clip_gradients(grads_and_vars, global_norm)
    apply_gradient_op = opt.apply_gradients(
        grads_and_vars,
        global_step=global_step or tf.train.get_or_create_global_step())
    return apply_gradient_op

  def get_var_avg_ema(self, decay, global_step=None):
    """Make a variable-average EMA."""
    return tf.train.ExponentialMovingAverage(
        decay, global_step or tf.train.get_or_create_global_step())

  def make_restore_average_vars_dict(self, global_step=None):
    """Use variable averages to restore vars."""
    model_avg_conf = self.config['solver']['model_average']
    var_avg_decay = model_avg_conf['var_avg_decay']

    var_restore_dict = {}
    variable_averages = self.get_var_avg_ema(var_avg_decay, global_step)
    for var in tf.global_variables():
      if var in tf.trainable_variables():
        name = variable_averages.average_name(var)
      else:
        name = var.op.name
      var_restore_dict[name] = var
    return var_restore_dict

  def var_avg(self, global_step=None):
    """Average model variables; add the average op to UPDATE_OPS."""
    model_avg_conf = self.config['solver']['model_average']
    var_avg_model = model_avg_conf['enable']
    if var_avg_model:
      var_avg_decay = model_avg_conf['var_avg_decay']
      variable_averages = self.get_var_avg_ema(var_avg_decay, global_step)
      apply_op = variable_averages.apply(tf.trainable_variables())
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, apply_op)
      utils.log_vars('Avg Trainable Vars', tf.trainable_variables())

  def get_train_op(self, loss, global_step=None):
    """Get the training operator."""
    apply_gradient_op = self.get_apply_gradients_op(loss, global_step)

    # model average
    self.var_avg(global_step)

    # model average runs after gradients are applied
    with tf.control_dependencies([apply_gradient_op]):
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      train_op = tf.group(*update_ops)

    utils.log_vars('moving vars', tf.moving_average_variables())
    return train_op

  def get_saver(self, global_step=None):
    """Get the saver."""
    solverconf = self.config['solver']
    max_to_keep = solverconf['saver']['max_to_keep']

    model_avg_conf = self.config['solver']['model_average']
    model_average = model_avg_conf['enable']
    if model_average:
      var_avg_decay = model_avg_conf['var_avg_decay']
      variable_averages = self.get_var_avg_ema(var_avg_decay, global_step)
      variable_to_restore = variable_averages.variables_to_restore()
      logging.info('Restore: name to var : {}'.format(variable_to_restore))
      saver = tf.train.Saver(variable_to_restore, max_to_keep=max_to_keep)
      logging.info('Restore vars from moving variables')
    else:
      saver = tf.train.Saver(max_to_keep=max_to_keep)
    return saver

  def get_scaffold(self, mode, global_step=None):
    """Get the scaffold."""
    if mode != utils.TRAIN:
      # for model average
      saver = self.get_saver(global_step)
      scaffold = tf.train.Scaffold(saver=saver)
    else:
      scaffold = None  # default
    return scaffold


class ABCEstimatorSolver(Solver):
  """Abstract solver using tensorflow Estimator."""

  @abc.abstractmethod
  def create_estimator(self):
    """Create a tf.estimator.Estimator object."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_train_hooks(self, labels, logits, alpha=None):
    """Return train hooks."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_eval_hooks(self, labels, logits):
    """Return eval hooks and eval metric ops."""
    raise NotImplementedError()

  @abc.abstractmethod
  def get_infer_predictions(self):
    """Get the inference predictions output."""
    raise NotImplementedError()

  @abc.abstractmethod
  def create_serving_input_receiver_fn(self):
    """Input pipeline used when exporting the model."""
    raise NotImplementedError()

  @abc.abstractmethod
  def postproc_fn(self):
    """Postprocess predictions."""
    raise NotImplementedError()
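For orientation, a hedged sketch of the config shape this solver reads; every key below is inferred from the dictionary accesses in the code above, and every value is illustrative:

# Hypothetical config skeleton, not shipped with the source.
config = {
    'data': {
        'train_data_size': 10000,
        'task': {'name': 'my_task', 'epochs': 10, 'batch_size': 32},
    },
    'model': {'name': 'my_model'},
    'solver': {
        'saver': {'max_to_keep': 5},
        'model_average': {'enable': False, 'var_avg_decay': 0.999},
        'optimizer': {
            'name': 'adam',                 # any branch of get_optimizer()
            'clip_global_norm': 5.0,        # falsy value disables clipping
            'learning_rate': {'type': 'const', 'rate': 1e-3},
        },
    },
}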
## Read input as specified in the question.
## Print output as specified in the question.
n = int(input())
li = [int(x) for x in input().split()]
total = 0  # avoid shadowing the built-in sum
for ele in li:
    total = total + ele
print(total)
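For comparison, the same computation as a one-liner with the built-in sum:

n = int(input())
print(sum(int(x) for x in input().split()))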
import platform
import os
import subprocess
import requests
from PyQt5 import uic
from PyQt5.QtCore import pyqtSlot, Qt, QPoint, QFileInfo, QModelIndex, QRegExp
from PyQt5.QtWidgets import QFileDialog, QMenu, QAction, QWidget, QTableView, QHeaderView, QTableWidget
from PyQt5.QtGui import QRegExpValidator

from gui.Worker import Worker
from gui.DownloadsTableModel import DownloadsTableModel
from gui.ProgressBarDelegate import ProgressBarDelegate
from gui.Utils import CustomRole
from gui.CancelDialog import CancelDialog
from gui.Worker import DownloadStatus


class DownloadPage(QWidget):
    # Handy links for testing
    # does not have content-length header:
    # https://github.com/ClaudiaRaffaelli/Cindy-s-Bad-Luck-BLS-VR/archive/refs/tags/v1.0.2.zip
    # does have content-length header:
    # https://github.com/ClaudiaRaffaelli/Cindy-s-Bad-Luck-BLS-VR/releases/download/v1.0.2/BLS.apk
    # https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/close-up-of-cat-wearing-sunglasses-while-sitting-royalty-free-image-1571755145.jpg

    def __init__(self, parent=None):
        super(DownloadPage, self).__init__(parent)
        uic.loadUi("gui/ui/downloadWidget.ui", self)

        # creating the table view for the downloads and the model that holds the data
        self.downloadsTableModel = DownloadsTableModel()
        self.downloadsTableView = QTableView()
        self.downloadsTableView.setSortingEnabled(True)
        self.downloadsTableView.setSelectionBehavior(QTableView.SelectRows)

        # Init table view
        self.downloadsTableView.setModel(self.downloadsTableModel)
        self.downloadsTableView.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive)
        self.downloadsTableView.horizontalHeader().setStretchLastSection(True)
        # the fields in the table are non-editable
        self.downloadsTableView.setEditTriggers(QTableWidget.NoEditTriggers)
        self.mainLayout.addWidget(self.downloadsTableView)

        # connecting the signal for the context menu on right click on a download table row
        self.downloadsTableView.setContextMenuPolicy(Qt.CustomContextMenu)
        self.downloadsTableView.customContextMenuRequested.connect(self.context_menu_triggered_downloads_table)
        # connecting the signal for one click on a row
        self.downloadsTableView.clicked.connect(self.row_click)

        # where the file will be saved
        self.savingLocation = ""
        # current download threads
        self.downloadWorkerThreads = []

        # validation of the input in the text line for generic URLs:
        reg_ex = QRegExp(r"https?:\/\/[-a-zA-Z0-9@:%._\+~#=\/]{2,256}")
        input_line_edit_validator = QRegExpValidator(reg_ex, self.urlLineEdit)
        self.urlLineEdit.setValidator(input_line_edit_validator)

        # number of checked rows (this activates, if >0, or deactivates, if ==0, the Start, Pause, Cancel buttons)
        self.numCheckedRows = 0

        # delegate to paint the progress bars
        delegate = ProgressBarDelegate(self.downloadsTableView)
        self.downloadsTableView.setItemDelegateForColumn(5, delegate)

    @pyqtSlot()
    def start_individual_download(self):
        # taking the url from the lineEdit
        url = self.urlLineEdit.text()
        self.init_download(url, saving_location="")

    def init_download(self, url, saving_location):
        """
        This method can be called both from the DownloadPage when the user presses the start button and from the
        main. In the latter case it is because an unfinished download from a previous opening of the program needs
        to be re-loaded in the download table view.
        :param url: is the url of the download
        :param saving_location: is where the file will be saved. This string is non-empty only if the download is
            re-loaded from the history json
        """
        # if the saving location is not passed as input then we are not loading data from the json but from user input
        if saving_location == "":
            # we need to check if there is already a file with this name; in this case we ask the user to
            # insert a different file name
            # also, if the user does not set a name, we check if the default one already exists
            if os.path.exists(self.savingLocation) or \
                    (os.path.exists("./Downloads/" + url.split('/')[-1]) and self.savingLocation == ""):
                self.errorLabel.setText("This file already exists, please choose another name")
                return
            else:
                self.errorLabel.setText("")

            # checking if this url is downloadable, otherwise asking the user to change it
            try:
                requests.head(url).headers
            except requests.RequestException:
                self.errorLabel.setText("This URL is non-downloadable, please choose another one")
                return

        # creating the worker that will download
        worker = Worker()
        # putting the worker for this download in the array of worker threads
        self.downloadWorkerThreads.append(worker)

        # Connecting all the signals of the thread.
        # Adding the download to the model in order to display it in the table view with initial data
        worker.download_starting.connect(self.downloadsTableModel.add_download_to_table)
        worker.download_started.connect(self.downloadsTableModel.init_row)
        # This signal will be used to update the table model
        worker.download_update.connect(self.downloadsTableModel.update_data_to_table)
        # this signal will be used to know when the download is over
        worker.download_completed.connect(self.downloadsTableModel.completed_row)
        # this signal will be used to set the row in the table model as paused or aborted
        worker.download_interrupted.connect(self.downloadsTableModel.interrupted_row)
        # this signal will be used to set the row as re-started after a delete
        worker.download_restarted.connect(self.downloadsTableModel.restarted_row)

        if saving_location == "":
            # starting the worker, assigning an ID
            worker.init_download(thread_id=len(self.downloadWorkerThreads) - 1, filepath=self.savingLocation,
                                 url=url, start=True)
        else:
            # init the worker assigning an ID but not starting it, and setting the filepath from the json file
            worker.init_download(thread_id=len(self.downloadWorkerThreads) - 1, filepath=saving_location,
                                 url=url, start=False)

        # if this is the first time we start a download (there are no rows) we activate the buttons
        if self.numCheckedRows == 0:
            self.cancelSelectedDownloadButton.setEnabled(True)
            self.pauseSelectedDownloadButton.setEnabled(True)
            self.startSelectedDownloadButton.setEnabled(True)
        self.numCheckedRows += 1

        # resetting the name of the download
        self.savingLocation = ""

    @pyqtSlot()
    def choose_location_save(self):
        dialog = QFileDialog(self, "Choose location")
        dialog.setOption(QFileDialog.DontUseNativeDialog, True)
        dialog.setOption(QFileDialog.DontResolveSymlinks, True)
        dialog.setFileMode(QFileDialog.AnyFile)
        dialog.setDirectory("./Downloads/")
        # by default the downloaded file is named after the original file name, but it can be changed by the user
        url = self.urlLineEdit.text()
        filename = url.split('/')[-1]
        self.savingLocation = dialog.getSaveFileName(self, "Choose file name", filename)[0]

    @pyqtSlot()
    def start_resume_download(self):
        # resume all the checked downloads if not already running nor already completed
        checked_rows = self.downloadsTableModel.get_all_checked_rows()
        for row in checked_rows:
            if (not self.downloadWorkerThreads[row].thread.isRunning()) \
                    and (not self.downloadWorkerThreads[row].status == DownloadStatus.complete):
                self.downloadWorkerThreads[row].restart_download()

    @pyqtSlot()
    def pause_download(self):
        # pauses all the checked downloads if not already paused
        checked_rows = self.downloadsTableModel.get_all_checked_rows()
        for row in checked_rows:
            if self.downloadWorkerThreads[row].thread.isRunning():
                # pause the download at row if it is running, asking the worker to pause
                self.downloadWorkerThreads[row].status = DownloadStatus.pause
                self.interrupt_row(row)

    def interrupt_row(self, row):
        # the worker's status indicates how the row needs to be interrupted (paused or aborted)
        self.downloadWorkerThreads[row].thread.requestInterruption()
        self.downloadWorkerThreads[row].thread.wait(2000)

    @pyqtSlot()
    def cancel_download(self):
        # asking the user if they really want to cancel all progress for the selected downloads
        cancel_dialog = CancelDialog(None)
        result = cancel_dialog.exec()
        if result:
            # if the user presses Yes we delete the downloads
            checked_rows = self.downloadsTableModel.get_all_checked_rows()
            # obtain the path to the file for all checked_rows downloads and delete the file
            for row in checked_rows:
                if self.downloadWorkerThreads[row].thread.isRunning():
                    # setting for the thread the request to abort
                    self.downloadWorkerThreads[row].status = DownloadStatus.abort
                    self.interrupt_row(row)
                else:
                    # the download was paused or concluded and we only have to reset the table model
                    self.downloadsTableModel.interrupted_row(row, DownloadStatus.abort)
                    # If we decide to start the download again (which is possible since the url is still kept),
                    # the download progress is reset to zero by the worker
                    self.downloadWorkerThreads[row].status = DownloadStatus.idle
                # then try to delete the file
                try:
                    os.remove(self.downloadsTableModel.get_full_path(row))
                except OSError:
                    print("Could not delete file {}".format(self.downloadsTableModel.get_full_path(row)))
        # if result is False it means that the user has pressed No or the x on the corner of the dialog;
        # by default the downloads are not deleted

    @pyqtSlot()
    def parse_url(self):
        # enabling the choose-save-location and start-download buttons when the URL starts with a valid scheme
        if self.urlLineEdit.text().startswith(("http://", "https://")) and \
                self.startIndividualDownloadButton.isEnabled() is False:
            self.startIndividualDownloadButton.setEnabled(True)
            self.chooseLocationButton.setEnabled(True)
        elif not self.urlLineEdit.text().startswith(("http://", "https://")) and \
                self.startIndividualDownloadButton.isEnabled() is True:
            self.startIndividualDownloadButton.setEnabled(False)
            self.chooseLocationButton.setEnabled(False)

    @pyqtSlot(QPoint)
    def context_menu_triggered_downloads_table(self, clickpoint):
        index = self.downloadsTableView.indexAt(clickpoint)
        if index.isValid():
            context = QMenu(self)
            finder = "Show in Explorer"
            if platform.system() == "Linux":
                finder = "Reveal in File Explorer"
            elif platform.system() == "Darwin":
                finder = "Reveal in Finder"
            openExplorer = QAction(finder, self)
            context.addActions([openExplorer])
            openExplorer.triggered.connect(self.open_explorer_item)
            context.exec(self.downloadsTableView.mapToGlobal(clickpoint))

    @pyqtSlot()
    def open_explorer_item(self):
        index = self.downloadsTableView.selectionModel().currentIndex()
        currentItem = self.downloadsTableModel.itemFromIndex(self.downloadsTableModel.index(index.row(), 0))
        info = QFileInfo(currentItem.data(Qt.UserRole + CustomRole.full_path))
        # revealing the selected file in Finder / Explorer / Nautilus
        if info.isDir():
            filepath = info.canonicalFilePath()
        else:
            filepath = info.canonicalPath()
        try:
            # os.startfile only exists on Windows
            os.startfile(filepath)
        except AttributeError:
            try:
                subprocess.Popen(["xdg-open", filepath])
            except OSError:
                subprocess.call(["open", "-R", filepath])

    @pyqtSlot(QModelIndex)
    def row_click(self, index):
        # single click on the first column: the checkbox state is toggled
        if index.isValid() and index.column() == 0:
            # the return value is a boolean that tells whether the row is now checked or unchecked
            is_checked = self.downloadsTableModel.toggle_checkbox(index)
            if is_checked is True:
                # there is one more checked row now, so we increment the counter;
                # if no row was selected before, we have to enable these buttons
                if self.numCheckedRows == 0:
                    self.cancelSelectedDownloadButton.setEnabled(True)
                    self.pauseSelectedDownloadButton.setEnabled(True)
                    self.startSelectedDownloadButton.setEnabled(True)
                self.numCheckedRows += 1
            else:
                self.numCheckedRows -= 1
                # deactivating the Start, Pause, Cancel buttons because there are no selected download rows
                if self.numCheckedRows == 0:
                    self.cancelSelectedDownloadButton.setEnabled(False)
                    self.pauseSelectedDownloadButton.setEnabled(False)
                    self.startSelectedDownloadButton.setEnabled(False)
# -*- coding:utf-8 -*-
import time

import pika

# roughly equivalent to opening a socket connection
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()

# declare the queue
channel.queue_declare(queue='hello')

# In RabbitMQ a message can never be sent directly to the queue; it always needs to go through an exchange.
try:
    while True:
        channel.basic_publish(exchange="", routing_key='hello', body=u'ไฝ ๅฅฝ๏ผ')
        time.sleep(1)
        print(u" Sent 'ไฝ ๅฅฝ๏ผ'")
finally:
    connection.close()
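The receiving side is not shown above; a minimal consumer sketch for the same 'hello' queue, assuming pika >= 1.0 (whose basic_consume takes queue and on_message_callback keywords):

import pika

def on_message(ch, method, properties, body):
    # body arrives as bytes
    print(" Received %r" % body.decode("utf-8"))

consume_connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
consume_channel = consume_connection.channel()
consume_channel.queue_declare(queue='hello')  # idempotent; safe to redeclare
consume_channel.basic_consume(queue='hello', on_message_callback=on_message, auto_ack=True)
consume_channel.start_consuming()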
import pytest
from boardfarm.orchestration import TestStep as TS
from boardfarm.tests.bft_base_test import BftBaseTest
from unittest2 import TestCase

# Assumption1: a later test won't define runTest
# Assumption2: the BF pytest plugin won't call a wrapper to execute runTest
# Assumption3: BF pytest will directly use test-prefixed methods


class BfPyTestCls(TestCase, BftBaseTest):
    def __init__(self, *args, **kwargs):
        # This is just to ensure the pytest runs with BF;
        # this is done so that base prefixed/suffixed test methods don't run
        TestCase.__init__(self, *args, **kwargs)
        BftBaseTest.__init__(self, None, None, None)


class test_orchestration(BfPyTestCls):
    # this will always raise a ValueError exception
    def action(self, code=200):
        e = ValueError("Intended Exception")
        e.code = code
        raise e

    def test_positive_scenario1(self):
        with TS(self, "Negative Test Scenario 1", type(self).__name__) as ts:
            with ts.assertRaises(ValueError) as e:
                ts.call(self.action)
            exc = e.exception
            assert exc.code == 200

    def test_positive_scenario2(self):
        with TS(self, "Negative Test Scenario 2", type(self).__name__) as ts:
            exc = ts.assertRaises(ValueError, self.action, code=300).exception
            assert exc.code == 300

    def test_positive_scenario3(self):
        # should be able to continue the remaining execution after the with, if any
        with TS(self, "Negative Test Scenario 3", type(self).__name__) as ts:
            with ts.assertRaises(ValueError) as e:
                ts.call(self.action)
            ts.call(print, "Yes I work")
            exc = e.exception
            assert exc.code == 200

    def test_positive_scenario4(self):
        # should be able to continue the remaining execution after the with, if any
        with TS(self, "Negative Test Scenario 4", type(self).__name__) as ts:
            exc = ts.assertRaises(ValueError, self.action, code=300).exception
            ts.call(print, "Yes I work")
            assert exc.code == 300

    def test_negative_scenario1(self):
        # this scenario will throw a Code Error exception
        with pytest.raises(AssertionError) as exc:
            with TS(self, "Negative Test Scenario 5", type(self).__name__) as ts:
                with ts.assertRaises(KeyError):
                    ts.call(self.action)
        assert exc.type is AssertionError

    def test_negative_scenario2(self):
        # this scenario will throw a Code Error exception
        with pytest.raises(AssertionError) as exc:
            with TS(self, "Negative Test Scenario 6", type(self).__name__) as ts:
                ts.assertRaises(KeyError, self.action, code=100)
        assert exc.type is AssertionError

    def test_negative_scenario3(self):
        # this scenario will throw a Code Error as no exception got raised
        with pytest.raises(AssertionError) as exc:
            with TS(self, "Negative Test Scenario 7", type(self).__name__) as ts:
                with ts.assertRaises(KeyError):
                    ts.call(print, "I won't throw an exception")
        assert "No exception caught" in str(exc.value)

    def test_negative_scenario4(self):
        # this scenario will throw a Code Error as no exception got raised
        with pytest.raises(AssertionError) as exc:
            with TS(self, "Negative Test Scenario 8", type(self).__name__) as ts:
                ts.assertRaises(KeyError, print, "I won't throw an exception")
        assert "No exception caught" in str(exc.value)
"""Prepare a binned matrix of misalignments and plot it in different ways""" import click import pysam import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib.path import Path import matplotlib.patches as patches from matplotlib.colors import LogNorm import numpy as np def we_have_too_many_bins(bins): return sum([len(bb) for bb in bins]) > 5000 # This is our threshold for too many bins to compute def autoscale_bin_size(chrom_lens, bin_cnt=100.0): return int(sum(chrom_lens) / bin_cnt) def compute_misalignment_matrix_from_bam(bam_fp, bin_size=None, i_know_what_i_am_doing=False): """Create a matrix of binned mis-alignments :param bam_fp: input BAM :param bin_size: size of bin in mega bases :param i_know_what_i_am_doing: Set this to override the runtime warning of too many bins """ def binnify(_pos, _bins): for n in range(1, len(_bins)): if _pos < _bins[n]: return n - 1 return len(_bins) - 1 # Should not get here chrom_lens = [hdr['LN'] for hdr in bam_fp.header['SQ']] bin_size = bin_size * 1e6 if bin_size is not None else autoscale_bin_size(chrom_lens) bins = [np.array(range(0, hdr['LN'], bin_size) + [hdr['LN']], dtype=int) for hdr in bam_fp.header['SQ']] if not i_know_what_i_am_doing and we_have_too_many_bins(bins): raise RuntimeWarning('The number of bins will be very large. ' 'If you are sure you want to do this, ' 'use the --i-know-what-i-am-doing flag.') bin_centers = [(bb[:-1] + bb[1:]) / 2.0 for bb in bins] # Rows = source (correct pos) Cols = destination (aligned pos) matrices = [[np.zeros(shape=(len(bins[j]) - 1, len(bins[i]) - 1), dtype='uint32') for i in range(len(bins))] for j in range(len(bins))] # TAG TYPE VALUE # XR i Aligned chromosome # XP i Aligned pos for r in bam_fp: c_chrom, c_pos, a_chrom, a_pos = r.reference_id, r.pos, r.get_tag('XR'), r.get_tag('XP') c_pos_binned, a_pos_binned = binnify(c_pos, bins[c_chrom]), binnify(a_pos, bins[a_chrom]) matrices[c_chrom][a_chrom][c_pos_binned, a_pos_binned] += 1 return chrom_lens, bins, bin_centers, matrices def plot_genome_as_a_circle(ax, chrom_lens, chrom_gap=np.pi / 50, chrom_radius=1.0, chrom_thick=5, r_max=1.05): """Plot the chromosomes on a circle.""" total_len = sum(chrom_lens) radians_per_base = (2.0 * np.pi - len(chrom_lens) * chrom_gap) / total_len # With allowance for chrom gaps theta_stops, x_ticks, x_tick_labels = [], [], [] delta_radian = 0.01 start_radian = 0 for ch_no, l in enumerate(chrom_lens): end_radian = start_radian + l * radians_per_base theta = np.arange(start_radian, end_radian, delta_radian) theta_stops.append((start_radian, end_radian)) ax.plot(theta, [chrom_radius * 1.01] * theta.size, lw=chrom_thick, zorder=-1) # , color=[.3, .3, .3]) x_ticks.append((start_radian + end_radian)/2) x_tick_labels.append(str(ch_no + 1)) start_radian = end_radian + chrom_gap plt.setp(ax.get_yticklabels(), visible=False) ax.grid(False) plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels) ax.set_rmax(r_max) return theta_stops def plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=0.01): scaling_factor *= 0.01 # http://matplotlib.org/users/path_tutorial.html codes = [ Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4, ] for i in range(len(bins)): for j in range(len(bins)): mat = matrices[i][j] range_bp_origin, range_bp_dest = float(chrom_lens[i]), float(chrom_lens[j]) offset_origin, offset_dest = theta_stops[i][0], theta_stops[j][0] range_origin, range_dest = theta_stops[i][1] - theta_stops[i][0], theta_stops[j][1] - 
theta_stops[j][0] scale_origin, scale_dest = range_origin / range_bp_origin, range_dest / range_bp_dest c_origin, c_dest = offset_origin + bin_centers[i] * scale_origin, offset_dest + bin_centers[j] * scale_dest this_origin, this_dest = np.tile(c_origin, c_dest.shape[0]), np.repeat(c_dest, c_origin.shape[0]) mat_flat = mat.ravel() idx, = mat_flat.nonzero() for ii in idx: t0, t1 = this_origin[ii], this_dest[ii] this_radius = max(min(1.0, abs(t1 - t0) / np.pi), 0.05) * chrom_radius vertices = [ (t0, chrom_radius), # P0 (t0, chrom_radius - this_radius), # P1 (t1, chrom_radius - this_radius), # P2 (t1, chrom_radius), # P3 ] path = Path(vertices, codes) patch = patches.PathPatch(path, facecolor='none', lw=scaling_factor * mat_flat[ii]) ax.add_patch(patch) def circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor): """Plot the confusion matrix as a circle plot.""" fig = plt.figure() ax = fig.add_subplot(111, polar=True) theta_stops = plot_genome_as_a_circle(ax, chrom_lens) plot_read_mis_alignments_on_a_circle(ax, chrom_lens, bins, bin_centers, matrices, theta_stops, chrom_radius=1.0, scaling_factor=scaling_factor) def plot_genome_as_a_square(ax, bins, chrom_gap=1000, chrom_thick=5): """Plot the chromosomes on a matrix.""" start_pos, linear_stops, x_ticks, x_tick_labels = chrom_gap, [], [], [] for ch_no, b in enumerate(bins): linear_stops.append([start_pos, start_pos + b[-1]]) ax.plot([x + start_pos for x in b], [0 for _ in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1) ax.plot([0 for _ in b], [x + start_pos for x in b], color='k' if ch_no % 2 else 'gray', lw=chrom_thick, zorder=-1) x_ticks.append((start_pos + start_pos + b[-1]) / 2) x_tick_labels.append(str(ch_no + 1)) start_pos += b[-1] + chrom_gap #plt.setp(ax.get_yticklabels(), visible=False) ax.grid(False) plt.setp(ax, xticks=x_ticks, xticklabels=x_tick_labels, yticks=x_ticks, yticklabels=x_tick_labels) return linear_stops def plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops, scaling_factor=1.0, plot_grid=True): for i in range(len(bins)): for j in range(len(bins)): mat = matrices[i][j] range_bp_x, range_bp_y = float(chrom_lens[i]), float(chrom_lens[j]) offset_x, offset_y = linear_stops[i][0], linear_stops[j][0] range_x, range_y = linear_stops[i][1] - linear_stops[i][0], linear_stops[j][1] - linear_stops[j][0] scale_x, scale_y = range_x / range_bp_x, range_y / range_bp_y cx, cy = offset_x + bin_centers[i] * scale_x, offset_y + bin_centers[j] * scale_y this_x, this_y = np.tile(cx, cy.shape[0]), np.repeat(cy, cx.shape[0]) if plot_grid: ax.plot(this_x, this_y, '.', color=(0.8, 0.8, 0.8), ms=2, zorder=-1) mat_flat = mat.ravel() idx, = mat_flat.nonzero() if idx.size > 0: ax.scatter(this_x[idx], this_y[idx], mat_flat[idx] * scaling_factor, facecolors='none') def matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=True): """Plot the confusion matrix as a ... 
matrix.""" fig = plt.figure() ax = fig.add_subplot(111) linear_stops = plot_genome_as_a_square(ax, bins, chrom_gap=max(chrom_lens) * 0.1) plot_read_mis_alignments_as_a_matrix(ax, chrom_lens, bins, bin_centers, matrices, linear_stops, scaling_factor=scaling_factor, plot_grid=plot_grid) plt.setp(ax, aspect=1, xlabel='Correct', ylabel='Aligned') def is_grid_too_dense(bins): return sum([len(bb) for bb in bins]) > 100 # This is our threshold for too dense a grid to show def auto_scale_scaling_factor(matrices, scale=1000.0): return scale / max([matrices[i][j].max() for i in range(len(matrices)) for j in range(len(matrices[i]))]) @click.command() @click.argument('badbam', type=click.Path(exists=True)) @click.option('--circle', type=click.Path(), help='Name of figure file for circle plot') @click.option('--matrix', type=click.Path(), help='Name of figure file for matrix plot') @click.option('--bin-size', type=float, default=None, help='Bin size in Mb. Omit to auto-scale') @click.option('--scaling-factor', type=float, default=None, help='Scale size of disks/lines in plot. Omit to auto-scale') @click.option('--i-know-what-i-am-doing', is_flag=True, help='Override bin density safety') def cli(badbam, circle, matrix, bin_size, scaling_factor, i_know_what_i_am_doing): """Prepare a binned matrix of mis-alignments and plot it in different ways""" chrom_lens, bins, bin_centers, matrices = \ compute_misalignment_matrix_from_bam(pysam.AlignmentFile(badbam, 'rb'), bin_size=bin_size, i_know_what_i_am_doing=i_know_what_i_am_doing) scaling_factor = scaling_factor or auto_scale_scaling_factor(matrices) if circle is not None: circle_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor) plt.savefig(circle) if matrix is not None: matrix_plot(chrom_lens, bins, bin_centers, matrices, scaling_factor, plot_grid=not is_grid_too_dense(bins)) plt.savefig(matrix) if __name__ == '__main__': cli()
# -*- coding: utf-8 -*-

from __future__ import division, print_function

__all__ = ["State"]

from copy import deepcopy

import numpy as np


class State(object):
    """The state of the ensemble during an MCMC run

    For backwards compatibility, this will unpack into
    ``coords, log_prob, (blobs), random_state`` when iterated over (where
    ``blobs`` will only be included if it exists and is not ``None``).

    Args:
        coords (ndarray[nwalkers, ndim]): The current positions of the walkers
            in the parameter space.
        log_prob (ndarray[nwalkers, ndim], Optional): Log posterior
            probabilities for the walkers at positions given by ``coords``.
        blobs (Optional): The metadata "blobs" associated with the current
            position. The value is only returned if lnpostfn returns blobs too.
        random_state (Optional): The current state of the random number
            generator.
    """

    __slots__ = "coords", "log_prob", "blobs", "random_state"

    def __init__(self, coords, log_prob=None, blobs=None, random_state=None, copy=False):
        dc = deepcopy if copy else lambda x: x

        # copy-construct from another State-like object
        if hasattr(coords, "coords"):
            self.coords = dc(coords.coords)
            self.log_prob = dc(coords.log_prob)
            self.blobs = dc(coords.blobs)
            self.random_state = dc(coords.random_state)
            return

        self.coords = dc(np.atleast_2d(coords))
        self.log_prob = dc(log_prob)
        self.blobs = dc(blobs)
        self.random_state = dc(random_state)

    def __repr__(self):
        return "State({0}, log_prob={1}, blobs={2}, random_state={3})".format(
            self.coords, self.log_prob, self.blobs, self.random_state
        )

    def __iter__(self):
        if self.blobs is None:
            return iter((self.coords, self.log_prob, self.random_state))
        return iter((self.coords, self.log_prob, self.random_state, self.blobs))
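A small usage sketch of the unpacking behavior implemented by __iter__ above (the shapes are illustrative: 3 walkers in a 2-D parameter space):

state = State(np.zeros((3, 2)), log_prob=np.zeros(3))
coords, log_prob, random_state = state  # blobs is None, so a 3-tuple

state.blobs = ["a", "b", "c"]
coords, log_prob, random_state, blobs = state  # blobs present, so a 4-tuple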
"""expense_tracker URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ import users from django.contrib import admin from django.urls import include, path import users.views from . import views urlpatterns = [ path('admin/', admin.site.urls), path('', views.index), path('register/', views.register), path('', include('django.contrib.auth.urls')), path('users/', include('users.urls')), path('bills/', include('bills.urls')), path('loan/', include('loan.urls')), path('misc/', include('misc.urls')), path('store/', include('store.urls')), path('subscription/', include('subscription.urls')), path('myBudget/', include('budget.urls')), path('logout/', views.logout_user), path('update-password/', views.change_password), path('community/', include('community.urls')), path('myCalendar/', include('cal.urls')), ]
from tkinter import * import pygame import random import copy class tetrisGame(object): #################################### # Majority of the base Tetris Framework created by David Zhang @dlzhang Week6 # Adapted to Pygame and added notable features for the Term Project # Used pieces of the Pygame Framework from Lukas Peraza @http://blog.lukasperaza.com/getting-started-with-pygame/ #################################### # Tetris #################################### ##################################### # Event Handlers ##################################### #initializes the game def __init__(self, runAI = True, xPos = 0, yPos = 0, AISpeedInput = 10, AIDifficultyInput = 5, puzzleBoard = False, width = 400, height = 600, fps = 60, title = "Tetris", doubleManual = 0): self.runAI = runAI self.xPos = xPos self.yPos = yPos self.width = width self.height = height self.fps = fps self.title = title self.bgColor = (178, 7, 25) self.AISpeedInput = AISpeedInput self.AIDifficultyInput = AIDifficultyInput self.puzzleBoard = puzzleBoard self.doubleManual = doubleManual pygame.init() def mousePressed(self, x, y): # use event.x and event.y pass def mouseReleased(self, x, y): pass def mouseMotion(self, x, y): pass def mouseDrag(self, x, y): pass #Responds based on keys pressed def keyPressed(self, keyCode, modifier, screen): #if the game has ended, the only valid key press should be to restart if (self.isGameOver or self.isGameWon): if (keyCode == 114): self.init() #otherwise, give the option to pause or rotate/move the piece elif (keyCode == 112): self.isPaused = not self.isPaused elif (keyCode == 114): if (self.isPaused): self.init() elif (self.runAI == False): #if there is only one manual player, use standard controls if (self.doubleManual == 0): if (keyCode == 276): self.moveFallingPiece(0, -1) elif (keyCode == 275): self.moveFallingPiece(0, 1) elif (keyCode == 274): self.moveFallingPiece(1, 0) elif (keyCode == 273): self.rotateFallingPiece() elif (keyCode == 303) or (keyCode == 304): self.doHold() elif (keyCode == 32): self.hardDrop() #if there are two manual players, change the keys for player 1 elif (self.doubleManual == 1): if (keyCode == 97): self.moveFallingPiece(0, -1) elif (keyCode == 100): self.moveFallingPiece(0, 1) elif (keyCode == 115): self.moveFallingPiece(1, 0) elif (keyCode == 119): self.rotateFallingPiece() elif (keyCode == 304): self.doHold() elif (keyCode == 122): self.hardDrop() #if there are two manual players, change the keys for player 2 elif (self.doubleManual == 2): if (keyCode == 276): self.moveFallingPiece(0, -1) elif (keyCode == 275): self.moveFallingPiece(0, 1) elif (keyCode == 274): self.moveFallingPiece(1, 0) elif (keyCode == 273): self.rotateFallingPiece() elif (keyCode == 303): self.doHold() elif (keyCode == 13): self.hardDrop() #calls timerFired to modify the board every time the timerDelay passes def timerFired(self, dt): #if the game is over or the game is paused, don't move any pieces if (not self.isGameOver and not self.isPaused and not self.isGameWon): self.time += dt self.stopwatch += dt if (self.runAI == True): self.AIStep += dt #does a move based on the AI speed if self.AIStep >= self.AISpeed: self.AIStep %= self.AISpeed self.doStep = True self.findBestPlacement() if self.time >= 1000: #move the piece and if the piece can't move, returns False move = self.moveFallingPiece(1, 0) self.time %= 1000 #if the piece can't move, place the piece and try to remove the full #rows, then spawn a new piece if (not move): self.removeGhostPiece() self.placeFallingPiece() 
self.removeFullRows() self.newFallingPiece() #if the newly spawned piece can't be legally placed, display Game #Over if (not self.isLegal(self.board,self.fallingPiece,self.fallingPieceRow, self.fallingPieceCol,self.fallingPieceRows,self.fallingPieceCols)): self.isGameOver = True #calls all the draw functions when it is called def redrawAll(self, screen): # draws the game screen, the score at the top left, #and the gameOver screen if the game is over self.drawGame(screen) self.drawScore(screen) self.drawPaused(screen) self.drawGameOver(screen) self.drawGameWon(screen) def isKeyPressed(self, key): pass ###################################### # Game Data ###################################### #initializes the variables that will be used by the Tetris program def init(self,rows=20,cols=10,margin=100,cellWidth=18,cellHeight=18,randomRow=11): self.stopwatch = 0 self.stopwatchTime = "" self.rows = rows self.cols = cols self.margin = margin self.cellWidth = cellWidth self.cellHeight = cellHeight ###################################### # initializes the board ###################################### self.font = pygame.font.SysFont("gillsansultra", 15) self.emptyColor = (0,0,0) self.board = [[self.emptyColor]*self.cols for row in range(self.rows)] #defines the tetrominoes self.tetrisPieces = dict({'iPiece':((91,212,252),[[True, True, True, True]]), 'jPiece':((0,0,255),[[True, False, False], [True, True, True]]), 'lPiece':((232,126,5),[[False, False, True], [True, True, True]]), 'oPiece':((232,217,5),[[True, True], [True, True]]), 'sPiece':((0,255,0),[[False, True, True], [True, True, False]]), 'zPiece':((255,0,0),[[True, True, False], [False, True, True]]), 'tPiece':((156,5,232),[[False, True, False], [True, True, True]])}) ############################ # Vars for the hold piece and the queue pieces ############################ self.holdPiece = None self.currentPiece = None self.heldPiece = False self.queue = [] ############################# # Vars for the falling piece ############################# self.fallingPiece = 0 self.fallingPieceColor = "" self.fallingPieceRows = 0 self.fallingPieceCols = 0 self.fallingPieceRow = 0 self.fallingPieceCol = 0 ############################## # Vars for the ghost piece ############################## self.ghostPiece = 0 self.ghostColor = (222,222,222) self.ghostPieceRows = 0 self.ghostPieceCols = 0 self.ghostPieceRow = 0 self.ghostPieceCol = 0 self.ghostBoard = [[self.emptyColor]*self.cols for row in range(self.rows)] ############################## # Vars for extraneous events ############################## self.score = 0 self.isGameOver = False self.isPaused = False self.time = 0 ############################### # Vars for AI Calculating the Best Move ############################### self.colHeights = [0]*cols self.holes = [0]*cols self.gaps = [0]*cols self.AIlinesCleared = 0 self.boardScore = 0 self.holeWeight = 5.9288669341469555 self.colHeightWeight = 1.9162926946223073 self.gapWeight = 3.3878742140370637 self.clearWeight = 4.641363224169059 self.oldBoard = self.board self.simBoard = self.board self.simPiece = None self.placementColor = (100,100,100) ############################### # Vars for AI performing the Best Move ############################### self.AIStep = 0 self.doStep = False self.completedMove = True self.rotationNumber = 0 self.bestPiece = None self.bestCol = 0 ################################ # Testing with a "puzzle" board ################################ if (self.puzzleBoard == True): self.randomRow = randomRow self.nonEmptyColor = (100,100,100) for i 
in range(20): row = random.randint(self.randomRow,19) col = random.randint(0,9) if (self.board[row][col] == self.emptyColor): self.board[row][col] = self.nonEmptyColor ################################ # Modifying the AI Difficulty/Speed ################################ self.AISpeed = (11-self.AISpeedInput)*100 self.AIDifficulty = 0.96**(5-self.AIDifficultyInput) ################################ # Multiplayer Vars ################################ self.linesCleared = 0 self.garbageRows = 0 self.garbageColor = (50,50,50) self.addGarb = False self.isGameWon = False ############################### # Record Saving ############################### self.addedHistory = False self.recordedHistory = False ############################## # initializes the game ############################## self.makeQueue() self.newFallingPiece() #################################################### # Fundamental Game Functions #################################################### #draws the background rectangle and then calls drawBoard and drawFallingPiece def drawGame(self, screen): pygame.draw.rect(screen, (19,2,28), (55+self.xPos,55+self.yPos,280,460)) pygame.draw.rect(screen, (57,66,133), (50+self.xPos,50+self.yPos,280,460)) pygame.draw.rect(screen,(19,2,28),(100+self.xPos,100+self.yPos,180,360)) if (self.runAI == True): AIText = self.font.render("AI", 1, (146,148,150)) screen.blit(AIText, (170+self.xPos, 520+self.yPos)) if (self.AISpeedInput == 10.99): speedText = self.font.render("Speed: GOD", 1, (146,148,150)) else: speedText = self.font.render("Speed: " + str(self.AISpeedInput) + "/10", 1, (146,148,150)) screen.blit(speedText, (125+self.xPos,540+self.yPos)) if (self.AISpeedInput == 10.99): difficultyText = self.font.render("Intelligence: GOD", 1, (146,148,150)) else: difficultyText = self.font.render("Intelligence: " + str(self.AIDifficultyInput) + "/5", 1, (146,148,150)) screen.blit(difficultyText, (105+self.xPos,560+self.yPos)) else: manualText = self.font.render("Manual", 1, (146,148,150)) screen.blit(manualText, (150+self.xPos, 550+self.yPos)) #draws the stopwatch timeHours = self.stopwatch//(3600*1000) timeMinutes = (self.stopwatch//(60*1000))%60 timeSeconds = (self.stopwatch//1000)%60 timeMilliseconds = (self.stopwatch%1000) self.stopwatchTime = "%d:%02d:%02d.%03d" % (timeHours,timeMinutes,timeSeconds,timeMilliseconds) timerText = self.font.render(str(self.stopwatchTime), 1, (146,148,150)) darkTimerText = self.font.render(str(self.stopwatchTime), 1, (0,0,0)) screen.blit(darkTimerText, (251+self.xPos, 10+self.yPos)) screen.blit(timerText, (250+self.xPos, 10+self.yPos)) self.drawQueue(screen) self.drawHold(screen) self.drawBoard(screen) self.drawGhostPiece(screen) self.drawFallingPiece(screen) #draws the board by calling drawCell for each cell in the board def drawBoard(self, screen): for row in range(self.rows): for col in range(self.cols): self.drawCell(screen, row, col, self.board[row][col]) #gets the x0, y0, x1, y1, of each cell/square based on the row and column of #the square in the board def getCellBounds(self, row, col): x0 = self.margin + (col*self.cellWidth) y0 = self.margin + (row*self.cellHeight) x1 = x0 + self.cellWidth y1 = y0 + self.cellHeight return (x0, y0, x1, y1) #draws each cell based on the row and column of the cell and its given color def drawCell(self, screen, row, col, color): (x0, y0, x1, y1) = self.getCellBounds(row, col) m = 2 #cell margin if (color == (0,0,0)): pygame.draw.rect(screen, (55,55,55), (x0+self.xPos, y0+self.yPos, x1-x0-m, y1-y0-m), 1) else: m = 3 (color1, color2, 
color3) = color darkColor1 = max(0, color1 - 50) darkColor2 = max(0, color2 - 50) darkColor3 = max(0, color3 - 50) pygame.draw.rect(screen, (darkColor1,darkColor2,darkColor3), (x0+self.xPos,y0+self.yPos,x1-x0-m,y1-y0-m)) pygame.draw.rect(screen, color, (x0+m+self.xPos, y0+m+self.yPos, x1-x0-m-m, y1-y0-m-m)) #spawns a new falling piece def newFallingPiece(self): self.queue.append(random.choice(['iPiece','jPiece','lPiece','oPiece', 'sPiece','zPiece','tPiece'])) self.currentPiece = self.queue[0] self.queue = self.queue[1:] self.fallingPiece = self.tetrisPieces[self.currentPiece][1] self.fallingPieceColor = self.tetrisPieces[self.currentPiece][0] #identifies the number of rows and cols that the piece has self.fallingPieceRows = len(self.fallingPiece) self.fallingPieceCols = len(self.fallingPiece[0]) #identifies which row and which column the top-left corner of the falling #piece is located in self.fallingPieceRow = 0 self.fallingPieceCol = self.cols//2 - (self.fallingPieceCols//2) self.removeGhostPiece() self.placeGhostPiece() #holds the current piece and replaces it with the piece in the hold queue def doHold(self): #you cannot hold twice in a row, so if a piece has just been held, #prevents the player from holding again if (self.heldPiece == False): self.heldPiece = True #if there is no piece in the hold queue, then place the current piece #in the hold queue, and use the next piece as the current piece, otherwise #replace the hold piece with the current piece if (self.holdPiece != None): (self.holdPiece,self.currentPiece) = (self.currentPiece,self.holdPiece) self.newHeldPiece(self.currentPiece) else: self.holdPiece = self.currentPiece self.newFallingPiece() #draws the falling piece by drawing the piece over the board, similar to how #the board was drawn, but only drawing when the section of the piece is true def drawFallingPiece(self, screen): self.removeGhostPiece() self.drawGhostPiece(screen) for row in range(self.fallingPieceRows): printRow = row + self.fallingPieceRow for col in range(self.fallingPieceCols): if (self.fallingPiece[row][col]): printCol = col + self.fallingPieceCol color = self.fallingPieceColor self.drawCell(screen, printRow, printCol, color) #moves the falling piece by altering the position of the top-left corner of #the piece. If the piece cannot move in said direction, undo the move. If the #move is possible, returns True, else returns False. 
def moveFallingPiece(self, drow, dcol): if (self.isGameOver == False) and (self.isPaused == False): self.fallingPieceRow += drow self.fallingPieceCol += dcol if (not self.isLegal(self.board,self.fallingPiece,self.fallingPieceRow, self.fallingPieceCol,self.fallingPieceRows,self.fallingPieceCols)): self.fallingPieceRow -= drow self.fallingPieceCol -= dcol return False self.removeGhostPiece() self.placeGhostPiece() return True #rotates the falling piece clockwise 90 degrees def rotateFallingPiece(self): #keeps track of all of the values of the falling piece in case we need to #undo the rotation if (self.isGameOver == False) and (self.isPaused == False): oldPiece = self.fallingPiece oldRow = self.fallingPieceRow oldCol = self.fallingPieceCol oldRows = self.fallingPieceRows oldCols = self.fallingPieceCols newPiece = [[False]*oldRows for i in range(oldCols)] #creates the new piece by assigning the appropriate indices in the initial #piece to the appropriate indices in the final piece for row in range(oldRows): for col in range(oldCols): newPiece[col][oldRows - 1 - row] = oldPiece[row][col] #updates the falling piece to the newly rotated falling piece self.fallingPiece = newPiece self.fallingPieceRows = len(newPiece) self.fallingPieceCols = len(newPiece[0]) #if the rotation isn't legal, undo all the previous changes if (not self.isLegal(self.board,self.fallingPiece,self.fallingPieceRow, self.fallingPieceCol,self.fallingPieceRows,self.fallingPieceCols)): self.fallingPiece = oldPiece self.fallingPieceRow = oldRow self.fallingPieceCol = oldCol self.fallingPieceRows = oldRows self.fallingPieceCols = oldCols self.removeGhostPiece() self.placeGhostPiece() #tests if the piece is legal by making sure the proposed move/rotation is both #still inside the board and does not place the piece inside another non-empty #piece/block. def isLegal(self, board, piece, pieceRow, pieceCol, pieceRows, pieceCols): for row in range(pieceRows): checkRow = row + pieceRow for col in range(pieceCols): checkCol = col + pieceCol if ((checkRow >= self.rows) or (checkCol >= self.cols) or (checkRow < 0) or (checkCol < 0)): return False #if the square in the piece is True and the square is also taken #by another piece, return True if (piece[row][col]) and (board[checkRow][checkCol] != self.emptyColor): return False return True #once the piece has reached the bottom, place the falling piece by adding it to #the board def placeFallingPiece(self): for row in range(self.fallingPieceRows): placeRow = row + self.fallingPieceRow for col in range(self.fallingPieceCols): placeCol = col + self.fallingPieceCol if (self.fallingPiece[row][col]): self.board[placeRow][placeCol] = self.fallingPieceColor self.heldPiece = False #if the game is paused, display that the game is paused def drawPaused(self, screen): if (self.isPaused): pygame.draw.rect(screen, (0,0,0), (105+self.xPos,205+self.yPos,180,100)) pygame.draw.rect(screen, (155,155,155), (100+self.xPos,200+self.yPos,180,100)) pauseText = self.font.render("PAUSED", 1, (255,255,255)) screen.blit(pauseText, (150+self.xPos,225+self.yPos)) restartText = self.font.render("(press r to restart)", 1, (255,255,255)) screen.blit(restartText, (102+self.xPos,265+self.yPos)) #if the player has won, print the win screen def drawGameWon(self, screen): if (self.isGameWon): pygame.draw.rect(screen, (0,0,0), (65+self.xPos,255+self.yPos,300,70)) pygame.draw.rect(screen, (255,255,255), (60+self.xPos,250+self.yPos,300,70)) scoreText = self.font.render("You win! 
Final Score:" + str(self.score), 1, (46,48,50)) screen.blit(scoreText, (100+self.xPos,265+self.yPos)) restartText = self.font.render("(press r to restart)", 1, (46,48,50)) screen.blit(restartText, (112+self.xPos,290+self.yPos)) #if the player has lost, print the game over screen def drawGameOver(self, screen): if (self.isGameOver): pygame.draw.rect(screen, (0,0,0), (65+self.xPos,255+self.yPos,300,70)) pygame.draw.rect(screen, (46,48,50), (60+self.xPos,250+self.yPos,300,70)) scoreText = self.font.render("Game Over! Final Score:" + str(self.score), 1, (255,255,255)) screen.blit(scoreText, (100+self.xPos,265+self.yPos)) restartText = self.font.render("(press r to restart)", 1, (255,255,255)) screen.blit(restartText, (122+self.xPos,290+self.yPos)) #removes full Rows and increments the score accordingly def removeFullRows(self): newRow = self.rows oldScore = self.score self.linesCleared = 0 #makes an empty newBoard which will replace the old board newBoard = [[self.emptyColor]*self.cols for row in range(self.rows)] for row in range(self.rows-1, 0, -1): addRow = False #if we see garbage or an empty piece, the row is not cleared so we must add it to the new board for col in range(self.cols): if (self.board[row][col] == (self.emptyColor) or (self.board[row][col] == self.garbageColor)): addRow = True if (addRow == True): newRow -= 1 for col in range(self.cols): newBoard[newRow][col] = self.board[row][col] #if we don't add the row then the row is cleared and we increment the score else: self.score += 1 self.linesCleared += 1 #if there is garbage on the board and the player has cleared line(s) remove appropriate amounts of garbage while (self.hasGarbage(newBoard) == True) and (self.linesCleared > 0): newBoard = self.removeGarbage(newBoard) #if the newBoard is different then the board has changed and we update the board if (newBoard != self.board): self.board = newBoard self.removeGhostPiece() self.placeGhostPiece() #adds rows of garbage to the bottom of the board def addGarbage(self, rows): for addRow in range(rows): tempBoard = copy.deepcopy(self.board) for row in range(self.rows): for col in range(self.cols): #if the top row is already filled, then the player is shoved past the top and loses if (self.board[0][col] != self.emptyColor): self.isGameOver = True #since we are moving every block one row up, we must construct the bottom row elif (row == self.rows-1): self.board[row][col] = self.garbageColor else: self.board[row][col] = tempBoard[row+1][col] #removes garbage from the board based on the number of lines cleared def removeGarbage(self, board): self.linesCleared -= 1 tempBoard = copy.deepcopy(board) for row in range(self.rows): for col in range(self.cols): #since we move each block one row down, we must construct the top-most row if (row == 0): board[row][col] = self.emptyColor else: board[row][col] = tempBoard[row-1][col] return board #checks if the board contains a garbage row or not def hasGarbage(self, board): if (board[self.rows-1][0] == self.garbageColor): return True else: return False #counts the number of full rows (but does not clear them), used to calculate board #score def countFullRows(self): self.AIlinesCleared = 0 for row in range(self.rows-1, 0, -1): clearedLine = True for col in range(self.cols): if (self.simBoard[row][col] == (self.emptyColor)): clearedLine = False break if (clearedLine == True): self.AIlinesCleared += 1 #returns how many lines are cleared so that it can be used by the multiplayer feature def sendGarbage(self): return self.linesCleared #returns if one of the 
#players has lost, used by the multiplayer feature
    def gameLost(self):
        return self.isGameOver

    #if gameWon is called, the player has won the game
    def gameWon(self):
        self.isGameWon = True

    ############################################
    # Extraneous Game Functions (for better gameplay)
    ############################################

    #draws the score of the player in the top left corner
    def drawScore(self, screen):
        score = "Score: " + str(self.score)
        scoreText = self.font.render(score, 1, (146,148,150))
        darkScoreText = self.font.render(score, 1, (0,0,0))
        screen.blit(darkScoreText, (11+self.xPos,10+self.yPos))
        screen.blit(scoreText, (10+self.xPos,10+self.yPos))

    #hard drops the tetromino
    def hardDrop(self):
        if (not self.isPaused):
            while (self.moveFallingPiece(1,0)):
                pass
            self.placeFallingPiece()
            self.removeFullRows()
            self.newFallingPiece()

    ##############################################
    # Queue/Hold Functions
    ##############################################

    #makes the queue
    def makeQueue(self):
        for i in range(5):
            self.queue.append(random.choice(['iPiece','jPiece','lPiece','oPiece',
                                             'sPiece','zPiece','tPiece']))

    #draws the queue
    def drawQueue(self, screen):
        pygame.draw.rect(screen, (19,2,28),(290+self.xPos,135+self.yPos,100,250))
        pygame.draw.rect(screen, (57,66,133),(285+self.xPos,130+self.yPos,100,250))
        pygame.draw.rect(screen, (19,2,28),(295+self.xPos,170+self.yPos,80,200))
        #draws each of the five pieces in the queue
        for i in range(5):
            piece = self.queue[i]
            xIndex = 310
            yIndex = 175 + i*40
            self.drawComponents(screen, xIndex, yIndex,
                                self.tetrisPieces[piece][0], self.tetrisPieces[piece][1])
        queueText = self.font.render("Next", 1, (146,148,150))
        screen.blit(queueText, (310+self.xPos,140+self.yPos))

    #draws the hold
    def drawHold(self, screen):
        pygame.draw.rect(screen, (19,2,28),(15+self.xPos,135+self.yPos,80,110))
        pygame.draw.rect(screen, (57,66,133),(10+self.xPos,130+self.yPos,80,110))
        pygame.draw.rect(screen, (19,2,28),(20+self.xPos,170+self.yPos,60,60))
        holdText = self.font.render("Hold", 1, (146,148,150))
        screen.blit(holdText, (25+self.xPos,145+self.yPos))
        #draws the piece in the hold queue
        if (self.holdPiece != None):
            color = self.tetrisPieces[self.holdPiece][0]
            shape = self.tetrisPieces[self.holdPiece][1]
            (xIndex, yIndex) = (30,195)
            self.drawComponents(screen, xIndex, yIndex, color, shape)

    #spawns the held piece after the player holds
    def newHeldPiece(self, currentPiece):
        self.fallingPiece = self.tetrisPieces[currentPiece][1]
        self.fallingPieceColor = self.tetrisPieces[currentPiece][0]
        #identifies the number of rows and cols that the piece has
        self.fallingPieceRows = len(self.fallingPiece)
        self.fallingPieceCols = len(self.fallingPiece[0])
        #identifies which row and which column the top-left corner of the falling
        #piece is located in
        self.fallingPieceRow = 0
        self.fallingPieceCol = self.cols//2 - (self.fallingPieceCols//2)
        self.removeGhostPiece()
        self.placeGhostPiece()

    #draws the pieces that aren't on the board (i.e. hold and queue)
    def drawComponents(self, screen, xIndex, yIndex, color, shape):
        rows = len(shape)
        cols = len(shape[0])
        for row in range(rows):
            yPos = yIndex + row*(self.cellHeight-6)
            for col in range(cols):
                xPos = xIndex + col*(self.cellWidth-6)
                if (shape[row][col] == True):
                    (color1, color2, color3) = color
                    darkColor1 = max(0, color1 - 50)
                    darkColor2 = max(0, color2 - 50)
                    darkColor3 = max(0, color3 - 50)
                    pygame.draw.rect(screen, (darkColor1,darkColor2,darkColor3),
                        (xPos+self.xPos-2,yPos+self.yPos-2,self.cellWidth-7,self.cellWidth-7))
                    pygame.draw.rect(screen, color,
                        (xPos+self.xPos,yPos+self.yPos,self.cellWidth-9,self.cellHeight-9))

    ####################################################
    # Ghost Piece Functions
    ####################################################

    #places the ghost piece in the correct position
    def placeGhostPiece(self):
        self.ghostPiece = self.fallingPiece
        self.ghostPieceRow = self.fallingPieceRow
        self.ghostPieceRows = self.fallingPieceRows
        self.ghostPieceCol = self.fallingPieceCol
        self.ghostPieceCols = self.fallingPieceCols
        loop = True
        #pushes the piece as low as it can go while still being legal
        while (loop == True):
            self.ghostPieceRow += 1
            if (not self.isLegal(self.board,self.ghostPiece,self.ghostPieceRow,
                                 self.ghostPieceCol,self.ghostPieceRows,self.ghostPieceCols)):
                self.ghostPieceRow -= 1
                loop = False
        #places the ghost tile in each of the specified positions
        for row in range(self.ghostPieceRows):
            placeGhostRow = row + self.ghostPieceRow
            for col in range(self.ghostPieceCols):
                placeGhostCol = col + self.ghostPieceCol
                if (self.ghostPiece[row][col] == True):
                    self.ghostBoard[placeGhostRow][placeGhostCol] = self.ghostColor

    #draws the ghost piece onto the screen
    def drawGhostPiece(self, screen):
        for row in range(self.ghostPieceRows):
            printRow = row + self.ghostPieceRow
            for col in range(self.ghostPieceCols):
                if (self.ghostPiece[row][col]):
                    printCol = col + self.ghostPieceCol
                    color = self.ghostColor
                    self.drawCell(screen, printRow, printCol, color)

    #removes the old ghost piece every time the ghost piece moves position
    def removeGhostPiece(self):
        ghostCount = 0
        for row in range(self.rows):
            for col in range(self.cols):
                if (self.ghostBoard[row][col] == self.ghostColor):
                    ghostCount += 1
                    self.ghostBoard[row][col] = self.emptyColor
                if (ghostCount == 4):
                    break
            if (ghostCount == 4):
                break

    ########################################################
    # AI Functions
    ########################################################

    #gets the row of the highest block in each column
    def getColHeights(self):
        (rows, cols) = (self.rows, self.cols)
        for col in range(cols):
            for row in range(rows):
                #searches for the highest block in the column and adds it to the list
                #(both empty and ghost cells count as unfilled, so a membership
                #test is needed rather than comparing against a single color)
                if (self.simBoard[row][col] not in (self.emptyColor, self.ghostColor)):
                    self.colHeights[col] = (rows - 1) - (row - 1)
                    break
                #if we are at the bottom row, add it to the list
                elif (row == rows - 1):
                    self.colHeights[col] = 0

    #counts the number of holes in the board (i.e. the number of empty squares underneath filled squares)
    #also counts the height of columns over the holes (number of filled squares over holes)
    def countHoles(self):
        self.holes = [0]*self.cols
        self.holeWeight = 10
        (rows, cols) = (self.rows, self.cols)
        for col in range(cols):
            #starts the counting of the column at the first filled square of the column
            startRow = rows - self.colHeights[col]
            colHeight = rows - startRow
            for row in range(colHeight):
                #counts the tiles before we reach a hole and stores it. If no
                #hole is reached then we ignore the tiles.
if (self.simBoard[startRow + row][col] == self.emptyColor): self.holes[col] += 1 #the more holes there are, the more heavily holes are weighted self.hasHole = True #counts the gap between each column and the neighboring columns. Gaps are bad when they are on both sides #(i.e. the gap is one tile wide) so we take the min of the gap on the left and the gap on the right def getGaps(self): (rows, cols) = (self.rows, self.cols) for col in range(cols): #if we are checking the first or last column, then we only need to compare to one other column if (col == 0): self.gaps[col] = self.colHeights[1] - self.colHeights[col] elif (col == 9): self.gaps[col] = self.colHeights[8] - self.colHeights[col] else: leftGap = self.colHeights[col-1] - self.colHeights[col] rightGap = self.colHeights[col+1] - self.colHeights[col] self.gaps[col] = min(leftGap, rightGap) for col in range(cols): if (self.gaps[col] < 0): self.gaps[col] = 0 #returns a 3D list of the possible rotations of the given piece def possibleRotations(self, piece): if (piece == "iPiece"): return [[[True,True,True,True]], [[True],[True],[True],[True]]] elif (piece == "jPiece"): return [[[True, False, False],[True, True, True]], [[True, True],[True,False],[True,False]], [[True, True, True],[False, False, True]], [[False, True],[False, True], [True,True]]] elif (piece == "lPiece"): return [[[False,False,True],[True,True,True]], [[True,False],[True,False],[True,True]], [[True,True,True],[True,False,False]], [[True,True],[False,True],[False,True]]] elif (piece == "oPiece"): return [[[True,True],[True,True]]] elif (piece == "sPiece"): return [[[False,True,True],[True,True,False]], [[True,False],[True,True],[False,True]]] elif (piece == "zPiece"): return [[[True,True,False],[False,True,True]], [[False,True],[True,True],[True,False]]] elif (piece == "tPiece"): return [[[False,True,False],[True,True,True]], [[True,False],[True,True],[True,False]], [[True,True,True],[False,True,False]], [[False,True],[True,True],[False,True]]] #calculates the "score" of the board using the appropriate weights def calculateBoardScore(self): self.boardScore = 0 for colHeight in self.colHeights: self.boardScore -= (colHeight ** self.colHeightWeight) for hole in self.holes: self.boardScore -= (hole * self.holeWeight) self.gapWeight = 5 for gap in self.gaps: if (gap > 2): self.boardScore -= (self.gapWeight * gap) self.clearWeight = 10 #clearing more lines at one time is better for clear in range(self.AIlinesCleared): self.boardScore += self.clearWeight self.clearWeight += 5 return self.boardScore #iterates through all of the possible rotations and placements of the piece and #the hold piece and finds the best possible move def findBestPlacement(self): self.oldBoard = self.board #only finds the new best move after the last move has been completed if (self.completedMove == True): self.completedMove = False highestScore = -9999 #loops twice, once for the current piece and once for the hold #piece for i in range(2): if (i == 0): piece = self.currentPiece else: if (self.heldPiece == False): if (self.holdPiece != None): piece = self.holdPiece else: piece = self.queue[0] else: piece = self.currentPiece #has a chance of doing a random move (only tests to do a random move once per piece) doesWrongMove = random.uniform(0,1) if (i == 0) and (doesWrongMove >= self.AIDifficulty): self.rotationNumber = random.randint(0,3) self.bestPiece = self.currentPiece pieceWidth = len(self.possibleRotations(self.bestPiece)[0][0]) self.bestCol = random.randint(0,(10-pieceWidth)) self.doAIMove() return 
rotations = self.possibleRotations(piece) rotationNumber = -1 #goes through all the possible rotations of the piece and finds the score for rotation in rotations: rotationNumber += 1 pieceWidth = len(rotation[0]) #goes through all the possible cols the piece can be placed for col in range(self.cols-(pieceWidth-1)): self.simBoard = copy.deepcopy(self.oldBoard) score = self.hardDropCandidate(rotation, col) if (score > highestScore): (self.rotationNumber, self.bestCol, self.bestPiece) = (rotationNumber, col, piece) highestScore = score #after the ideal move has been calculated, perform the best move self.doAIMove() #places each candidate move and returns the board score of the move def hardDropCandidate(self, piece, colPos): (rowPos, rows, cols) = (0, len(piece), len(piece[0])) #if the move is not possible, return an extremely low score if (not self.isLegal(self.simBoard, piece, rowPos, colPos, rows, cols)): return -9999 #because the isLegal is checked after incrementing row, will always go one over while (self.isLegal(self.simBoard, piece, rowPos, colPos, rows, cols)): rowPos += 1 rowPos -= 1 #places the piece for row in range(rows): rowCheck = row + rowPos for col in range(cols): colCheck = col + colPos if (piece[row][col]): self.simBoard[rowCheck][colCheck] = self.placementColor self.getColHeights() self.countHoles() self.getGaps() self.countFullRows() score = self.calculateBoardScore() return score #moves the pieces to the ideal position one rotation or one movement at a time and updates #the display so that the user can see what the AI is doing def doAIMove(self): (col, rotation, bestPiece) = (self.bestCol, self.rotationNumber, self.bestPiece) #if we can hold and the ideal move is to hold, then we hold if (bestPiece != self.currentPiece) and (self.doStep == True) and (self.heldPiece == False): self.doHold() pygame.display.flip() self.doStep = False #if the ideal move requires more rotations, continue rotating the piece until the #desired rotation is achieved while (rotation > 0) and (self.doStep == True): self.rotationNumber -= 1 if (self.rotateFallingPiece() == False): self.isGameOver = True return None pygame.display.flip() self.doStep = False if (col == self.fallingPieceCol): pass #if the goal column is less than the falling piece's current column, move it to the left, #same for if the goal column is greater than the falling piece's current column elif (col < self.fallingPieceCol): while (col < self.fallingPieceCol) and (self.doStep == True): if (self.moveFallingPiece(0,-1) == False): self.isGameOver = True return None pygame.display.flip() self.doStep = False elif (col > self.fallingPieceCol): while (col > self.fallingPieceCol) and (self.doStep == True): if (self.moveFallingPiece(0, 1) == False): self.isGameOver = True return None pygame.display.flip() self.doStep = False #if the piece is in the proper rotation and column, hard drop the piece in place if ((bestPiece == self.currentPiece) and (rotation == 0) and (col == self.fallingPieceCol)): self.completedMove = True self.hardDrop() ###################################### # Past Games ###################################### #when the game ends, returns the relevant information describing the game def addToHistory(self): if (self.addedHistory == False): self.addedHistory = True if (self.runAI == True): gameType = "AI" else: gameType = "Manual" gameTime = self.stopwatchTime finalScore = self.score winLoss = None if (self.isGameWon == True): winLoss = "Win" elif (self.isGameOver == True): winLoss = "Loss" if (self.runAI == True): 
                speedLevel = self.AISpeedInput
                intelligenceLevel = self.AIDifficultyInput
                return [gameType, gameTime, finalScore, speedLevel, intelligenceLevel, winLoss]
            return [gameType, gameTime, finalScore, winLoss]

#adds game history data to a text file
def recordHistory(recordedHistory, data1, data2=None):
    if (recordedHistory == False):
        fileHistory = open('scores.txt','a')
        if (data2 == None):
            data1 = "\n1 " + str(data1)
            fileHistory.write(data1)
        else:
            data1 = "\n2 " + str(data1)
            data2 = " " + str(data2)
            fileHistory.write(data1)
            fileHistory.write(data2)
        fileHistory.close()
    return True

#analyzes the history text file
def analyzeHistory(data):
    fileHistory = open('scores.txt', 'r')
    #converts the text file data into a 3D list
    history = []
    for line in fileHistory:
        splitLine = line.split("[")
        eventHistory = []
        for element in splitLine:
            if (len(element) == 1):
                pass
            elif (element[-2] == "]"):
                element = element[:-2]
            elif (element[-1] == "]"):
                element = element[:-1]
            gameData = []
            splitElement = element.split(",")
            for dataPiece in splitElement:
                dataPiece = dataPiece.strip()
                gameData.append(dataPiece)
            eventHistory.append(gameData)
        history.append(eventHistory)
    #searches through the 3D list for the highest manually-earned score
    for game in history:
        #if it's one player, check the line count
        if (game[0] == ['1']):
            #only keeps high score if it is a user playing
            if (game[1][0] == "'Manual'"):
                score = int(game[1][2])
                if (score > data.highscore):
                    data.highscore = score
                    data.bestGame = game
        #if it's two player, check line counts
        elif (game[0] == ['2']):
            #only keeps the high scores if it is a user playing. checks both players
            for i in range(1,3):
                if (game[i][0] == "'Manual'"):
                    score = int(game[i][2])
                    if (score > data.highscore):
                        data.highscore = score
                        data.bestGame = game
    #searches through the 3D list for the best win over an AI
    for game in history:
        if (game[0] == ['2']):
            if (game[1][0] == "'Manual'") and (game[2][0] == "'AI'"):
                if (game[1][3] == "'Win'"):
                    beatAITime = game[1][1]
                    beatAISpeed = int(game[2][3])
                    beatAIIntelligence = int(game[2][4])
                    compareStats(data, game, beatAITime, beatAISpeed, beatAIIntelligence)
            elif (game[1][0] == "'AI'") and (game[2][0] == "'Manual'"):
                if (game[2][3] == "'Win'"):
                    beatAITime = game[1][1]
                    beatAISpeed = int(game[1][3])
                    beatAIIntelligence = int(game[1][4])
                    compareStats(data, game, beatAITime, beatAISpeed, beatAIIntelligence)
    data.history = history
    fileHistory.close()

#checks whether the given AI win is more difficult than the stored best AI win
def compareStats(data, game, beatAITime, beatAISpeed, beatAIIntelligence):
    if (beatAISpeed*beatAIIntelligence > data.bestAISpeed*data.bestAIIntelligence):
        data.bestAIGame = game
        data.bestAITime = beatAITime
        data.bestAISpeed = beatAISpeed
        data.bestAIIntelligence = beatAIIntelligence
    elif (beatAISpeed*beatAIIntelligence == data.bestAISpeed*data.bestAIIntelligence):
        if (beatAITime < data.bestAITime):
            data.bestAIGame = game
            data.bestAITime = beatAITime
            data.bestAISpeed = beatAISpeed
            data.bestAIIntelligence = beatAIIntelligence

######################################
# Genetic Algorithm
######################################

#tests the genetic algorithm by performing the AI without pausing
class geneticAlgorithm(tetrisGame):
    #modifies the weights of the board calculation based on the inputs
    def __init__(self, holeWeight, colHeightWeight, gapWeight, clearWeight,
                 lineCap, randomRow = 19):
        super().__init__()
        self.holeInput = holeWeight
        self.colHeightInput = colHeightWeight
        self.gapInput = gapWeight
        self.clearInput = clearWeight
        self.lineCap = lineCap
self.randomRow = randomRow print(self.randomRow) #initializes the board def init(self): import random super().init() self.holeWeight = self.holeInput self.colHeightWeight = self.colHeightInput self.gapWeight = self.gapInput self.clearWeight = self.clearInput self.nonEmptyColor = (100,100,100) for i in range(20): row = random.randint(self.randomRow,19) col = random.randint(0,9) if (self.board[row][col] == self.emptyColor): self.board[row][col] = self.nonEmptyColor self.doMove = 0 #standard run function def run(self): screen = pygame.display.set_mode((self.width, self.height)) pygame.display.set_caption(self.title) clock = pygame.time.Clock() self._keys = dict() self.init() runGame = True while runGame: time = clock.tick(self.fps) self.timerFired(time) for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: self.mousePressed(*(event.pos)) elif event.type == pygame.MOUSEBUTTONUP and event.button == 1: self.mouseReleased(*(event.pos)) elif (event.type == pygame.MOUSEMOTION and event.buttons == (0, 0, 0)): self.mouseMotion(*(event.pos)) elif (event.type == pygame.MOUSEMOTION and event.buttons[0] == 1): self.mouseDrag(*(event.pos)) elif event.type == pygame.KEYDOWN: self._keys[event.key] = True self.keyPressed(event.key, event.mod, screen) elif event.type == pygame.QUIT: runGame = False screen.fill(self.bgColor) self.redrawAll(screen) pygame.display.flip() if (self.isGameOver == True): return self.score runGame = False if (self.score > self.lineCap): return self.score runGame = False pygame.quit() def timerFired(self, dt): #if the game is over or the game is paused, don't move any pieces if (not self.isGameOver and not self.isPaused and not self.isGameWon): self.findBestPlacement() #finds the best placement of the piece by searching through all the possible moves, #calculating the score of each move, and then performing the move. 
similar to the #other findBestPlacement function however does not have the randomly do bad moves feature def findBestPlacement(self): if (self.isGameOver == False): self.oldBoard = self.board (bestRotation, bestCol, bestPiece) = (None, None, self.currentPiece) highestScore = -9999 #iterates twice, once for the current piece, once for the hold for i in range(2): if (i == 0): piece = self.currentPiece else: if (self.heldPiece == False): if (self.holdPiece != None): piece = self.holdPiece else: piece = self.queue[0] else: piece = self.currentPiece rotations = self.possibleRotations(piece) rotationNumber = -1 #goes through the possible rotations of the piece for rotation in rotations: rotationNumber += 1 pieceWidth = len(rotation[0]) #goes through the columns the piece can be placed for col in range(self.cols-(pieceWidth-1)): self.simBoard = copy.deepcopy(self.oldBoard) score = self.hardDropCandidate(rotation, col) if (score > highestScore): (self.rotationNumber, self.bestCol, self.bestPiece) = (rotationNumber, col, piece) highestScore = score self.doAIMove() else: return None #similar to the doAIMove of the tetrisGame object, however, does the entire move at once instead of #splitting into separate steps def doAIMove(self): (col, bestPiece) = (self.bestCol, self.bestPiece) #holds the piece if necessary if (bestPiece != self.currentPiece) and (self.heldPiece == False): self.doHold() #rotates the piece as many times as possible while (self.rotationNumber > 0): self.rotationNumber -= 1 if (self.rotateFallingPiece() == False): self.isGameOver = True return None #moves the piece left/right if (col == self.fallingPieceCol): pass elif (col < self.fallingPieceCol): while (col < self.fallingPieceCol): if (self.moveFallingPiece(0,-1) == False): self.isGameOver = True return None elif (col > self.fallingPieceCol): while (col > self.fallingPieceCol): if (self.moveFallingPiece(0,1) == False): self.isGameOver = True return None #if the piece is in the best possible position, hard drop it if (bestPiece != self.currentPiece) and (self.heldPiece == True): self.hardDrop() self.heldPiece = False if (bestPiece == self.currentPiece) and (self.rotationNumber == 0) and (col == self.fallingPieceCol): self.hardDrop() self.heldPiece = False ######################################### # Main() ######################################### def run(width = 800, height = 600, fps = 60, title = "Tetris"): class Struct(object): pass data = Struct() screen = pygame.display.set_mode((width,height)) pygame.display.set_caption(title) clock = pygame.time.Clock() init(data) backgroundColor = (255,255,255) runGame = True while (runGame == True): time = clock.tick(fps) timerFired(time, data) for event in pygame.event.get(): if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1: mousePressed(data, *(event.pos)) elif event.type == pygame.MOUSEBUTTONUP and event.button == 1: mouseReleased(data, *(event.pos)) elif (event.type == pygame.MOUSEMOTION and event.buttons == (0, 0, 0)): mouseMotion(data, *(event.pos)) elif (event.type == pygame.MOUSEMOTION and event.buttons[0] == 1): mouseDrag(data, *(event.pos)) elif event.type == pygame.KEYDOWN: keyPressed(event.key, event.mod, screen, data) elif event.type == pygame.QUIT: runGame = False screen.fill(backgroundColor) redrawAll(screen, data) pygame.display.flip() pygame.quit() ########################################### # Mode Dispatcher ########################################### def init(data): pygame.init() data.page = "HomePage" data.game0 = None data.game1 = None data.godGame = None 
data.turquoise = (44,177,222) data.darkTurquoise = (17, 73, 92) data.gray = (38,40,42) data.lastPage = "HomePage" data.visitedGame = False data.choseGameMode = "Solo" data.puzzleBoard = False data.playerOne = "Manual" data.playerTwo = "Manual" data.oneAIIntelligence = 5 data.oneAISpeed = 1 data.twoAIIntelligence = 5 data.twoAISpeed = 1 data.sendGame0 = None data.sendGame1 = None data.gameOver0 = None data.gameOver1 = None data.font = pygame.font.SysFont("gillsansultra", 20) data.font0 = pygame.font.SysFont("gillsansultra", 40) data.font1 = pygame.font.SysFont("gillsansultra", 14) data.font2 = pygame.font.SysFont("gillsansultra", 12) data.font3 = pygame.font.SysFont("gillsansultra", 30) data.recordedHistory = False data.highscore = 0 data.history = [] data.bestGame = [] data.bestAIGame = [] data.bestAISpeed = 0 data.bestAIIntelligence = 0 data.bestAITime = "" data.newHighscore = False data.newHighscoreCounter = 0 def timerFired(time, data): if (data.page == "GamePage"): gamePageTimerFired(time, data) elif (data.page == "GodPage"): godPageTimerFired(time,data) def mousePressed(data, x, y): if (data.page == "HomePage"): homePageMousePressed(data, x, y) elif (data.page == "HelpPage"): helpPageMousePressed(data, x, y) elif (data.page == "SelectionPage"): selectionPageMousePressed(data, x, y) elif (data.page == "GamePage"): gamePageMousePressed(data, x, y) elif (data.page == "GodPage"): godPageMousePressed(data, x, y) elif (data.page == "highscorePage"): highscorePageMousePressed(data,x,y) def mouseReleased(data, x, y): if (data.page == "SelectionPage"): selectionPageMouseReleased(data, x, y) def mouseMotion(data, x, y): if (data.page == "HomePage"): homePageMouseMotion(data, x, y) elif (data.page == "HelpPage"): helpPageMouseMotion(data, x, y) elif (data.page == "SelectionPage"): selectionPageMouseMotion(data, x, y) def mouseDrag(data, x, y): if (data.page == "SelectionPage"): selectionPageMouseDrag(data, x, y) #Responds based on keys pressed def keyPressed(keyCode, modifier, screen, data): if (data.page == "HomePage"): homePageKeyPressed(keyCode, modifier, screen, data) elif (data.page == "HelpPage"): helpPageKeyPressed(keyCode, modifier, screen, data) elif (data.page == "SelectionPage"): selectionPageKeyPressed(keyCode, modifier, screen, data) elif (data.page == "GamePage"): gamePageKeyPressed(keyCode, modifier, screen, data) elif (data.page == "GodPage"): godPageKeyPressed(keyCode, modifier, screen, data) elif (data.page == "highscorePage"): highscorePageKeyPressed(keyCode, modifier, screen,data) def redrawAll(screen, data): if (data.page == "HomePage"): homePageRedrawAll(screen, data) elif (data.page == "HelpPage"): helpPageRedrawAll(screen, data) elif (data.page == "SelectionPage"): selectionPageRedrawAll(screen, data) elif (data.page == "GamePage"): gamePageRedrawAll(screen, data) elif (data.page == "GodPage"): godPageRedrawAll(screen, data) elif (data.page == "highscorePage"): highscorePageRedrawAll(screen, data) newHighScoreRedrawAll(screen,data) ########################################## # Universally Used Functions ########################################## #draws the tetris header def header(screen): #image from official tetris website @http://tetris.com/about-tetris/ tetrisImage = pygame.image.load('images/Tetris_Web_Border.jpg').convert_alpha() screen.blit(tetrisImage, (0,0)) #draws the help and home button def helpHomeButton(screen): #help image from @http://www.freeiconspng.com/free-images/help-desk-icon-13739 helpImage = pygame.image.load('images/helpButton.jpg').convert_alpha() 
screen.blit(helpImage, (740,570)) #home image from @http://www.freeiconspng.com/free-images/address-icon-1741 homeImage = pygame.image.load('images/homeButton.jpg').convert_alpha() screen.blit(homeImage, (770,570)) #draws a dark turquoise button with a shadow def darkButton(data,screen, xPos, yPos): pygame.draw.rect(screen, (0,0,0), (xPos+5,yPos+5,200,40)) pygame.draw.rect(screen, data.darkTurquoise, (xPos,yPos,200,40)) def newHighScoreRedrawAll(screen,data): if (data.newHighscore == True): data.newHighscoreCounter += 1 pygame.draw.rect(screen, (0,0,0), (205,205,400,200)) pygame.draw.rect(screen, data.darkTurquoise, (200,200,400,200)) highscoreText = data.font0.render("New Highscore!", 1, (255,255,255)) screen.blit(highscoreText, (210,280)) if (data.newHighscoreCounter >= 100): data.newHighscoreCounter = 0 data.newHighscore = False ########################################## # Home Page ########################################## def homePageMousePressed(data, x, y): # print ("highscore", data.highscore) # print("history",data.history) # print("best score game", data.bestGame) # print("best AI defeat", data.bestAIGame) # print("best AI speed", data.bestAISpeed) # print("best AI intel", data.bestAIIntelligence) # print("best AI defeat Time", data.bestAITime) data.lastPage = "HomePage" if (x >= 300) and (x <= 500) and (y >= 250) and (y <= 290): data.page = "SelectionPage" elif (data.visitedGame == True) and (x >= 300) and (x <= 500) and (y >= 300) and (y <= 340): data.page = "GamePage" elif (x >= 300) and (x <= 500) and (y >= 350) and (y <= 390): data.page = "HelpPage" elif (x >= 300) and (x <= 500) and (y >= 400) and (y <= 440): data.page = "highscorePage" elif (x >= 290) and (x <= 510) and (y >= 500) and (y <= 540): data.godGame = tetrisGame(True,200,0,10.99,5) data.godGame.init() data.page = "GodPage" def homePageMouseMotion(data, x, y): pass def homePageKeyPressed(keyCode, modifier, screen, data): data.lastPage = "HomePage" #'h' for help, 'n' for selection page (new game), 'c' for continue game if (keyCode == 104): data.page = "HelpPage" elif (keyCode == 110): data.page = "SelectionPage" elif (keyCode == 99) and (data.visitedGame == True): data.page = "GamePage" def homePageRedrawAll(screen, data): analyzeHistory(data) header(screen) #draws the gradient background for i in range(90): pygame.draw.rect(screen, (30,188-(2*i),255-(2*i)), (0,92+(6*i),800,6)) #New Play Button darkButton(data,screen,300,250) playText = data.font.render("Play New Game", 1, (146,148,150)) screen.blit(playText, (305, 255)) #Continue Play Button darkButton(data,screen,300,300) continueText = data.font.render("Continue Game", 1, (146,148,150)) screen.blit(continueText, (305, 305)) #Help Button darkButton(data,screen,300,350) helpText = data.font.render("Help", 1, (146,148,150)) screen.blit(helpText, (375, 355)) #Highscore Button darkButton(data,screen,300,400) highscoreText = data.font.render("HighScores", 1, (146,148,150)) screen.blit(highscoreText, (335, 405)) #God AI Button pygame.draw.rect(screen, (0,0,0), (295,505,220,40)) pygame.draw.rect(screen, data.darkTurquoise, (290,500,220,40)) godText = data.font.render("Spectate God AI", 1, (146,148,150)) screen.blit(godText, (300, 505)) ############################################ # Help Page ############################################ from textwrap import fill import string def helpPageMousePressed(data, x, y): if (x >= 300) and (x <= 500) and (y >= 530) and (y <= 570): data.page = data.lastPage elif (x >= 740) and (x <= 770) and (y >= 570) and (y <= 600): data.page = 
"HelpPage" elif (x >= 770) and (x <= 800) and (y >= 570) and (y <= 600): data.page = "HomePage" def helpPageKeyPressed(keyCode, modifier, screen, data): #'q' for quit to home page, 'b' for back if (keyCode == 113): data.page = "HomePage" elif (keyCode == 98): data.page = data.lastPage def helpPageMouseMotion(data, x, y): pass def helpPageRedrawAll(screen, data): header(screen) #draws the gradient background for i in range(90): pygame.draw.rect(screen, (30,188-(2*i),255-(2*i)), (0,92+(6*i),800,6)) #draws the Help header helpText = data.font0.render("Help", 1, (146,148,150)) darkHelpText = data.font0.render("Help", 1, (0,0,0)) screen.blit(darkHelpText, (351, 100)) screen.blit(helpText, (350, 100)) #displays "What is Tetris?" Paragraph whatParagraph(data,screen) #displays "How to Play?" Paragraph howParagraph(data,screen) #displays controls section controlsText = data.font.render("Controls", 1, (146,148,150)) darkControlsText = data.font.render("Controls", 1, (0,0,0)) screen.blit(darkControlsText, (101,350)) screen.blit(controlsText, (100,350)) #displayers One Player Controls onePlayerControls(data,screen) #displays Two Player Controls twoPlayerControls(data,screen) #return button pygame.draw.rect(screen, data.darkTurquoise, (300,530,200,40)) returnText = data.font.render("Return", 1, (146,148,150)) screen.blit(returnText, (355, 535)) helpHomeButton(screen) #displays the What is Tetris Paragraph def whatParagraph(data,screen): whatTetrisText = data.font.render("What is Tetris?", 1, (146,148,150)) darkWhatTetrisText = data.font.render("What is Tetris?", 1, (0,0,0)) screen.blit(darkWhatTetrisText, (101, 140)) screen.blit(whatTetrisText, (100, 140)) whatParagraph = "Tetris is one of the oldest and most recognizable arcade games ever made. Despite being over thirty years old, the game is still enjoyed by many who enjoy intellectual sport. Whether pursuing a record or simple mental stimulation, Tetris offers a fun experience for all!" splitWhat = (fill(whatParagraph,70).splitlines()) i = 0 for line in splitWhat: lineText = data.font1.render(line, 1, (198,200,202)) darkLineText = data.font1.render(line, 1, (0,0,0)) screen.blit(darkLineText, (121, 170+(20*i))) screen.blit(lineText, (120, 170+(20*i))) i += 1 #displays the How to Play paragraph def howParagraph(data,screen): playText = data.font.render("How to Play?", 1, (146,148,150)) darkPlayText = data.font.render("How to Play?", 1, (0,0,0)) screen.blit(darkPlayText, (101, 250)) screen.blit(playText, (100, 250)) howParagraph = "The goal of Tetris is to clear as many rows as possible as quickly as possible. A row is cleared when an entire row is filled with Tetris blocks. Tetris is played using the keyboard. 
" splitHow = (fill(howParagraph,70).splitlines()) i = 0 for line in splitHow: lineText = data.font1.render(line, 1, (198,200,202)) darkLineText = data.font1.render(line, 1, (0,0,0)) screen.blit(darkLineText, (121, 280+(20*i))) screen.blit(lineText, (120, 280+(20*i))) i += 1 #displays the one player controls def onePlayerControls(data,screen): onePlayerText = data.font.render("One Player", 1, (146,148,150)) darkOnePlayerText = data.font.render("One Player", 1, (0,0,0)) screen.blit(darkOnePlayerText, (121,380)) screen.blit(onePlayerText, (120,380)) oneControls = """UP - rotate piece LEFT - move left RIGHT - move right DOWN - move down SPACE - hard drop SHIFT - hold """ splitOneControls = oneControls.splitlines() i = 0 for line in splitOneControls: line = line.strip() lineText = data.font1.render(line, 1, (198,200,202)) darkLineText = data.font1.render(line, 1, (0,0,0)) screen.blit(darkLineText, (111, 410+(20*i))) screen.blit(lineText, (110, 410+(20*i))) i += 1 #displays the two player controls def twoPlayerControls(data,screen): twoPlayerText = data.font.render("Two Player", 1, (146,148,150)) darkTwoPlayerText = data.font.render("Two Player", 1, (0,0,0)) screen.blit(darkTwoPlayerText, (471,380)) screen.blit(twoPlayerText, (470,380)) #displays Player One controls playerOneControls(data,screen) #displays Player Two Controls playerTwoControls(data,screen) #displays the player one controls def playerOneControls(data,screen): playerOneText = data.font1.render("Player One", 1, (146,148,150)) darkPlayerOneText = data.font1.render("Player One", 1, (0,0,0)) screen.blit(darkPlayerOneText, (401,410)) screen.blit(playerOneText, (400,410)) playerOneControls = """W - rotate piece A - move left D - move right S - move down Z - hard drop SHIFT - hold """ splitPlayerOneControls = playerOneControls.splitlines() i = 0 for line in splitPlayerOneControls: line = line.strip() lineText = data.font2.render(line, 1, (198,200,202)) darkLineText = data.font2.render(line, 1, (0,0,0)) screen.blit(darkLineText, (381, 430+(15*i))) screen.blit(lineText, (380, 430+(15*i))) i += 1 #displays the player two controls: def playerTwoControls(data,screen): playerTwoText = data.font1.render("Player Two", 1, (146,148,150)) darkPlayerTwoText = data.font1.render("Player Two", 1, (0,0,0)) screen.blit(darkPlayerTwoText, (601,410)) screen.blit(playerTwoText, (600,410)) playerTwoControls = """UP - rotate piece LEFT - move left RIGHT - move right DOWN - move down SPACE - hard drop SHIFT - hold """ splitPlayerTwoControls = playerTwoControls.splitlines() i = 0 for line in splitPlayerTwoControls: line = line.strip() lineText = data.font2.render(line, 1, (198,200,202)) darkLineText = data.font2.render(line, 1, (0,0,0)) screen.blit(darkLineText, (601, 430+(15*i))) screen.blit(lineText, (600, 430+(15*i))) i += 1 ############################################# # Selection Page ############################################# def selectionPageMousePressed(data, x, y): data.lastPage = "SelectionPage" if (x >= 400) and (x <= 490) and (y >= 195) and (y <= 235): data.choseGameMode = "Solo" elif (x >= 500) and (x <= 590) and (y >= 195) and (y <= 235): data.choseGameMode = "VS" elif (x >= 300) and (x <= 440) and (y >= 295) and (y <= 335): data.playerOne = "Manual" elif (x >= 450) and (x <= 540) and (y >= 295) and (y <= 335): data.playerOne = "AI" elif (x >= 300) and (x <= 390) and (y >= 245) and (y <= 285): data.puzzleBoard = True elif (x >= 400) and (x <= 490) and (y >= 245) and (y <= 285): data.puzzleBoard = False elif (x >= 740) and (x <= 770) and (y >= 
570) and (y <= 600): data.page = "HelpPage" elif (x >= 770) and (x <= 800) and (y >= 570) and (y <= 600): data.page = "HomePage" #modifies the AI bars if (data.playerOne == "AI"): if (x >= 300) and (x <= 400) and (y >= 350) and (y <= 370): data.oneAIIntelligence = min(((x - 300)//20) + 1,5) elif (x >= 530) and (x <= 730) and (y >= 350) and (y <= 370): data.oneAISpeed = min(((x - 530)//20) + 1,10) if (data.choseGameMode == "VS"): if (x >= 300) and (x <= 440) and (y >= 395) and (y <= 435): data.playerTwo = "Manual" elif (x >= 450) and (x <= 540) and (y >= 395) and (y <= 435): data.playerTwo = "AI" #modifies the AI bars elif (data.playerTwo == "AI"): if (x >= 300) and (x <= 400) and (y >= 450) and (y <= 470): data.twoAIIntelligence = min(((x - 300)//20) + 1,5) elif (x >= 530) and (x <= 730) and (y >= 450) and (y <= 470): data.twoAISpeed = min(((x - 530)//20) + 1,10) if (x >= 300) and (x <= 500) and (y >= 500) and (y <= 540): startGame(data) #creates the necessary tetris game objects and initializes them. #If it cannot start a game, returns False def startGame(data): if (data.choseGameMode != None) and (data.playerOne != None): if (data.choseGameMode == "VS") and (data.playerTwo == None): return False if (data.choseGameMode == "Solo"): data.game1 = None if (data.playerOne == "Manual"): data.game0 = tetrisGame(False, 200, 0, puzzleBoard=data.puzzleBoard) data.game0.init() elif (data.playerOne == "AI"): data.game0 = tetrisGame(True, 200, 0, data.oneAISpeed, data.oneAIIntelligence,data.puzzleBoard) data.game0.init() elif (data.choseGameMode == "VS"): #if there are two manual players, need to change the keys used if (data.playerOne == "Manual") and (data.playerTwo == "Manual"): data.game0 = tetrisGame(False,0,0, puzzleBoard=data.puzzleBoard,doubleManual=1) data.game0.init() data.game1 = tetrisGame(False,400,0, puzzleBoard=data.puzzleBoard,doubleManual=2) data.game1.init() else: if (data.playerOne == "Manual"): data.game0 = tetrisGame(False,0,0, puzzleBoard=data.puzzleBoard) data.game0.init() elif (data.playerOne == "AI"): data.game0 = tetrisGame(True,0,0,data.oneAISpeed,data.oneAIIntelligence,data.puzzleBoard) data.game0.init() if (data.playerTwo == "Manual"): data.game1 = tetrisGame(False,400,0, puzzleBoard=data.puzzleBoard) data.game1.init() elif (data.playerTwo == "AI"): data.game1 = tetrisGame(True,400,0,data.twoAISpeed,data.twoAIIntelligence,data.puzzleBoard) data.game1.init() data.recordedHistory = False data.page = "GamePage" else: return False def selectionPageMouseReleased(data, x, y): pass def selectionPageMouseDrag(data, x, y): pass def selectionPageMouseMotion(data, x, y): pass def selectionPageKeyPressed(keyCode, modifier, screen, data): #'h' for help, 'q' for quit to home page, 'ENTER' to start game if (keyCode == 104): data.page = "HelpPage" elif (keyCode == 113): data.page = "HomePage" #loads the game if you press enter and the game has the necessary info to start elif (keyCode == 13) and (startGame(data) != False): data.page = "GamePage" def selectionPageRedrawAll(screen, data): header(screen) #draws the gradient background for i in range(90): pygame.draw.rect(screen, (30,188-(2*i),255-(2*i)), (0,92+(6*i),800,6)) #draws the header startText = data.font.render("Start a New Game:", 1, (146,148,150)) darkStartText = data.font.render("Start a New Game:", 1, (0,0,0)) screen.blit(darkStartText, (291,100)) screen.blit(startText, (290, 100)) #Choose a game mode text gameModeText = data.font.render("Choose a game mode:", 1, (146,148,150)) darkGameModeText = data.font.render("Choose a game 
mode:", 1, (0,0,0)) screen.blit(darkGameModeText, (101,200)) screen.blit(gameModeText, (100, 200)) #AI Selection Text intelligenceText = data.font.render("Intelligence", 1, (146,148,150)) darkIntelligenceText = data.font.render("Intelligence", 1, (0,0,0)) speedText = data.font.render("Speed", 1, (146,148,150)) darkSpeedText = data.font.render("Speed", 1, (0,0,0)) #game mode buttons gameModeButtons(data,screen) #board selection buttons boardSelectionButtons(data,screen) #player 1 buttons playerOneButtons(data,screen) #Player One AI intelligence/speed selection playerOneAISelect(data,screen) #player 2 buttons if (data.choseGameMode == "VS"): playerTwoButtons(data,screen) #Start button pygame.draw.rect(screen, data.darkTurquoise, (300,500,200,40)) returnText = data.font.render("Start Game", 1, (146,148,150)) screen.blit(returnText, (325, 505)) helpHomeButton(screen) #draws the game mode buttons def gameModeButtons(data,screen): if (data.choseGameMode == "Solo"): pygame.draw.rect(screen, data.darkTurquoise, (400,195,90,40)) else: pygame.draw.rect(screen, data.turquoise, (400,195,90,40), 2) onePlayerText = data.font.render("Solo", 1, (146,148,150)) screen.blit(onePlayerText, (420, 200)) if (data.choseGameMode == "VS"): pygame.draw.rect(screen, data.darkTurquoise, (500,195,90,40)) else: pygame.draw.rect(screen, data.turquoise, (500,195,90,40), 2) twoPlayerText = data.font.render("Vs.", 1, (146,148,150)) screen.blit(twoPlayerText, (530, 200)) #draws the board selection buttons def boardSelectionButtons(data,screen): puzzleBoardText = data.font.render("Puzzle Board:", 1, (146,148,150)) darkPuzzleBoardText = data.font.render("Puzzle Board:", 1, (0,0,0)) screen.blit(darkPuzzleBoardText, (101,250)) screen.blit(puzzleBoardText, (100,250)) if (data.puzzleBoard == True): pygame.draw.rect(screen, data.darkTurquoise, (300,245,90,40)) else: pygame.draw.rect(screen, data.turquoise, (300,245,90,40), 2) yesText = data.font.render("Yes", 1, (146,148,150)) screen.blit(yesText, (320, 250)) if (data.puzzleBoard == False): pygame.draw.rect(screen, data.darkTurquoise, (400,245,90,40)) else: pygame.draw.rect(screen, data.turquoise, (400,245,90,40), 2) noText = data.font.render("No", 1, (146,148,150)) screen.blit(noText, (420,250)) #draws the player one buttons def playerOneButtons(data,screen): darkPlayerOneText = data.font.render("Player One:", 1, (0,0,0)) playerOneText = data.font.render("Player One:", 1, (146,148,150)) screen.blit(darkPlayerOneText, (101, 300)) screen.blit(playerOneText, (100, 300)) if (data.playerOne == "Manual"): pygame.draw.rect(screen, data.darkTurquoise, (300,295,140,40)) else: pygame.draw.rect(screen, data.turquoise, (300,295,140,40), 2) manualText = data.font.render("Manual", 1, (146,148,150)) screen.blit(manualText, (323, 300)) if (data.playerOne == "AI"): pygame.draw.rect(screen, data.darkTurquoise, (450,295,90,40)) else: pygame.draw.rect(screen, data.turquoise, (450,295,90,40), 2) AIText = data.font.render("AI", 1, (146,148,150)) screen.blit(AIText, (478, 300)) #draws the AI selection bars def playerOneAISelect(data,screen): if (data.playerOne == "AI"): for xPos in range(300,400,20): pygame.draw.rect(screen, data.turquoise, (xPos,350,20,20), 2) for filled in range(data.oneAIIntelligence): pygame.draw.rect(screen, data.darkTurquoise, (300+(20*filled),350,20,20)) intelligenceText = data.font.render("Intelligence", 1, (146,148,150)) darkIntelligenceText = data.font.render("Intelligence", 1, (0,0,0)) screen.blit(darkIntelligenceText, (141, 350)) screen.blit(intelligenceText, (140, 350)) for 
xPos in range(530,730,20): pygame.draw.rect(screen, data.turquoise, (xPos,350,20,20), 2) for filled in range(data.oneAISpeed): pygame.draw.rect(screen, data.darkTurquoise, (530+(20*filled),350,20,20)) speedText = data.font.render("Speed", 1, (146,148,150)) darkSpeedText = data.font.render("Speed", 1, (0,0,0)) screen.blit(darkSpeedText, (441, 350)) screen.blit(speedText, (440, 350)) #draws the player Two buttons def playerTwoButtons(data,screen): playerTwoText = data.font.render("Player Two:", 1, (146,148,150)) darkPlayerTwoText = data.font.render("Player Two:", 1, (0,0,0)) screen.blit(darkPlayerTwoText, (101, 400)) screen.blit(playerTwoText, (100, 400)) if (data.playerTwo == "Manual"): pygame.draw.rect(screen, data.darkTurquoise, (300,395,140,40)) else: pygame.draw.rect(screen, data.turquoise, (300,395,140,40), 2) manualText = data.font.render("Manual", 1, (146,148,150)) screen.blit(manualText, (323, 400)) if (data.playerTwo == "AI"): pygame.draw.rect(screen, data.darkTurquoise, (450,395,90,40)) else: pygame.draw.rect(screen, data.turquoise, (450,395,90,40), 2) AIText = data.font.render("AI", 1, (146,148,150)) screen.blit(AIText, (478, 400)) #Player Two AI intelligence/speed selection if (data.playerTwo == "AI"): playerTwoAISelect(data,screen) #player two AI select def playerTwoAISelect(data,screen): for xPos in range(300,400,20): pygame.draw.rect(screen, data.turquoise, (xPos,450,20,20), 2) for filled in range(data.twoAIIntelligence): pygame.draw.rect(screen, data.darkTurquoise, (300+(20*filled),450,20,20)) intelligenceText = data.font.render("Intelligence", 1, (146,148,150)) darkIntelligenceText = data.font.render("Intelligence", 1, (0,0,0)) screen.blit(darkIntelligenceText, (141, 450)) screen.blit(intelligenceText, (140, 450)) for xPos in range(530,730,20): pygame.draw.rect(screen, data.turquoise, (xPos,450,20,20), 2) for filled in range(data.twoAISpeed): pygame.draw.rect(screen, data.darkTurquoise, (530+(20*filled),450,20,20)) speedText = data.font.render("Speed", 1, (146,148,150)) darkSpeedText = data.font.render("Speed", 1, (0,0,0)) screen.blit(darkSpeedText, (441, 450)) screen.blit(speedText, (440, 450)) ############################################## # Game Page ############################################## def gamePageTimerFired(time, data): data.game0.timerFired(time) #if the game mode is Vs. 
send lines between games if (data.game1 != None): data.game1.timerFired(time) #takes how many lines have been cleared by the opposing player data.sendGame0 = data.game1.sendGarbage() data.sendGame1 = data.game0.sendGarbage() #updates how many lines have been cleared data.game0.removeFullRows() data.game1.removeFullRows() #if lines were cleared, send the lines to the appropriate player if (data.sendGame0 > 0): data.game0.addGarbage(data.sendGame0) data.sendGame0 = 0 if (data.sendGame1 > 0): data.game1.addGarbage(data.sendGame1) data.sendGame1 = 0 #if one of the players lost, then display appropriate win/lose screens data.gameOver0 = data.game0.gameLost() data.gameOver1 = data.game1.gameLost() if (data.gameOver0 == True): data.game1.gameWon() history1 = data.game0.addToHistory() history2 = data.game1.addToHistory() if (history1 != None): if (history1[0] == "Manual") and (history1[2] >= data.highscore): data.newHighscore = True if (history2 != None): if (history2[0] == "Manual") and (history2[2] >= data.highscore): data.newHighscore = True data.recordedHistory = recordHistory(data.recordedHistory, history1, history2) if (data.gameOver1 == True): data.game0.gameWon() history1 = data.game0.addToHistory() history2 = data.game1.addToHistory() if (history1 != None): if (history1[0] == "Manual") and (history1[2] >= data.highscore): data.newHighscore = True if (history2 != None): if (history2[0] == "Manual") and (history2[2] >= data.highscore): data.newHighscore = True data.recordedHistory = recordHistory(data.recordedHistory, history1, history2) else: if (data.game0.gameLost() == True): history = data.game0.addToHistory() if (history != None): if (history[0] == "Manual") and (history[2] >= data.highscore): data.newHighscore = True data.recordedHistory = recordHistory(data.recordedHistory, history) def gamePageMousePressed(data, x, y): data.lastPage = "GamePage" data.visitedGame = True if (x >= 740) and (x <= 770) and (y >= 570) and (y <= 600): data.page = "HelpPage" elif (x >= 770) and (x <= 800) and (y >= 570) and (y <= 600): data.page = "HomePage" def gamePageKeyPressed(keyCode, modifier, screen, data): data.visitedGame = True data.lastPage = "GamePage" #'h' for help, 'q' for quit if (keyCode == 104): data.page = "HelpPage" elif (keyCode == 113): data.page = "HomePage" data.game0.keyPressed(keyCode, modifier, screen) if (data.game1 != None): data.game1.keyPressed(keyCode, modifier, screen) def gamePageRedrawAll(screen, data): #draws gradient for i in range(90): pygame.draw.rect(screen, (30,188-(2*i),255-(2*i)), (0,(7*i),800,7)) #draws the game screens data.game0.redrawAll(screen) if (data.game1 != None) and (data.choseGameMode == "VS"): data.game1.redrawAll(screen) helpHomeButton(screen) ############################ # God Page ############################ def godPageTimerFired(time, data): data.godGame.timerFired(time) def godPageMousePressed(data, x, y): data.lastPage = "GodPage" if (x >= 740) and (x <= 770) and (y >= 570) and (y <= 600): data.page = "HelpPage" elif (x >= 770) and (x <= 800) and (y >= 570) and (y <= 600): data.page = "HomePage" def godPageKeyPressed(keyCode, modifier, screen, data): data.lastPage = "GodPage" #'h' for help, 'q' for quit if (keyCode == 104): data.page = "HelpPage" elif (keyCode == 113): data.page = "HomePage" def godPageRedrawAll(screen, data): #draws the gradient for i in range(90): pygame.draw.rect(screen, (30,188-(2*i),255-(2*i)), (0,(7*i),800,7)) #draws the game data.godGame.redrawAll(screen) helpHomeButton(screen) ############################### # 
# Highscores/history page
###############################

def highscorePageMousePressed(data, x, y):
    data.lastPage = "highscorePage"
    if (x >= 740) and (x <= 770) and (y >= 570) and (y <= 600):
        data.page = "HelpPage"
    elif (x >= 770) and (x <= 800) and (y >= 570) and (y <= 600):
        data.page = "HomePage"

def highscorePageKeyPressed(keyCode, modifier, screen, data):
    data.lastPage = "highscorePage"
    #'h' for help, 'q' for quit
    if (keyCode == 104):
        data.page = "HelpPage"
    elif (keyCode == 113):
        data.page = "HomePage"

def highscorePageRedrawAll(screen,data):
    #draws the gradient
    for i in range(90):
        pygame.draw.rect(screen, (30,188-(2*i),255-(2*i)), (0,(7*i),800,7))
    header(screen)
    highscoreText = data.font.render("Highscore: " + str(data.highscore), 1, (146,148,150))
    darkHighscoreText = data.font.render("Highscore: " + str(data.highscore), 1, (0,0,0))
    screen.blit(darkHighscoreText, (281, 150))
    screen.blit(highscoreText, (280, 150))
    bestAIText = data.font.render("Best AI Defeated:", 1, (146,148,150))
    darkBestAIText = data.font.render("Best AI Defeated: ", 1, (0,0,0))
    screen.blit(darkBestAIText, (141, 250))
    screen.blit(bestAIText, (140, 250))
    bestIntelligenceText = data.font1.render("Intelligence: " + str(data.bestAIIntelligence), 1, (146,148,150))
    darkBestIntelligenceText = data.font1.render("Intelligence: " + str(data.bestAIIntelligence), 1, (0,0,0))
    screen.blit(darkBestIntelligenceText, (241, 280))
    screen.blit(bestIntelligenceText, (240, 280))
    bestSpeedText = data.font1.render("Speed: " + str(data.bestAISpeed), 1, (146,148,150))
    darkBestSpeedText = data.font1.render("Speed: " + str(data.bestAISpeed), 1, (0,0,0))
    screen.blit(darkBestSpeedText, (241, 300))
    screen.blit(bestSpeedText, (240, 300))
    bestTimeText = data.font1.render("Time Taken: " + str(data.bestAITime), 1, (146,148,150))
    darkBestTimeText = data.font1.render("Time Taken: " + str(data.bestAITime), 1, (0,0,0))
    screen.blit(darkBestTimeText, (241, 320))
    screen.blit(bestTimeText, (240, 320))
    helpHomeButton(screen)

def main():
    run()

########################################
# genetic algorithm data collection
########################################
#testAIs(5, 5, 2.5, 2.5, 5, 1000)
#testAIs(4, 6.320137824311881, 2.4387815064452654, 3.6389088406045977, 4.544367801566513, 1000)
#testAIs(2, 6.130166771222496, 2.0654103293995507, 3.59707624497361, 4.672541911687853, 2000)
#testAIs(1, 5.991736855642347, 2.003127291362843, 3.378532554163095, 4.6412455071804315, 4000)
#testAIs(0.5, 5.901419259597659, 1.98171354255989, 3.4414534763120814, 4.635329773443859, 8000)
#these iterations test using the random board feature, where the board is filled with random
#tiles at the start instead of being empty
#testAIs(0.25, 5.899202540171207, 1.9682746621983596, 3.408388891059141, 4.615364672019854, 100, 9)
#testAIs(0.125, 5.933157019361572, 1.9333422585880735, 3.4047164812712967, 4.630153044753769, 100, 8)
#testAIs(0.0625, 5.937421495482078, 1.9231952984247034, 3.3927635617811003, 4.634046272707411, 100, 7)
#testAIs(0, 5.9288669341469555, 1.9162926946223073, 3.3878742140370637, 4.641363224169059, 100, 11)

#tests the AI using a genetic algorithm, starts with random weights to calculate
#the board score and returns the final score achieved by the AI given the weights.
#If the score is less than the linecap, then the weights did not survive to the
#line cap, so we ignore them. Note that the score caps at 1000 (otherwise some
#would probably run forever) so the weights that don't make it to 1000 are removed
#from the "gene pool".
def testAIs(rangeVal, startHole, startColHeight, startGap, startClear, lineCap, randomRow = 19):
    import random
    scoresList = []
    badWeights = []
    for i in range(200):
        #picks random weights for the relevant variables
        holeWeight = random.uniform(startHole-rangeVal, startHole+rangeVal)
        minColHeight = max(startColHeight-rangeVal,0)
        colHeightWeight = random.uniform(minColHeight, startColHeight+rangeVal)
        gapWeight = random.uniform(startGap-rangeVal,startGap+rangeVal)
        clearWeight = random.uniform(startClear-rangeVal,startClear+rangeVal)
        #runs the AI using the given weights and returns the final score achieved
        aiTest = geneticAlgorithm(holeWeight,colHeightWeight,gapWeight,clearWeight,
                                  lineCap, randomRow)
        score = aiTest.run()
        #if the AI gets to the lineCap, then it is an acceptable list of weights for
        #the AI calculations, otherwise, they are bad
        if (score == None):
            break
        elif (score >= lineCap):
            scoresList.append([holeWeight,colHeightWeight,gapWeight,clearWeight])
        else:
            badWeights.append([holeWeight,colHeightWeight,gapWeight,clearWeight])
        print(scoresList)
        print(i)
    #neatly prints the output of the necessary data
    print("bad", badWeights)
    print("good\n", len(scoresList), "\n", scoresList)

if __name__ == '__main__':
    main()
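
#The board-scoring heuristic above combines four penalties/bonuses. The sketch
#below restates it as a pure, pygame-free function so the weights can be probed
#in isolation; it is an illustration added for reference, not part of the
#original game. The helper name is made up, and the default weights are the
#evolved values from the commented testAIs runs above.
def _heuristicScoreSketch(colHeights, holes, gaps, linesCleared,
                          holeWeight=5.93, colHeightWeight=1.92,
                          gapWeight=3.39, clearWeight=4.64):
    score = 0
    for h in colHeights:
        score -= h ** colHeightWeight        #tall columns: exponential penalty
    for hole in holes:
        score -= hole * holeWeight           #covered empty cells
    for gap in gaps:
        if (gap > 2):
            score -= gap * gapWeight         #deep one-tile-wide wells
    bonus = clearWeight                      #multi-line clears earn a growing bonus
    for _ in range(linesCleared):
        score += bonus
        bonus += 5
    return score

#e.g. a flat board that just cleared a tetris scores only the clear bonus:
#_heuristicScoreSketch([0]*10, [0]*10, [0]*10, 4) returns a positive score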
# -*- coding:utf8 -*- """ๅšๅฎž็ป“""" from protocol import ProtocolTranslator from result import Location, Identity import datetime import logging import struct class A5(ProtocolTranslator): """A5""" @staticmethod def sum(s): a = "00" for i in range(0, len(s), 2): tmp = (int(a, 16)) ^ (int(s[i:i + 2], 16)) a = hex(tmp)[2:].zfill(2) return a @staticmethod def imei(a): b = [int(a[:2], 16), int(a[2:4], 16), int(a[4:6], 16), int(a[6:], 16)] c = map(lambda x: x > int('80', 16) and (1, x - int('80', 16)) or (0, x), b) d = ['%d' % i[0] for i in c] e = ['%02d' % i[1] for i in c] return '%s%s' % (int(''.join(d), 2) + 130, ''.join(e)) def main_signaling(self, s): return s[4:6] def build_response(self, s, ms): res = "210005" + s[-4:-2] + s[4:6] + s[18:20] restr = "2929" + res + A5.sum(res) + "0d" return restr.upper() def on_ms_80(self, s): imei = A5.imei(s[10:18]) # log timestr = s[18:30] logstr = s[30:38] latstr = s[38:46] speedstr = s[46:50] dirstr = s[50:54] Bstr = s[64:66] submit_time = datetime.datetime.now() lat = float(logstr[0:3]) + float(logstr[3:]) / 60000 lng = float(latstr[0:3]) + float(latstr[3:]) / 60000 speed = float(float(speedstr) / 3.6) bearing = int(dirstr) alerts = [] B = int(Bstr, 16) if B&8 == 8: alerts.append(u'่ขซๆ‹†้™ค') data_time = submit_time try: data_time = datetime.datetime(int("20" + str(int(timestr[:2]))), int(timestr[2:4]), int(timestr[4:6]), int(timestr[6:8]), int(timestr[8:10]), int(timestr[10:12])) except Exception as ex: logging.debug("Wrong time format time=%s imei=%s", timestr, imei) return Location( imei=imei, time=data_time, lng=lng, lat=lat, speed=speed, bearing=bearing, altitude=0, alerts=alerts, jit=True, ) def on_ms_89(self, s): ret = self.on_ms_80(s) ret.jit = False return ret def on_ms_d8(self, s): imei = A5.imei(s[10:18]) return Identity(imei) def on_ms_e1(self, s): imei = A5.imei(s[10:18]) return Identity(imei) class Km(ProtocolTranslator): @staticmethod def imei(s): return s[10:22] @staticmethod def msg_munber(s): return s[22:26] @staticmethod def crc(s): f = None r = None for i in range(0, len(s), 2): c = int(s[i:i + 2], 16) if f == None: r = c else: r ^= c f = c return r @staticmethod def wrap(msg_id, imei, number, msg_body): msg_body_attrs = len(msg_body)&int('00000001111111111',2) body = '%s%s%s%s%s'%(msg_id, msg_body_attrs, imei, number, msg_body) crc = '%x' % Km.crc(body) r = '7e%s%s7e' % (body, crc) print 'response', r return r def main_signaling(self, s): return s[2:6] def on_main_signaling(self, ms, s): print ms, s def on_ms_0100(self, s): # 7e010000210145304343740003002c012f37303131314b4d2d30312020203030303030303001d4c1423838383838437e # ๆถˆๆฏID[2:6] ๆถˆๆฏไฝ“ๅฑžๆ€ง[6:10] ็ปˆ็ซฏๆ‰‹ๆœบๅท[10:22] ๆถˆๆฏๆตๆฐดๅท[22:26] ็œๅŸŸID ๅธ‚ๅŽฟๅŸŸID ๅˆถ้€ ๅ•†ID ็ปˆ็ซฏๅž‹ๅท ็ปˆ็ซฏID ่ฝฆ็‰Œ้ขœ่‰ฒ ่ฝฆ็‰Œ # 7e 0100 0021 014530434374 0003 002c 012f 3730313131 4b4d2d3031202020 30303030303030 01 d4c1423838383838 43 7e imei = Km.imei(s) return Identity(imei) def on_ms_resp_0100(self, s): imei = Km.imei(s) num = Km.msg_munber(s) # ๆถˆๆฏๅคด ๆถˆๆฏไฝ“ ๆฃ€้ชŒ็  #7e 8100 0021 014530434374 0000 [num 01 123456] 45 7e return Km.wrap(msg_id='8100', imei=imei, number='0000', msg_body=num+'00'+'123456')
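
# Hedged usage sketch (added for reference, not in the original module): the A5
# frame checksum computed by A5.sum is a running XOR over the hex-encoded
# payload, so XOR-ing a payload together with its own checksum folds back to
# "00". The payload below is a made-up example, not a captured frame.
def _checksum_demo():
    body = "2100050101"              # hypothetical hex payload
    checksum = A5.sum(body)          # two lowercase hex characters
    assert A5.sum(body + checksum) == "00"
    return checksum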
"""setup.py""" from distutils.core import setup setup(name='numutil', version='0.1.0', description='Utilities for parsing strings into numbers, and printing numbers as pretty strings', author='Naftali Harris', author_email='naftaliharris@gmail.com', url='www.naftaliharris.com', packages=['.'], keywords = ["number", "parse", "text", 'user-entered'], classifiers = [ "Programming Language :: Python", "Development Status :: 2 - Pre-Alpha", "Environment :: Other Environment", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Natural Language :: English", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Linguistic", "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries", ], long_description = """\ Convert between numbers and strings ----------------------------------- Strings to Numbers:: >>> from numutil import str2num, num2str >>> str2num('1.3 million') 1300000 >>> str2num('three and a half') Fraction(7, 2) >>> str2num('123,456.789') 123456.789 Numbers to Strings:: >>> num2str(1234567, style='newspaper') '1.23 million' >>> num2str(1234567, style='words') 'one million, two hundred thirty four thousand, five hundred sixty seven' numutil might be useful for people mining data from text, or for people running web apps that need to parse numbers from user-entered strings, or render numbers in a user-friendly format. """ )
import re
import urllib.parse
import urllib.request

try:
    url = 'http://pythonprogramming.net/parse-website-using-regular-expressions-urllib/'

    #values = {'q': 'sql'}
    #data = urllib.parse.urlencode(values)

    headers = {}
    headers['User-Agent'] = ("Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 "
                             "(KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17")
    req = urllib.request.Request(url, None, headers=headers)
    resp = urllib.request.urlopen(req)
    respdata = resp.read().decode('utf-8', errors='replace')

    paragraphs = re.findall(r'<p>(.*?)</p>', respdata)

    with open('respfile.txt', 'a') as savefile:
        for eachp in paragraphs:
            savefile.write(eachp)

except Exception as e:
    print(str(e))
#!/usr/bin/env python import MKDatabase from MKFlowMessage import FBconvertLong # Main Class class MKFlowCommunication(): def __init__(self): self.nullNode = MKFlowNode(-1) self.node_numbers = [] self.nodes = [] def addNode(self, number): if not self.isNode(number): self.node_numbers += [number] node = MKFlowNode(number) self.nodes += [node] def isNode(self, number): return (number in self.node_numbers) def getNode(self, number): if self.isNode(number): id = self.node_numbers.index(number) node = self.nodes[id] return node else: return self.nullNode def Node(self, number): if not self.isNode(number): self.addNode(number) return self.getNode(number) class MKFlowNode(): def __init__(self, node): self.node = node self.nullSequence = MKFlowSequence(-1) self.sequence_numbers = [] self.sequences = [] def getNumber(self): return self.node def addSequence(self, number): if not self.isSequence(number): self.sequence_numbers += [number] sequence = MKFlowSequence(number) self.sequences += [sequence] def isSequence(self, number): return (number in self.sequence_numbers) def getSequence(self, number): if self.isSequence(number): id = self.sequence_numbers.index(number) sequence = self.sequences[id] return sequence else: return self.nullSequence def Sequence(self, number): if not self.isSequence(number): self.addSequence(number) return self.getSequence(number) class MKFlowSequence(): def __init__(self, sequence): self.sequence = sequence self.nullChild = MKFlowModbus(-1) self.reset() def reset(self): self.parameter_ids = [] self.parameters = [] self.hasAnswer = False self.hasRequest = False self.RequestHasValue = False self.isAnalysed = False self.isStatus = False self.isError = False self.isValid = False def setReadRequest(self, Message): self.Request = Message.getSubType() self.hasRequest = True self.hasAnswer = False self.RequestHasValue = False self.timeRequest = Message.getSeconds() def setWriteRequest(self, Message): self.setReadRequest(Message) self.RequestHasValue = True def setStatus(self, Message): self.setAnswer(Message) self.isStatus = True def setError(self, Message): self.setAnswer(Message) self.isError = True def setAnswer(self, Message): self.Answer = Message.getSubType() self.timeAnswer = Message.getSeconds() self.hasAnswer = True self.isStatus = False self.isError = False def check(self): if self.hasAnswer and self.hasRequest: if abs(self.timeAnswer - self.timeRequest) > 10: return False else: return True else: return False def addParameter(self, index): if not self.isParameter(index): self.parameter_ids += [index] Parameter = MKFlowModbus(index) self.parameters += [Parameter] def isParameter(self, index): return index in self.parameter_ids def getParameter(self, index): if self.isParameter(index): id = self.parameter_ids.index(index) Parameter = self.parameters[id] return Parameter else: return self.nullChild def Parameter(self, index): if not self.isParameter(index): self.addParameter(index) return self.getParameter(index) def analyse(self): if self.check(): # Process Request for process in self.Request.process: for parameter in process.Parameter: self.Parameter(parameter.getIndex()).setNumber(parameter.getNumber()) self.Parameter(parameter.getIndex()).setProcess(parameter.getProcess()) self.Parameter(parameter.getIndex()).setName(parameter.getHuman()) self.Parameter(parameter.getIndex()).setLength(parameter.getLength()) if self.RequestHasValue: self.Parameter(parameter.getIndex()).setValue(parameter.getValue()) 
self.Parameter(parameter.getIndex()).setDataType(parameter.getDataType()) # Process Answer if not self.RequestHasValue and not self.isStatus and not self.isError: for process in self.Answer.process: for parameter in process.Parameter: self.Parameter(parameter.getIndex()).setValue(parameter.getValue()) self.Parameter(parameter.getIndex()).setDataType(parameter.getDataType()) # Answer with Status or Error and set valid self.valid = True self.analyseStatus() self.analyseError() self.isAnalysed = True def analyseStatus(self): if self.isStatus: if self.Answer.getStatus() == 0: # no error self.valid = True elif self.Answer.getStatus() > 3 and self.Answer.getStatus() < 8: # Parameter Error where = self.Answer.getIndex() count = 4 for index in self.parameter_ids: Parameter = self.getParameter(index) if not self.RequestHasValue: Parameter.setInvalid() if where == count: self.error = "Status: %s\t Parameter: %s" % (self.Answer.getHuman(), Parameter.getName()) Parameter.setError(self.Answer.getHuman()) count += int(Parameter.getLength()) else: self.error = self.Answer.getHuman() self.valid = False def analyseError(self): if self.isError: self.error = self.Answer.getText() self.valid = False if not self.valid: for index in self.parameter_ids: Parameter = self.getParameter(index) Parameter.setError(self.error) def output(self): if self.check(): if not self.isAnalysed: self.analyse() for index in self.parameter_ids: Parameter = self.getParameter(index) try: Parameter.stdout() except: self.stdout() raise ValueError("error in MKFlowCommunication ModbusClass stdout") def save(self, Database, instrument = 0): if self.check(): reset = True if not self.isAnalysed: self.analyse() for index in self.parameter_ids: Parameter = self.getParameter(index) try: if not Parameter.isInvalid(): valid = True proc = Parameter.getProcess() fbnr = Parameter.getNumber() name = Parameter.getName() value = Parameter.getValue() dataType = Parameter.getDataType() time = self.timeAnswer parameter = Parameter.getName() reset = Database.setFlowbus(instrument, proc, fbnr, dataType, value, time, parameter) except: self.stdout() print "error storing parameter." reset = False if reset: self.reset() else: print "Sequence not cleared." def stdout(self): print "--- sequence: %i ---" % self.sequence print "---- parameters: %s ----" % self.parameter_ids if self.hasRequest: print "---- request ----" self.Request.stdout() if self.hasAnswer: print "---- answer ----" self.Answer.stdout() class MKFlowModbus(): def __init__(self, index): self.index = index self.invalid = False self.error = '' self.value = None self.human = '' self.dataType = 'invalid' # readybility. 
store as string self.length = 0 def setProcess(self, process): self.process = process def getProcess(self): return self.process def setNumber(self, number): self.number = number def getNumber(self): return self.number def setValue(self, value): self.value = value def getValue(self): return self.value def setDataType(self, dataType): self.dataType = dataType def getDataType(self): return self.dataType def setName(self, string): self.human = string def getName(self): return self.human def setInvalid(self): self.invalid = True def setLength(self, length): self.length = length def getLength(self): return self.length def setError(self, error): self.error = error self.setInvalid() def isInvalid(self): if self.invalid: return True else: return False def stdout(self): returnarray = [self.isInvalid(), self.getProcess(), self.getNumber(), self.getName()] if not self.invalid: returnarray += [FBconvertLong(self.getProcess(), self.getNumber(), self.getValue())] else: returnarray += [self.error] print '\t'.join(str(i) for i in returnarray)
import pytest from granula.config import Config from tests.fixtures import get_fixture_path from tests.stubs.serializer import CustomSerializer from tests.utils import replace_environment @pytest.mark.parametrize( ['serializer', 'directory', 'environment', 'expected'], [ ( 'yaml', 'config/yaml', { 'VARIABLE': 'variable', 'FIRST': 'first', }, { 'parameters': { 'default': 10, 'nested': 'extended', 'environment': 'variable', 'reference': 'value', }, 'new': 'value', 'list': [ 'first', 'string', ], } ), ( 'json', 'config/json', { 'SECRET': 'secret', 'ONE': 'one', }, { 'list': [ 'one', 'two', ], 'default': 15, 'parameters': { 'nested': 'extended', 'reference': 'value', 'variable': 'secret', }, }, ), ( CustomSerializer(), 'config/custom', { 'SECRET': 'secret', }, { 'DEFAULT': 'extended', 'ENVIRONMENT': 'secret', 'MISS': '10', 'NEW': 'value', 'REFERENCE': '10', }, ), ] ) def test_from_directory(serializer, directory, environment, expected): path = get_fixture_path(directory) with replace_environment(variables=environment): config = Config.from_directory(directory=path, serializer=serializer) assert config == expected def test_from_file(): pass
"""Class for sorting algorithms.""" ''' class Sortings(object): """A general class for sorting algorithms.""" def __init__(self, sort_list=None): """Take in empty list.""" if sort_list is None: self.sort_list = [] else: self.sort_list = sort_list def bubble_sort(self): """Bubble sort.""" for i in range(len(self.sort_list) - 1): for j in range(len(self.sort_list) - 1): if self.sort_list[j] > self.sort_list[j + 1]: temp = self.sort_list[j] self.sort_list[j] = self.sort_list[j + 1] self.sort_list[j + 1] = temp def insert_sort(self): """Insertion sort.""" for i in range(len(self.sort_list)): k = self.sort_list[i] j = i while j > 0 and k < self.sort_list[j-1]: self.sort_list[j] = self.sort_list[j-1] j -= 1 self.sort_list[j] = k def merge_sort(self, arr): """Merge Sort.""" if len(arr) <=1: return arr mid = len(arr)//2 left = arr[mid:] right = arr[:mid] left = self.merge_sort(left) right = self.merge_sort(right) output = [] while left and right: if right[0] < left[0]: output.append(right[0]) right.pop(0) elif left[0] < right[0]: output.append(left[0]) left.pop(0) return output + left + right def quick_sort(self, arr): """Quick sort.""" if len(arr) <= 1: return arr pivot = arr[0] left = [] right = [] for i in arr[1:]: if i < pivot: left.append(i) elif i > pivot: right.append(i) left = self.quick_sort(left) right = self.quick_sort(right) return left + [pivot] + right def radix_sort(self, arr): x = len(str(max(arr))) str_list = ['%0*d' % (x, i) for i in arr] print(str_list) ''' if __name__ == '__main__': s = Sortings([]) arr = [5, 102, 48, 10, 2, 500] t = s.radix_sort(arr) print(t) #tl = s.insert_sort() #print(s.sort_list)
import unittest from reinvent_scoring.scoring.enums import ScoringFunctionComponentNameEnum from reinvent_scoring.scoring.enums import ScoringFunctionNameEnum from reinvent_scoring.scoring.scoring_function_factory import ScoringFunctionFactory from reinvent_scoring.scoring.scoring_function_parameters import ScoringFunctionParameters from unittest_reinvent.fixtures.test_data import BUTANE, CELECOXIB, HEXANE class TestParallelScoringFunctionFactory(unittest.TestCase): def setUp(self): enum = ScoringFunctionComponentNameEnum() ts_parameters = dict(component_type=enum.TANIMOTO_SIMILARITY, name="tanimoto_similarity", weight=1., specific_parameters={"smiles":[BUTANE, CELECOXIB]}) sf_enum = ScoringFunctionNameEnum() sf_parameters = ScoringFunctionParameters(name=sf_enum.CUSTOM_PRODUCT, parameters=[ts_parameters], parallel=True) self.sf_instance = ScoringFunctionFactory(sf_parameters=sf_parameters) def test_sf_factory_1(self): result = self.sf_instance.get_final_score([BUTANE]) self.assertEqual(1., result.total_score) def test_sf_factory_2(self): result = self.sf_instance.get_final_score([HEXANE]) self.assertAlmostEqual(result.total_score[0], 0.529, 3)
import requests import json import settings as s class Bitmex(object): def __init__(self): self.trade_currency = "XBT" self.ask_price = 0 self.bid_price = 0 self.order_id_prefix = "lee_bot" self.symbol = s.SYMBOL self.BASE_URL = "https://www.bitmex.com/api/v1/" def get_historical_data(self, tick='1m', count=400): # last one hour data with latest one in the end url = self.BASE_URL + "trade/bucketed?binSize={}&partial=false&symbol={}&count={}&reverse=true". \ format(tick, self.trade_currency, count) r = json.loads(requests.get(url).text) lst = [] # configure result into suitable data type try: dict_key = ["open", "close", "high", "low", "timestamp"] for item in r: d = { dict_key[0]: item[dict_key[0]], dict_key[1]: item[dict_key[1]], dict_key[2]: item[dict_key[2]], dict_key[3]: item[dict_key[3]], dict_key[4]: item[dict_key[4]] } lst.append(d) return lst[::-1] except KeyError as e: pass except TypeError as e: pass except Exception as e: pass # b = Bitmex() # b.get_price() # # print(b.ask_price, b.bid_price) # b.place_order(price=b.bid_price, side='Buy', orderQty=100, type="Market") # # b.cancel_all_order() # print(b.get_historical_data())
from flask_appbuilder.widgets import RenderTemplateWidget class ChartWidget(RenderTemplateWidget): template = 'appbuilder/general/widgets/chart.html' class DirectChartWidget(RenderTemplateWidget): template = 'appbuilder/general/widgets/direct_chart.html' class MultipleChartWidget(RenderTemplateWidget): template = 'appbuilder/general/widgets/multiple_chart.html'
from dataclasses import dataclass from typing import Any from pyckaxe.lib.resource.loot_table.loot_table import LootTable __all__ = ("LootTableSerializer",) # @implements ResourceSerializer[LootTable, Any] @dataclass class LootTableSerializer: # @implements ResourceSerializer[LootTable, Any] def __call__(self, resource: LootTable) -> Any: return self.serialize(resource) def serialize(self, loot_table: LootTable) -> Any: return loot_table.data
from typing import Dict, Optional, Union

import numpy as np
from werkzeug import ImmutableMultiDict

from .endpoint import Endpoint


def predict(model, input_data: Union[Dict, ImmutableMultiDict], config: Endpoint):
    # new model
    if hasattr(model, "public_inputs"):
        sample = {}
        for k, v in dict(input_data).items():
            try:
                # GET request arguments are strings. If they should in fact be
                # numbers, we try to convert them here.
                sample[k] = float(v)
            except ValueError:
                # Some arguments really are strings, so we leave them as-is.
                sample[k] = v
        res = model.predict(sample, "raw")
        return res.to_dict("records")[0]

    sample = config.process_input(input_data)
    vec = np.array(sample).reshape(1, -1)
    res = model.predict(vec)
    return config.process_output(res)
from .multipart import (
    FormParser,
    MultipartParser,
    QuerystringParser,
    OctetStreamParser,
    create_form_parser,
    parse_form,
)

__version__ = "2022.1"
import matplotlib.pyplot as plt


def _get_labels(l, hours, items):
    """
    Formats the labels, undocumented on purpose, not relevant.
    """
    total_sum = sum(l)
    temp_list = []
    for it, p in enumerate(l):
        pct = (p / total_sum) * 100
        temp_list.append(f'{items[it]} - {hours[it]} ({round(pct, 2)}%)')
    return temp_list


class PieChart:
    """
    Matplotlib pie chart that comes with the library for people who just want
    a quick insight into their data.

    Parameters
    ----------
    _dict : :class:`dict`
        The dictionary containing all the data that will be shown in the pie chart.

    num : :class:`int`
        Maximum number of elements that will be shown in the chart.

    dates : :class:`datetime.date`
        The dates, if the data comes from a slice. Just for tagging purposes.

    Note
    ----
    You are supposed to access this by :meth:`.pie_chart`
    """

    def __init__(self, _dict, num: int, dates=None):
        self.dict = _dict
        self.fig = None

        items = list(self.dict.keys())
        data = list(map(lambda x: x / 3600, self.dict.values()))

        item1 = items[:num]
        data1 = data[:num]

        hours = list(map(lambda x: f'{round(x, 2)}h', data1))
        labels = _get_labels(data1, hours, item1)

        fig, ax = plt.subplots(figsize=(5, 5), subplot_kw=dict(aspect='equal'))
        wedges, texts = ax.pie(data1)

        # Guard against the default dates=None, which would otherwise raise.
        ax.legend(wedges, labels,
                  title=f"|{dates[0].date} | {dates[1].date}|" if dates else None,
                  loc=2,
                  bbox_to_anchor=(1, 0)
                  )

    def show(self) -> None:
        """
        Shows the figure.

        :func:`matplotlib.pyplot.show`
        """
        self.fig = plt.gcf()
        plt.show()

    def save(self, fp: str) -> None:
        """
        Saves the figure to the given path via :meth:`matplotlib.figure.Figure.savefig`.

        :param fp: :class:`str`
        """
        fig = self.fig if self.fig else plt.gcf()
        fig.savefig(fp, bbox_inches='tight', format='png', dpi=100)

    def __repr__(self):
        return f"class <'{self.__class__.__name__}{self.dict}'>"
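# Illustrative usage sketch (not part of the original module). `_dict` maps
# item names to durations in seconds; `dates`, when given, is assumed to be a
# pair of objects exposing a `.date` attribute, as used in the legend title.
#
#   chart = PieChart({'coding': 7200, 'reading': 3600}, num=2)
#   chart.show()
#   chart.save('chart.png')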
import os import unittest import zope.testrunner from zope import component from sparc.testing.fixture import test_suite_mixin from sparc.cli.testing import SPARC_CLI_INTEGRATION_LAYER import sparc.cli from sparc.cli.exceptions import CliTooManyAtempts, CliInvalidInput class SparcUtilsCliInputTestCase(unittest.TestCase): layer = SPARC_CLI_INTEGRATION_LAYER sm = component.getSiteManager() def test_factory(self): asker = component.createObject(u"sparc.utils.cli.input") self.assertTrue(sparc.cli.ICliInput.providedBy(asker)) def test_basic_input(self): asker = component.createObject(u"sparc.utils.cli.input") asker._raw_input = lambda :u"test1" asker._print = lambda x: None self.assertEquals(u"test1", asker.ask(u"this is a test")) def test_required(self): asker = component.createObject(u"sparc.utils.cli.input") asker._raw_input = lambda :u"" asker._print = lambda x: None with self.assertRaises(CliTooManyAtempts): asker.ask(u"this should raise exception", required=True, tries=1) asker._raw_input = lambda :u"valid response" self.assertEquals(u"valid response", asker.ask(u"valid response", required=True, tries=1)) _attempts = 0 _succeed = 0 def _raw_input(self): SparcUtilsCliInputTestCase._attempts += 1 return u"" if SparcUtilsCliInputTestCase._succeed > SparcUtilsCliInputTestCase._attempts else u'a' def test_tries(self): asker = component.createObject(u"sparc.utils.cli.input") asker._raw_input = self._raw_input asker._print = lambda x: None SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 1 self.assertEquals(u"a", asker.ask(u"valid response", required=True, tries=2)) self.assertEquals(SparcUtilsCliInputTestCase._attempts, 1) SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 2 self.assertEquals(u"a", asker.ask(u"valid response", required=True, tries=2)) self.assertEquals(SparcUtilsCliInputTestCase._attempts, 2) SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 3 #one more than tries...will fail with self.assertRaises(CliTooManyAtempts): asker.ask(u"this should raise exception", required=True, tries=2) self.assertEquals(SparcUtilsCliInputTestCase._attempts, 2) def test_selections(self): asker = component.createObject(u"sparc.utils.cli.input") asker._raw_input = self._raw_input asker._print = lambda x: None selections = [('a','b','c')] SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 1 self.assertEquals(u"c", asker.ask(u"valid response", tries=1, selections=selections)) SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 2 with self.assertRaises(CliTooManyAtempts): asker.ask(u"this should raise exception", tries=1, selections=selections) selections = [('a','b')] SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 1 self.assertEquals(u"b", asker.ask(u"valid response", tries=1, selections=selections)) def test_constraints(self): asker = component.createObject(u"sparc.utils.cli.input") asker._raw_input = self._raw_input asker._print = lambda x: None SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 1 self.assertEquals(u"a", asker.ask(u"valid response", required=True, tries=2)) self.assertEquals(SparcUtilsCliInputTestCase._attempts, 1) def _contraint(input_): raise CliInvalidInput('testing') SparcUtilsCliInputTestCase._attempts = 0 SparcUtilsCliInputTestCase._succeed = 1 with self.assertRaises(CliTooManyAtempts): asker.ask(u"this should raise exception", constraints=[_contraint]) class 
test_suite(test_suite_mixin): layer = SPARC_CLI_INTEGRATION_LAYER package = 'sparc.cli' module = 'input' def __new__(cls): suite = super(test_suite, cls).__new__(cls) suite.addTest(unittest.makeSuite(SparcUtilsCliInputTestCase)) return suite if __name__ == '__main__': zope.testrunner.run([ '--path', os.path.dirname(__file__), '--tests-pattern', os.path.splitext( os.path.basename(__file__))[0] ])
"""Kata url: https://www.codewars.com/kata/57cfdf34902f6ba3d300001e.""" from typing import List def two_sort(array: List[str]) -> str: return '***'.join(sorted(array)[0])
n, a, b, c, d = [int(x) - 1 for x in input().split()] s = input() if c > d: x = s[a:c + 1] y = s[b - 1:d + 2] if "##" in x or "..." not in y: print("No") else: print("Yes") else: x = s[a:d + 1] if "##" in x: print("No") else: print("Yes")
# -*- coding: utf-8 -*- ''' .. created on 08.09.2016 .. by Christoph Schmitt ''' from __future__ import print_function, absolute_import, division, unicode_literals from reflexif.compat import * from reflexif.framework import model_base, declarative from reflexif.framework.model_base import structs def parentalias(cls=None): def parent(self): if cls is not None and not isinstance(self.parent, cls): raise TypeError('parent of %r is not an instance of %r' % (self, cls)) return self.parent return property(parent) def parse(field, returns_frame=False): if not isinstance(field, model_base.Field): raise ValueError def decorator(f): f.parsed_field = field f.returns_frame = returns_frame return f return decorator class State(object): def __init__(self, little_endian=False, resilient=False, active_extensions=None): self.little_endian = little_endian self.resilient = resilient self.seen_ifd_pointers = set() self.active_extensions = active_extensions or {} class Parser(object): def __init__(self, frame, state=None, context=declarative.default_parser_context, extension_context=declarative.default_extension_context): self.context = context self.extension_context = extension_context if state is None: ext_classes = frozenset(extension_context.all_extension_classes) ext_lookup = extension_context.map_extensions(ext_classes) state = State(active_extensions=ext_lookup) self.frame = frame self.state = state self.target = None self.on_create() @property def annotation(self): return self.context[type(self)] def on_create(self): pass def get_taget_class(self): parsed_class = self.annotation.parsed_class if not self.extensions: return parsed_class else: ext_classes = [e.extension_class for e in self.extensions] # use str to be compatible with Python 2 # see http://stackoverflow.com/questions/19618031 name = str('Extended' + parsed_class.__name__) bases = (parsed_class,) + tuple(ext_classes) return type(name, bases, {}) def __call__(self): # print(type(self)) if self.target is None: target_cls = self.get_taget_class() self.target = target_cls(self.frame) for field in self.annotation.parsed_class.fields: try: self.parse_field(field) except: # print('error: %s: %s' % (type(self), field)) raise self.parse_extensions() return self.target def parse_field(self, field): parse_func = self.annotation.field_parsers.get(field) if parse_func is None: if field.is_value: fallback = lambda: self.unpack_value(field) else: fallback = lambda: self.parse_child_field(field) value, sub_frame = fallback() else: value, sub_frame = parse_func(self) setattr(self.target, field.name, value) self.target.frames[field.name] = sub_frame def unpack_value(self, field, sub_frame=None): if sub_frame is None: sub_frame = self.frame[field.slice] if field.struct_spec is None: value = bytes(sub_frame.data) else: if self.state.little_endian: s = structs.le[field.struct_spec] else: s = structs.be[field.struct_spec] # print(self, field.name, s.format, sub_frame, sub_frame.data, field.slice) value = s.unpack(sub_frame.data)[0] return value, sub_frame def parse_child_field(self, field, sub_frame=None): if sub_frame is None: sub_frame = self.frame[field.slice] return self.parse_class(field.cls, sub_frame) def parse_class(self, cls, sub_frame, state=None): if state is None: state = self.state try: sub_parser_cls = self.context.resolve_parser(cls) except AttributeError: sub_parser_cls = self.default_parser(cls) sub_parser = sub_parser_cls(sub_frame, self.state) sub_parser.parent = self return sub_parser(), sub_frame def default_parser(self, cls): parser_cls = 
type(cls.__name__ + 'Parser', (Parser,), {}) deco = self.context.parses(cls) return deco(parser_cls) @property def extensions(self): return self.state.active_extensions.get(self.annotation.parsed_class) def parse_extensions(self): if not self.extensions: return for extension in self.extensions: # print('parsing extension %s, parent parser: %s, target: %s' % (extension, self, self.target)) parser_cls = self.context.resolve_parser(extension.extension_class) parser = parser_cls(self.frame, state=self.state) parser.target = self.target parser()
from STC_Path_Testing.nextdate import Date class TestNextDateCoverageClass: """ Valid Value Range: 1812 <= year <= 2016 1 <= month <= 12 1 <= day <= 31 """ def test_nextdate_c0(self): assert Date(1811, 0, 0).nextdate == "INVALID" assert Date(2015, 12, 31).nextdate == "2016, 1, 1" assert Date(2016, 2, 29).nextdate == "2016, 3, 1" assert Date(2016, 3, 1).nextdate == "2016, 3, 2" def test_nextdate_c1(self): assert Date(2020, 1, 1).nextdate == "INVALID" assert Date(2012, 0, 1).nextdate == "INVALID" assert Date(2012, 1, 0).nextdate == "INVALID" assert Date(2015, 12, 31).nextdate == "2016, 1, 1" assert Date(2015, 1, 31).nextdate == "2015, 2, 1" assert Date(2015, 4, 30).nextdate == "2015, 5, 1" assert Date(2015, 2, 28).nextdate == "2015, 3, 1" assert Date(2012, 2, 29).nextdate == "2012, 3, 1" assert Date(2015, 5, 1).nextdate == "2015, 5, 2" def test_nextdate_c2(self): """ Because of NextDate Problem doesn't have a Loop so we don't have C2 Coverage Test """ def test_nextdate_mcdc(self): # MCDC: action to Year + 1, Month = 1, Day = 1 assert Date(2013, 12, 31).nextdate == "2014, 1, 1" assert Date(2014, 12, 31).nextdate == "2015, 1, 1" # MCDC: action to Year, Month + 1, Day = 1 assert Date(2013, 1, 31).nextdate == "2013, 2, 1" assert Date(2013, 4, 30).nextdate == "2013, 5, 1" assert Date(2014, 2, 28).nextdate == "2014, 3, 1" assert Date(2012, 2, 29).nextdate == "2012, 3, 1" # MCDC: action to Year, Month, Day + 1 assert Date(2015, 1, 1).nextdate == "2015, 1, 2" assert Date(2015, 2, 1).nextdate == "2015, 2, 2"
# bendy.py # # animate the bones of the 'izzy' GLTF model from Sketchfab # bone names came from inspecting scene.gltf # assumes the model 'izzy' already exists in ARENA scene 'cesium' import arena import random import time import signal HOST = "oz.andrew.cmu.edu" SCENE = "cesium" bones=[ "CC_Base_Spine01_correct_0207", "CC_Base_Waist_correct_0206", "CC_Base_Spine02_correct_0205", "CC_Base_R_Clavicle_correct_0149", "CC_Base_R_Upperarm_correct_0197", "CC_Base_R_UpperarmTwist01_correct_0153", "CC_Base_R_UpperarmTwist02_correct_0154", "CC_Base_R_Forearm_correct_0155", "CC_Base_R_Elbow_correct_0160", "CC_Base_R_ForearmTwist01_correct_0159", "CC_Base_R_ForearmTwist02_correct_0150", "CC_Base_R_Hand_correct_0165", "CC_Base_R_Finger00_correct_0147", "CC_Base_R_Finger01_correct_0148", "CC_Base_R_Finger02_correct_0184", "CC_Base_R_Finger0Nub_correct_0172", "CC_Base_R_Finger10_correct_0186", "CC_Base_R_Finger11_correct_0171", "CC_Base_R_Finger12_correct_0192", "CC_Base_R_Finger1Nub_correct_0193", "CC_Base_R_Finger20_correct_0191", "CC_Base_R_Finger21_correct_0196", "CC_Base_R_Finger22_correct_0158", "CC_Base_R_Finger2Nub_correct_0156", "CC_Base_R_Finger30_correct_0151", "CC_Base_R_Finger31_correct_0152", "CC_Base_R_Finger32_correct_0177", "CC_Base_R_Finger3Nub_correct_0181", "CC_Base_R_Finger40_correct_0157", "CC_Base_R_Finger41_correct_0179", "CC_Base_R_Finger42_correct_0180", "CC_Base_R_Finger4Nub_correct_0176", "CC_Base_L_Clavicle_correct_0167", "CC_Base_L_Upperarm_correct_0166", "CC_Base_L_UpperarmTwist01_correct_0168", "CC_Base_L_UpperarmTwist02_correct_0182", "CC_Base_L_Forearm_correct_0183", "CC_Base_L_Elbow_correct_0178", "CC_Base_L_ForearmTwist01_correct_0190", "CC_Base_L_ForearmTwist02_correct_0185", "CC_Base_L_Hand_correct_0188", "CC_Base_L_Finger00_correct_0189", "CC_Base_L_Finger01_correct_0164", "CC_Base_L_Finger02_correct_0163", "CC_Base_L_Finger0Nub_correct_0162", "CC_Base_L_Finger10_correct_0195", "CC_Base_L_Finger11_correct_0199", "CC_Base_L_Finger12_correct_0200", "CC_Base_L_Finger1Nub_correct_0161", "CC_Base_L_Finger20_correct_0198", "CC_Base_L_Finger21_correct_0187", "CC_Base_L_Finger22_correct_0173", "CC_Base_L_Finger2Nub_correct_0174", "CC_Base_L_Finger30_correct_0201", "CC_Base_L_Finger31_correct_0175", "CC_Base_L_Finger32_correct_0194", "CC_Base_L_Finger3Nub_correct_0169", "CC_Base_L_Finger40_correct_0170", "CC_Base_L_Finger41_correct_0202", "CC_Base_L_Finger42_correct_0203", "CC_Base_L_Finger4Nub_correct_0204", "CC_Base_R_Ribs_correct_00", "CC_Base_R_RibsNub_correct_01", "CC_Base_R_RibsTwist_correct_02", "CC_Base_R_Breast_correct_03", "CC_Base_R_BreastNub_correct_04", "CC_Base_L_Ribs_correct_05", "CC_Base_L_RibsNub_correct_06", "CC_Base_L_RibsTwist_correct_07", "CC_Base_L_Breast_correct_0208", "CC_Base_L_BreastNub_correct_0209", "CC_Base_NeckTwist01_correct_0210", "CC_Base_NeckTwist02_correct_0211", "CC_Base_Head_correct_0212", "CC_Base_HeadNub_correct_0213", "CC_Base_R_Abdominal_correct_0214", "CC_Base_R_AbdominalNub_correct_0215", "CC_Base_L_Abdominal_correct_0216", "CC_Base_L_AbdominalNub_correct_0217", "CC_Base_Pelvis_correct_0218", "CC_Base_R_Thigh_correct_0219", "CC_Base_R_ThighTwist01_correct_0220", "CC_Base_R_ThighTwist02_correct_0221", "CC_Base_R_Calf_correct_0222", "CC_Base_R_Knee_correct_0223", "CC_Base_R_CalfTwist01_correct_0224", "CC_Base_R_CalfTwist02_correct_0225", "CC_Base_R_Foot_correct_0226", "CC_Base_R_ToeBase_correct_0227", "CC_Base_R_Toe00_correct_0228", "CC_Base_R_Toe00Nub_correct_0229", "CC_Base_R_Toe10_correct_0230", "CC_Base_R_Toe10Nub_correct_0231", 
"CC_Base_R_Toe20_correct_0232", "CC_Base_R_Toe20Nub_correct_0233", "CC_Base_R_Toe30_correct_0234", "CC_Base_R_Toe30Nub_correct_0235", "CC_Base_R_Toe40_correct_0236", "CC_Base_R_Toe40Nub_correct_0237", "CC_Base_R_ToeBaseShareBone_correct_0238", "CC_Base_R_Hip0_correct_0239", "CC_Base_R_Hip0Nub_correct_0240", "CC_Base_L_Thigh_correct_0241", "CC_Base_L_ThighTwist01_correct_0242", "CC_Base_L_ThighTwist02_correct_0243", "CC_Base_L_Calf_correct_0244", "CC_Base_L_Knee_correct_0245", "CC_Base_L_CalfTwist01_correct_0246", "CC_Base_L_CalfTwist02_correct_0247", "CC_Base_L_Foot_correct_0248", "CC_Base_L_ToeBase_correct_0249", "CC_Base_L_Toe40_correct_0250", "CC_Base_L_Toe40Nub_correct_0251", "CC_Base_L_Toe30_correct_0252", "CC_Base_L_Toe30Nub_correct_0253", "CC_Base_L_Toe20_correct_0254", "CC_Base_L_Toe20Nub_correct_0255", "CC_Base_L_Toe10_correct_0256", "CC_Base_L_Toe10Nub_correct_0257", "CC_Base_L_Toe00_correct_0258", "CC_Base_L_Toe00Nub_correct_0259", "CC_Base_L_ToeBaseShareBone_correct_0260", "CC_Base_L_Hip0_correct_0261", "CC_Base_L_Hip0Nub_correct_0262"] arena.init(HOST, "realm", SCENE) def randrot(): r = round((random.random()/10 - 0.05), 3) #print(r) return r def signal_handler(sig, frame): exit() signal.signal(signal.SIGINT, signal_handler) messages = [] counter = 0 while True: obj = scene.updateBone( object_id="izzy", rotation=(randrot(),randrot(),randrot(),1), bone_id = bones[random.randint(0,len(bones)-1)] ) time.sleep(0.1) exit()
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# pyre-strict

import argparse
import json
import logging
import os
import sys
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Set, Type

from typing_extensions import Final

from ...client import statistics
from .annotated_function_generator import (  # noqa
    AnnotatedFunctionGenerator,
    FunctionVisitor,
    FunctionDefinition,
)
from .generator_specifications import DecoratorAnnotationSpecification  # noqa
from .get_annotated_free_functions_with_decorator import (  # noqa
    AnnotatedFreeFunctionWithDecoratorGenerator,
)
from .get_class_sources import ClassSourceGenerator  # noqa
from .get_constructor_initialized_attribute_sources import (  # noqa
    ConstructorInitializedAttributeSourceGenerator,
)
from .get_django_class_based_view_models import DjangoClassBasedViewModels  # noqa
from .get_dynamic_graphql_sources import DynamicGraphQLSourceGenerator  # noqa
from .get_exit_nodes import ExitNodeGenerator  # noqa
from .get_filtered_sources import FilteredSourceGenerator  # noqa
from .get_globals import GlobalModelGenerator  # noqa
from .get_graphene_models import GrapheneModelsGenerator  # noqa
from .get_graphql_sources import GraphQLSourceGenerator  # noqa
from .get_methods_of_subclasses import MethodsOfSubclassesGenerator  # noqa
from .get_models_filtered_by_callable import ModelsFilteredByCallableGenerator  # noqa
from .get_request_specific_data import RequestSpecificDataGenerator  # noqa
from .get_REST_api_sources import RESTApiSourceGenerator  # noqa
from .get_undecorated_sources import UndecoratedSourceGenerator  # noqa
from .model import Model
from .model_generator import ModelGenerator

LOG: logging.Logger = logging.getLogger(__name__)


@dataclass
class GenerationArguments:
    """
    When adding new generation options, make sure to add a default value for
    them for backwards compatibility. We construct GenerationArguments objects
    outside the current directory, and adding a non-optional argument will
    break those.
    """

    mode: Final[Optional[List[str]]]
    verbose: bool
    output_directory: Final[Optional[str]]


def _file_exists(path: str) -> str:
    if not os.path.exists(path):
        raise ValueError(f"No file at `{path}`")
    return path


def _parse_arguments(
    generator_options: Dict[str, ModelGenerator[Model]]
) -> GenerationArguments:
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose logging")
    parser.add_argument("--mode", action="append", choices=generator_options.keys())
    parser.add_argument(
        "--output-directory", type=_file_exists, help="Directory to write models to"
    )
    arguments: argparse.Namespace = parser.parse_args()
    return GenerationArguments(
        mode=arguments.mode,
        verbose=arguments.verbose,
        output_directory=arguments.output_directory,
    )


def _report_results(
    models: Dict[str, Set[Model]], output_directory: Optional[str]
) -> None:
    if output_directory is not None:
        for name in models:
            # Try to be slightly intelligent in how we name files.
if name.startswith("get_"): filename = f"generated_{name[4:]}" else: filename = f"generated_{name}" with open(f"{output_directory}/{filename}.pysa", "w") as output_file: output_file.write( "\n".join([str(model) for model in sorted(models[name])]) ) output_file.write("\n") print( json.dumps( { "number of generated models": sum( (len(generated_models) for generated_models in models.values()) ) } ) ) else: all_models = set() for name in models: all_models = all_models.union(models[name]) print("\n".join([str(model) for model in sorted(all_models)])) def run_from_parsed_arguments( generator_options: Dict[str, ModelGenerator[Model]], arguments: GenerationArguments, default_modes: List[str], logger_executable: Optional[str] = None, include_default_modes: bool = False, ) -> None: argument_modes = arguments.mode or [] if len(argument_modes) == 0 or include_default_modes: modes = list(set(argument_modes + default_modes)) else: modes = argument_modes generated_models: Dict[str, Set[Model]] = {} for mode in modes: LOG.info("Computing models for `%s`", mode) start = time.time() generated_models[mode] = set(generator_options[mode].generate_models()) elapsed_time_seconds = time.time() - start LOG.info(f"Computed models for `{mode}` in {elapsed_time_seconds:.3f} seconds.") if logger_executable is not None: elapsed_time_milliseconds = int(elapsed_time_seconds * 1000) statistics.log( statistics.LoggerCategory.PERFORMANCE, integers={"time": elapsed_time_milliseconds}, normals={ "name": "model generation", "model kind": mode, "command_line": " ".join(sys.argv), }, logger=logger_executable, ) _report_results(generated_models, arguments.output_directory) def run_generators( generator_options: Dict[str, ModelGenerator[Model]], default_modes: List[str], verbose: bool = False, logger_executable: Optional[str] = None, include_default_modes: bool = False, ) -> None: arguments = _parse_arguments(generator_options) logging.basicConfig( format="%(asctime)s %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.DEBUG if verbose or arguments.verbose else logging.INFO, ) run_from_parsed_arguments( generator_options, arguments, default_modes, logger_executable, include_default_modes=include_default_modes, )
"""Unit tests for the Edit user profile form.""" from django import forms from django.test import TestCase, tag from BookClub.forms import EditProfileForm from BookClub.models import User @tag('forms', 'user') class ProfileFormTestCase(TestCase): """Update User Details Form Tests.""" fixtures = ['BookClub/tests/fixtures/default_users.json'] def setUp(self): self.form_input = { 'username': 'johndoe2', 'email': 'johndoe2@example.org', 'public_bio': 'My bio', } def test_form_has_necessary_fields(self): form = EditProfileForm() self.assertIn('username', form.fields) self.assertIn('email', form.fields) email_field = form.fields['email'] self.assertTrue(isinstance(email_field, forms.EmailField)) self.assertIn('public_bio', form.fields) def test_valid_user_form(self): form = EditProfileForm(data=self.form_input) self.assertTrue(form.is_valid()) def test_form_uses_model_validation(self): self.form_input['username'] = '.badusername.' form = EditProfileForm(data=self.form_input) self.assertFalse(form.is_valid()) def test_form_must_save_correctly(self): user = User.objects.get(username='johndoe') form = EditProfileForm(instance=user, data=self.form_input) before_count = User.objects.count() form.save() after_count = User.objects.count() self.assertEqual(after_count, before_count) self.assertEqual(user.username, 'johndoe2') self.assertEqual(user.email, 'johndoe2@example.org') self.assertEqual(user.public_bio, 'My bio')
import os from celery import Celery import synchronous.settings as settings os.environ.setdefault("DJANGO_SETTINGS_MODULE", "synchronous.settings") app = Celery("synchronous") app.config_from_object("django.conf:settings") app.autodiscover_tasks() # make sure to run `celery -A synchronous worker -B -l info` # as well as set up rabbitmq (rabbitmq-server)
from setuptools import setup, find_packages setup( name = "docuware-client", version = "0.1.0", description = "DocuWare REST-API client", long_description = open("README.md").read(), long_description_content_type = "text/markdown", url = "https://github.com/sniner/docuware-client", author = "Stefan Schรถnberger", author_email = "mail@sniner.dev", install_requires=[ "requests>=2.27", ], packages = find_packages(), entry_points = { "console_scripts": [ "dw-client = docuware.cli.dw:main", ] }, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", ], platforms = "any", python_requires = ">=3.9", )
import numpy import lasagne, lasagne.updates, lasagne.regularization from lasagne import layers import theano from theano import tensor as T class MILogisticRegression(object): """Multi-instance logistic regression classifier. A class for performing [deep] multi-instance logistic regression using the Theano and Lasagne libraries. Training instances are allowed to be grouped into bags, where at least one (but not necessarily all) instances in a bag are positive. Negative bags have all negative instances, and they should be passed as singleton bags to the training method. If hidden_units is greater than 1, a linear hidden layer with that many hidden units, followed by a maxout layer with a sigmoid activation function, will be used to compute the instance-level probabilities. Parameters ---------- fit_intercept : whether to include an intercept term in the model max_iter : maximum number of iterations for the fitting algorithm tol : minimum change in loss function to determine convergence penalty : regularization penalty for the weights, l1, l2, or None hidden_units : number of hidden units followed by a maxout layer updater : update function lasagne.updates to optimize the loss C : inverse regularization penalty (like scikit-learn) learning_rate : learning rate of the update algorithm """ def __init__(self, fit_intercept=True, max_iter=100, tol=1e-5, penalty="l2", hidden_units=1, updater=lasagne.updates.nesterov_momentum, C=1.0, learning_rate=0.1): self.fit_intercept = fit_intercept self.max_iter = max_iter self.updater = updater self.learning_rate = learning_rate self.C = C self.tol = tol self.hidden_units = hidden_units if penalty == "l2": self.penalty = lasagne.regularization.l2 elif penalty == "l1": self.penalty = lasagne.regularization.l1 elif penalty is None: self.C = None self.penalty = None else: raise ValueError("penalty must be 'l1', 'l2', or None") def _setup_model(self, num_features, num_rows): if self.fit_intercept: b = lasagne.init.Constant(0.) else: b = None X_sym = T.matrix() y_sym = T.ivector() bag_labels = T.ivector() input_layer = layers.InputLayer(shape=(num_rows, num_features), input_var=X_sym) if self.hidden_units <= 1: instance_log_odds = layers.DenseLayer(input_layer, num_units=1, W=lasagne.init.Constant(0.), b=b, nonlinearity=lasagne.nonlinearities.linear) else: instance_log_odds = layers.DenseLayer(input_layer, num_units=self.hidden_units, W=lasagne.init.GlorotUniform(1.0), b=b, nonlinearity=lasagne.nonlinearities.linear) instance_log_odds = layers.FeaturePoolLayer(instance_log_odds, pool_size=self.hidden_units, pool_function=T.max) instance_log_odds = layers.FlattenLayer(instance_log_odds, outdim=1) instance_log_odds_output = layers.get_output(instance_log_odds, X_sym) instance_probs_output = T.nnet.sigmoid(instance_log_odds_output) self.all_params = layers.get_all_params(instance_log_odds, trainable=True) bag_mapper = T.transpose(T.extra_ops.to_one_hot(bag_labels, T.max(bag_labels)+1)) # if previous layers were probabilities: # bag_probs = 1 - T.exp(T.dot(bag_mapper, T.log(1 - instance_probs_output))) # if previous layers were log odds: bag_probs = 1 - T.exp(T.dot(bag_mapper, -T.nnet.softplus(instance_log_odds_output))) if self.C is None: regularization = 0 else: # I scale the penalty by num_rows since the likelihood # term is the average over instances, instead of the sum # (like sklearn). 
This is to make the learning rate not # depend on the dataset (or minibatch) size, but it means # we have to know the minibatch size here in order for C # to be the same as for sklearn. # # Note: this applies the same regularization to all # "regularizable" parameters in the whole network # (everything but the bias terms). I need to think more # about whether this makes sense for the deep networks, # though it's probably a reasonable starting point. regularization = 1.0/self.C/num_rows * lasagne.regularization.regularize_network_params(instance_log_odds, self.penalty) # This chunk is a bit repetitive and could be simplified: bag_loss = T.mean(lasagne.objectives.binary_crossentropy(bag_probs, y_sym)) + regularization self.f_train_bag = theano.function([X_sym, y_sym, bag_labels], [bag_loss], updates=self.updater(bag_loss, self.all_params, learning_rate=self.learning_rate)) nobag_loss = T.mean(lasagne.objectives.binary_crossentropy(instance_probs_output, y_sym)) + regularization self.f_train_nobag = theano.function([X_sym, y_sym], [nobag_loss], updates=self.updater(nobag_loss, self.all_params, learning_rate=self.learning_rate)) self.f_bag_logprobs = theano.function([X_sym, bag_labels], T.log(bag_probs)) self.f_logprobs = theano.function([X_sym], T.log(instance_probs_output)) def fit(self, X_train, y_train, bag_labels=None): """Fit the model according to the given training data. Parameters ---------- X_train : array-like, shape (n_samples, n_features) Training vector, where n_samples in the number of samples and n_features is the number of features. y_train : array-like, shape (n_bags) Bag-level target vector relative to X_train and bag_labels. All labels must be 0 or 1, and there must be samples of each type given. bag_labels : array-like, shape (n_samples) optional Bag labels for each training instance (rows of X_train), which must be integers from 0 to n_bags-1. If no labels are given, the model falls bag to standard logistic regression (each instance will have a bag of size 1, and class labels are no longer noisy). Negative bags should all be size 1, since the model does not handle them correctly otherwise. Returns ------- self : object Returns self. """ if len(numpy.unique(y_train)) != 2 or numpy.min(y_train) < 0.0 or numpy.max(y_train) > 1.0: raise ValueError("class labels should all be 0 or 1, and both should be present") X_train = numpy.asarray(X_train, dtype=numpy.float32) y_train = numpy.asarray(y_train, dtype=numpy.int32) # Always set up a new model, since it depends on the input # size and that may have changed since the last call (if any). self._setup_model(X_train.shape[1], X_train.shape[0]) train_args = [X_train, y_train] if bag_labels is None: # Train with a simpler objective if instances aren't bagged. train = self.f_train_nobag else: bag_labels = numpy.asarray(bag_labels, dtype=numpy.int32) train = self.f_train_bag train_args.append(bag_labels) last = numpy.inf for epochs in xrange(self.max_iter): curr = train(*train_args)[0] if numpy.abs(last - curr) < self.tol: break last = curr self.n_iter_ = epochs if self.fit_intercept: self.intercept_ = self.all_params[1].get_value() else: self.intercept_ = 0. self.coef_ = self.all_params[0].get_value() return self def predict_log_proba(self, X_test): """Log of instance-level probability estimates. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples] Returns the log-probability of each sample for being class 1. 
""" return self.f_logprobs(numpy.asarray(X_test, dtype=numpy.float32)) def predict(self, X_test): """Instance-level class predictions. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples] Returns the predicted class of each sample (0 or 1). """ return numpy.asarray(self.predict_log_proba(X_test) >= numpy.log(0.5), dtype=numpy.int32) def predict_proba(self, X_test): """Instance-level probability estimates. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- T : array-like, shape = [n_samples] Returns the probability of each sample for being class 1. """ return numpy.exp(self.predict_log_proba(X_test)) def predict_log_bag_proba(self, X_test, bag_labels): """Log of bag-level probability estimates. Parameters ---------- X : array-like, shape = [n_samples, n_features] bag_labels : array-like, shape = [n_samples, n_bags] A list of bag labels for the samples, required to be integers from 0 to n_bags-1. Returns ------- T : array-like, shape = [n_bags] Returns the log-probability of each bag for being class 1. """ return self.f_bag_logprobs(numpy.asarray(X_test, dtype=numpy.float32), numpy.asarray(bag_labels, dtype=numpy.int32)) def predict_bag_proba(self, X_test, bag_labels): """Bag-level probability estimates. Parameters ---------- X : array-like, shape = [n_samples, n_features] bag_labels : array-like, shape = [n_samples, n_bags] A list of bag labels for the samples, required to be integers from 0 to n_bags-1. Returns ------- T : array-like, shape = [n_bags] Returns the probability of each bag for being class 1. """ return numpy.exp(self.predict_log_bag_proba(X_test, bag_labels)) def predict_bag(self, X_test, bag_labels): """Bag-level class predictions. Parameters ---------- X : array-like, shape = [n_samples, n_features] bag_labels : array-like, shape = [n_samples, n_bags] A list of bag labels for the samples, required to be integers from 0 to n_bags-1. Returns ------- T : array-like, shape = [n_bags] Returns the predicted class of each bag (0 or 1). """ return numpy.asarray(self.predict_log_bag_proba(X_test, bag_labels) >= numpy.log(0.5), dtype=numpy.int32)
def load_bbox(bbox_file):
    img_to_bbox = {}
    with open(bbox_file, 'rb') as f:
        lines = f.readlines()
        for line in lines[2:]:
            flag = line.strip('\n').split(' ')
            img_to_bbox[flag[0]] = [int(flag[-4]), int(flag[-3]), int(flag[-2]), int(flag[-1])]
    return img_to_bbox

def get_trainset(data_path):
    train_val = []
    with open(data_path, 'rb') as f:
        lines = f.readlines()
        for line in lines[2:]:
            flag = line.strip('\n').split(' ')
            if flag[-1] == 'train':
                train_val.append(flag[0])
    print "num of trainval is %d" % len(train_val)
    return train_val

def load_category(data_path, top_thresh, down_thresh, full_thresh):
    img_to_category = {}
    with open(data_path, 'rb') as f:
        lines = f.readlines()
        for line in lines[2:]:
            flag = line.strip('\n').split(' ')
            if int(flag[-1]) <= top_thresh:
                img_to_category[flag[0]] = 1
            elif int(flag[-1]) <= down_thresh:
                img_to_category[flag[0]] = 2
            else:
                img_to_category[flag[0]] = 3
    return img_to_category

def write_new_file(train_val, img_to_bbox, img_to_category, wtf_path):
    with open(wtf_path, 'w') as f:
        for idx, img in enumerate(train_val):
            print "Processing %d/%d!!!!" % (idx+1, len(train_val))
            category_id = img_to_category[img]
            bbox = img_to_bbox[img]
            f.write(img+' '+str(category_id)+' '+str(bbox[0])+\
                ' '+str(bbox[1])+' '+str(bbox[2])+' '+str(bbox[3])+'\n')

if __name__ == '__main__':
    # file_path = '/data/home/liuhuawei/clothing_data/Eval/list_eval_partition.txt'
    # bbox_path = '/data/home/liuhuawei/clothing_data/Anno/list_bbox.txt'
    # category_path = '/data/home/liuhuawei/clothing_data/Anno/list_category_img.txt'
    # train_val = get_trainset(file_path)
    # ##1-20
    # top_thresh = 20
    # ##21-36
    # down_thresh = 36
    # ##37-50
    # full_thresh = 50
    # img_to_bbox = load_bbox(bbox_path)
    # img_to_category = load_category(category_path, top_thresh, down_thresh, full_thresh)
    # wtf_path = '/data/home/liuhuawei/clothing.txt'
    # write_new_file(train_val, img_to_bbox, img_to_category, wtf_path)
    pass
#!/usr/bin/env python """ This is a subclass seisflows.system.Serial Provides utilities for submitting jobs in serial on a single machine, mostly for testing purposes """ import os import sys import numpy as np import os from seisflows.tools import unix from seisflows.config import custom_import from seisflows.tools.err import ParameterError PAR = sys.modules["seisflows_parameters"] PATH = sys.modules["seisflows_paths"] class Serial(custom_import("system", "base")): """ Run tasks in a serial fashion on a single local machine """ def check(self): """ Checks parameters and paths """ # name of the job if "TITLE" not in PAR: setattr(PAR, "TITLE", os.path.basename(os.path.abspath("."))) # number of tasks if "NTASK" not in PAR: setattr(PAR, "NTASK", 1) # number of processers per task if "NPROC" not in PAR: setattr(PAR, "NPROC", 1) # how to invoke executables if "MPIEXEC" not in PAR: setattr(PAR, "MPIEXEC", "") # level of detail in output messages if "VERBOSE" not in PAR: setattr(PAR, "VERBOSE", 1) # where job was submitted if "WORKDIR" not in PATH: setattr(PATH, "WORKDIR", os.path.abspath(".")) # where output files are written if "OUTPUT" not in PATH: setattr(PATH, "OUTPUT", os.path.join(PATH.WORKDIR, "output")) # where temporary files are written if "SCRATCH" not in PATH: setattr(PATH, "SCRATCH", os.path.join(PATH.WORKDIR, "scratch")) # where system files are written if "SYSTEM" not in PATH: setattr(PATH, "SYSTEM", os.path.join(PATH.SCRATCH, "system")) # optional local filesystem scratch path if "LOCAL" not in PATH: setattr(PATH, "LOCAL", None) def submit(self, workflow): """ Submits the main workflow job """ # create scratch directories unix.mkdir(PATH.SCRATCH) unix.mkdir(PATH.SYSTEM) # create output directories unix.mkdir(PATH.OUTPUT) workflow.checkpoint() # execute workflow workflow.main() def run(self, classname, method, hosts="all", **kwargs): """ Executes task multiple times in serial """ unix.mkdir(PATH.SYSTEM) for taskid in range(PAR.NTASK): os.environ["SEISFLOWS_TASKID"] = str(taskid) self.progress(taskid) func = getattr(__import__("seisflows_" + classname), method) func(**kwargs) def run_single(self, classname, method, *args, **kwargs): """ Runs task a single time """ os.environ["SEISFLOWS_TASKID"] = "0" func = getattr(__import__("seisflows_" + classname), method) func(**kwargs) def taskid(self): """ Provides a unique identifier for each running task """ return int(os.environ["SEISFLOWS_TASKID"]) def mpiexec(self): """ Specifies MPI executable used to invoke solver """ return PAR.MPIEXEC def progress(self, taskid): """ Provides status update by printing the current task being performed """ if PAR.NTASK > 1: print(f"task {taskid + 1:02d} of {PAR.NTASK:02d}")
import copy def print_seating(s): print() for r in s: print(''.join(r)) print() def get_neighbors(seating, i,j): if i == 0: i_ops = [0,+1] elif i == max_r: i_ops = [-1,0] else: i_ops = [-1,0,+1] if j == 0: j_ops = [0,+1] elif j == max_c: j_ops = [-1,0] else: j_ops = [-1,0,+1] occupied = 0 for ii in i_ops: for jj in j_ops: if ii == 0 and jj == 0: continue if seating[i + ii][j + jj] == '#': occupied += 1 return occupied def shuffle_seats(seating): any_changed = False new_seating = copy.deepcopy(seating) for i, row in enumerate(seating): for j, seat in enumerate(row): if seat in ['#','L']: n = get_neighbors(seating, i,j) if n >= 4 and seat == '#': new_seating[i][j] = 'L' any_changed = True elif n == 0 and seat == 'L': new_seating[i][j] = '#' any_changed = True #print_seating(new_seating) if any_changed: new_seating = shuffle_seats(new_seating) return new_seating ##########################333 sc = [list(s.strip()) for s in open("input.txt") if s.strip()] max_r = len(sc) - 1 max_c = len(sc[0]) - 1 n_sc = shuffle_seats(sc) print_seating(n_sc) occupied = 0 for s in n_sc: occupied += s.count("#") print("Occupied seats",occupied)
#!/usr/bin/env python3 """ Solve any size rubiks cube: - For 2x2x2 and 3x3x3 just solve it - For 4x4x4 and larger, reduce to 3x3x3 and then solve """ # standard libraries import argparse import datetime as dt import logging import resource import sys from math import sqrt # rubiks cube libraries from rubikscubennnsolver import SolveError, configure_logging, reverse_steps if sys.version_info < (3, 6): raise SystemError("Must be using Python 3.6 or higher") configure_logging() logger = logging.getLogger(__name__) logger.info("rubiks-cube-solver.py begin") start_time = dt.datetime.now() parser = argparse.ArgumentParser() parser.add_argument("--print-steps", default=False, action="store_true", help="Display animated step-by-step solution") parser.add_argument("--debug", default=False, action="store_true", help="set loglevel to DEBUG") parser.add_argument("--no-comments", default=False, action="store_true", help="No comments in alg.cubing.net url") # CPU mode parser.add_argument( "--min-memory", default=False, action="store_true", help="Load smaller tables to use less memory...takes longer to run", ) action = parser.add_mutually_exclusive_group(required=False) parser.add_argument("--openwith", default=None, type=str, help="Colors for sides U, L, etc") parser.add_argument("--colormap", default=None, type=str, help="Colors for sides U, L, etc") parser.add_argument("--order", type=str, default="URFDLB", help="order of sides in --state, default kociemba URFDLB") parser.add_argument("--solution333", type=str, default=None, help="cube explorer optimal steps for solving 3x3x3") parser.add_argument( "--state", type=str, help="Cube state", default="LBBUUURBDDBBDFLFLUDFBFDDFLLLLRLRFRDUDBULBLFLDLFBLBUDFURURDUUBFFBBRBRLBRFLLDRRDDFRRUURRFDUFBFURUD", ) args = parser.parse_args() if "G" in args.state: args.state = args.state.replace("G", "F") args.state = args.state.replace("Y", "D") args.state = args.state.replace("O", "L") args.state = args.state.replace("W", "U") if args.debug: logger.setLevel(logging.DEBUG) size = int(sqrt((len(args.state) / 6))) if size == 2: # rubiks cube libraries from rubikscubennnsolver.RubiksCube222 import RubiksCube222 cube = RubiksCube222(args.state, args.order, args.colormap) elif size == 3: # rubiks cube libraries from rubikscubennnsolver.RubiksCube333 import RubiksCube333 cube = RubiksCube333(args.state, args.order, args.colormap) elif size == 4: # rubiks cube libraries from rubikscubennnsolver.RubiksCube444 import RubiksCube444 cube = RubiksCube444(args.state, args.order, args.colormap) elif size == 5: # rubiks cube libraries from rubikscubennnsolver.RubiksCube555 import RubiksCube555 cube = RubiksCube555(args.state, args.order, args.colormap) elif size == 6: # rubiks cube libraries from rubikscubennnsolver.RubiksCube666 import RubiksCube666 cube = RubiksCube666(args.state, args.order, args.colormap) elif size == 7: # rubiks cube libraries from rubikscubennnsolver.RubiksCube777 import RubiksCube777 cube = RubiksCube777(args.state, args.order, args.colormap) elif size % 2 == 0: # rubiks cube libraries from rubikscubennnsolver.RubiksCubeNNNEven import RubiksCubeNNNEven cube = RubiksCubeNNNEven(args.state, args.order, args.colormap) else: # rubiks cube libraries from rubikscubennnsolver.RubiksCubeNNNOdd import RubiksCubeNNNOdd cube = RubiksCubeNNNOdd(args.state, args.order, args.colormap) cube.sanity_check() cube.print_cube("Initial Cube") cube.www_header() cube.www_write_cube("Initial Cube") if args.openwith: for step in args.openwith.split(): cube.rotate(step) 
cube.print_cube("post --openwith") if args.solution333: solution333 = reverse_steps(args.solution333.split()) else: solution333 = [] cube.solve(solution333) end_time = dt.datetime.now() cube.print_cube("Final Cube") cube.print_solution(not args.no_comments) logger.info("*********************************************************************************") logger.info("See /tmp/rubiks-cube-NxNxN-solver/index.html for more detailed solve instructions") logger.info("*********************************************************************************\n") # Now put the cube back in its initial state and verify the solution solves it solution = cube.solution cube.re_init() len_steps = len(solution) for (i, step) in enumerate(solution): if args.print_steps: print(("Move %d/%d: %s" % (i + 1, len_steps, step))) cube.rotate(step) www_desc = "Cube After Move %d/%d: %s<br>\n" % (i + 1, len_steps, step) cube.www_write_cube(www_desc) if args.print_steps: cube.print_cube(f"--print-steps {step}") print("\n\n\n\n") cube.www_footer() if args.print_steps: cube.print_cube("--print-steps DONE") if args.min_memory: print("\n\n****************************************") print("--min-memory has been replaced by --fast") print("****************************************\n\n") logger.info("rubiks-cube-solver.py end") logger.info(f"Memory : {resource.getrusage(resource.RUSAGE_SELF).ru_maxrss:,} bytes") logger.info(f"Time : {end_time - start_time}") logger.info("") if not cube.solved(): raise SolveError("cube should be solved but is not")
import turtle turtle.setup(1440,900) turtle.bgcolor('black') # Setup t = turtle.Turtle() t.penup() t.goto(-40,-50) t.pendown() # Yellow Square t.color('yellow') t.begin_fill() t.forward(100) t.left(90) t.forward(100) t.left(90) t.forward(100) t.left(90) t.forward(100) t.left(90) t.end_fill() # Orange Parallelogram t.color('orange') t.begin_fill() t.left(135) t.forward(60) t.right(45) t.forward(100) t.left(45) t.backward(60) t.end_fill() # Red Parallelogram t.color('red') t.begin_fill() t.penup() t.forward(60) t.right(45) t.right(90) t.pendown() t.forward(100) t.right(45) t.forward(60) t.end_fill() t.penup() t.color('black') t.forward(100) turtle.done()
# Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right class Solution: def sumEvenGrandparent(self, root: TreeNode) -> int: self.k=0 def dfs(root): if root: if root.val%2==0: if root.left: if root.left.left: self.k+=root.left.left.val if root.left.right: self.k+=root.left.right.val if root.right: if root.right.right: self.k+=root.right.right.val if root.right.left: self.k+=root.right.left.val dfs(root.left) dfs(root.right) dfs(root) return self.k
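# Sketch (not part of the submission): a minimal TreeNode plus a tiny tree to
# exercise the solution above; assumes the Solution class is in scope. The
# grandparent 6 is even, so its grandchildren 2 and 7 contribute 2 + 7 = 9.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

root = TreeNode(6, TreeNode(7, TreeNode(2), TreeNode(7)), TreeNode(8))
print(Solution().sumEvenGrandparent(root))  # -> 9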
# Copies indices (settings, mappings, and optionally data) from a 5 cluster to a 7 cluster.
# Note that when copying data, the copy is performed through this machine, meaning all data is downloaded from 5
# and then uploaded to 7. This can be a very slow process if you have a lot of data, so it is recommended that you
# only do this for small indices.
# Requires python 3+ and elasticsearch's python lib to be installed (pip install elasticsearch).
import argparse
import elasticsearch
import elasticsearch.helpers
import ssl
import time


def parse_bool(value):
    # Without this, argparse would keep the raw string, and any non-empty
    # value (including "false") would be truthy.
    return str(value).lower() in ('1', 'true', 'yes')


parser = argparse.ArgumentParser(description="Transfers ES indexes between clusters.")
parser.add_argument('-s', '--source', required=True, help='Source cluster URL and port.')
parser.add_argument('-d', '--dest', required=True, help='Destination cluster URL and port.')
parser.add_argument('--source-ssl', required=False, type=parse_bool, default=True, help='Enables / disables source SSL.')
parser.add_argument('--dest-ssl', required=False, type=parse_bool, default=True, help='Enables / disables destination SSL.')
parser.add_argument('--cert-file', required=False, default=None, help='Cert file to use with SSL.')
parser.add_argument('--key-file', required=False, default=None, help='Key file to use with SSL.')
parser.add_argument('--ca-file', required=False, default=None, help='Certificate authority file to use for SSL.')
parser.add_argument('--create-only', required=False, type=parse_bool, default=False,
                    help='If true, only create the index (with settings/mappings/aliases).')
parser.add_argument('-i', '--indices', required=False, default="*", help='Regular expression for indexes to copy.')
parser.add_argument('--name-override', required=False, default=None, help='destination index name override')
args = parser.parse_args()


def create_ssl_context():
    # "Error" is not a builtin, so ValueError is raised for missing SSL arguments.
    if args.cert_file is None:
        raise ValueError('--cert-file is required with SSL.')
    if args.key_file is None:
        raise ValueError('--key-file is required with SSL.')
    if args.ca_file is None:
        raise ValueError('--ca-file is required with SSL.')

    context = ssl.create_default_context(
        ssl.Purpose.SERVER_AUTH,
        cafile=args.ca_file
    )
    context.load_cert_chain(
        certfile=args.cert_file,
        keyfile=args.key_file
    )
    return context


def create_client(host, ssl_context):
    return elasticsearch.Elasticsearch(
        [host],
        ssl_context=ssl_context
    )


class EsClients:
    def __init__(self, source_client, dest_client):
        self.source_client = source_client
        self.dest_client = dest_client


def get_index_settings(client, pattern):
    indices = elasticsearch.client.IndicesClient(client).get(pattern)
    return indices


def clean_settings(config):
    # Settings set by the server that we can read, but not write.
    del config['settings']['index']['provided_name']
    del config['settings']['index']['version']
    del config['settings']['index']['creation_date']
    del config['settings']['index']['uuid']
    return config


def find_max_ngram_diff_helper(obj):
    # Finds the greatest diff in ngram settings and returns the value. In Elasticsearch 7, an upper bound must be
    # explicitly set.
    if not isinstance(obj, dict):
        return -1

    diff = -1
    if 'min_gram' in obj and 'max_gram' in obj:
        diff = int(obj['max_gram']) - int(obj['min_gram'])

    for value in obj.values():
        t = find_max_ngram_diff_helper(value)
        diff = max(t, diff)
    return diff


def find_max_ngram_diff(config):
    settings = config['settings']
    return find_max_ngram_diff_helper(settings)


def update_for_seven(config):
    # Updates settings and mappings for Elasticsearch 7.

    # Should only be one value in 5 - the doc type. Unwrap for 7; document types are deprecated.
    config['mappings'] = next(iter(config['mappings'].values()))

    # Need to set max_ngram_diff if any ngram diffs are more than 1.
    max_ngram = find_max_ngram_diff(config)
    if max_ngram > 1:
        config['settings']['index']['max_ngram_diff'] = max_ngram

    # _all is deprecated and also false by default; so not even explicitly needed...
    if '_all' in config['mappings']:
        enabled = config['mappings']['_all']['enabled']
        if enabled:
            raise ValueError('_all is enabled')
        del config['mappings']['_all']

    return config


def create_index(client, name, config, name_override=None):
    # Creates the given index on the client.
    name_override = name if name_override is None else name_override
    indices_client = elasticsearch.client.IndicesClient(client)
    if indices_client.exists(name_override):
        print('WARNING: Index %s already exists!' % name_override)
        return
    indices_client.create(name_override, body=config)


timing_samples = []


# Copy pasted from source code so that we can transform documents while copying
def reindex(
    client,
    source_index,
    target_index,
    query=None,
    target_client=None,
    chunk_size=500,
    scroll="5m",
    scan_kwargs={},
    bulk_kwargs={},
):
    # Like the elasticsearch.helpers.reindex function, but with some custom logic. Namely, allows for source/dest
    # indices to be on different clusters, prints status updates, and deletes the _type field.
    target_client = client if target_client is None else target_client
    docs = elasticsearch.helpers.scan(client, query=query, index=source_index, scroll=scroll, **scan_kwargs)

    start = time.time()
    count = 0
    count_at_last_update = 0
    last_print = start
    update_interval = 5

    def _change_doc_index(hits, index):
        for h in hits:
            h["_index"] = index
            if "fields" in h:
                h.update(h.pop("fields"))
            # TODO: Need to remove "_type" otherwise it complains about keyword becoming text? Is this legitimate?
            if "_type" in h:
                del h["_type"]

            nonlocal count
            nonlocal last_print
            nonlocal count_at_last_update
            count = count + 1
            # Use a window of samples to average over.
            if (time.time() - last_print) > update_interval:
                timing_samples.append((count - count_at_last_update) / (time.time() - last_print))
                if len(timing_samples) > 10:
                    timing_samples.pop(0)
                count_at_last_update = count
                last_print = time.time()
                print('Transferring %s docs/second. Total %s.' % (sum(timing_samples) / len(timing_samples), count))

            yield h

    kwargs = {"stats_only": True}
    kwargs.update(bulk_kwargs)
    return elasticsearch.helpers.bulk(
        target_client,
        _change_doc_index(docs, target_index),
        chunk_size=chunk_size,
        raise_on_error=False,
        **kwargs
    )


def copy_index_data(clients, index, name_override):
    # Copies all documents from the source to the dest index.
    name_override = index if name_override is None else name_override
    print('Copying index %s' % index)
    start = time.time()
    res = reindex(
        clients.source_client,
        index,
        name_override,
        target_client=clients.dest_client
    )
    end = time.time()
    print('Documents written %s. Errors %s.' % res)
    print('Took %s seconds.' % (end - start))


def main():
    # Only build an SSL context when at least one side actually uses SSL;
    # otherwise the cert/key/CA arguments would be demanded needlessly.
    ssl_context = create_ssl_context() if (args.source_ssl or args.dest_ssl) else None
    source_ssl_context = ssl_context if args.source_ssl else None
    dest_ssl_context = ssl_context if args.dest_ssl else None

    clients = EsClients(create_client(args.source, source_ssl_context),
                        create_client(args.dest, dest_ssl_context))

    indices = get_index_settings(clients.source_client, args.indices)

    def by_index(item):
        return item[0]

    # Sort for repeatability, and to make it easy to restart part way if the script failed.
    sorted_indices = list(indices.items())
    sorted_indices.sort(key=by_index)

    for index, config in sorted_indices:
        # Skip this "hidden" index that is listed for some reason.
        if index == '.kibana':
            continue
        config = clean_settings(config)
        config = update_for_seven(config)
        print('Creating index %s' % (index if args.name_override is None else args.name_override))
        create_index(clients.dest_client, index, config, args.name_override)

    if args.create_only:
        return

    for index, config in sorted_indices:
        # Same skip as above, so we never try to copy into an index that was
        # never created on the destination.
        if index == '.kibana':
            continue
        copy_index_data(clients, index, args.name_override)


if __name__ == '__main__':
    main()
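# Example invocation (sketch; host names, file paths, and the script file name
# are placeholders, not part of the original): copy all indices matching
# "logs-*" between two TLS-enabled clusters, creating them on the destination
# first.
#
#   python transfer_indices.py \
#       --source https://old-cluster:9200 --dest https://new-cluster:9200 \
#       --cert-file client.crt --key-file client.key --ca-file ca.crt \
#       --indices "logs-*"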
import numpy as np
from sklearn.preprocessing import LabelBinarizer


def vectorize_sequence(sequence):
    lb = LabelBinarizer()
    lb.fit(list(set(sequence)))
    sequence = list(sequence)
    return lb.transform(sequence)


def load(verbose=False, vectorize=True):
    with np.load('rnn-challenge-data.npz') as fh:
        data_x, data_y = fh['data_x'], fh['data_y']
        vali_x, vali_y = fh['val_x'], fh['val_y']
        test_x = fh['test_x']

    if vectorize:
        data_x, vali_x, test_x = map(lambda x: np.asarray([vectorize_sequence(seq) for seq in x]),
                                     [data_x, vali_x, test_x])
        data_y, vali_y = map(lambda seq: np.asarray(vectorize_sequence(seq)), [data_y, vali_y])

    if verbose:
        print('name_x: (count_sequences, len_sequences, count_vocab), dtype')
        # fixed typo: the third name was 'text_x' but the array is test_x
        for name, data in zip(('data_x', 'vali_x', 'test_x'), (data_x, vali_x, test_x)):
            print(f'{name}: {data.shape}, {data.dtype}')
        print('name_y: (count_sequences, count_possible_labels), dtype')
        for name, data in zip(('data_y', 'vali_y'), (data_y, vali_y)):
            print(f'{name}: {data.shape}, {data.dtype}')

    return data_x, data_y, vali_x, vali_y, test_x
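# Quick sketch of what vectorize_sequence above produces: each symbol becomes a
# one-hot row, so a length-5 sequence over a 3-symbol vocabulary yields a
# (5, 3) array. Standalone (only needs scikit-learn), with a local helper that
# mirrors the function above.
from sklearn.preprocessing import LabelBinarizer

def _one_hot(seq):
    lb = LabelBinarizer()
    lb.fit(sorted(set(seq)))
    return lb.transform(list(seq))

print(_one_hot("ABCBA").shape)  # -> (5, 3)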
import argparse if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("infile", type=str, help="Grepped input file") parser.add_argument("num_clients", type=int, help="Number of clients in experiment") parser.add_argument("ec_k_val", type=int, help="k value used in coding") parser.add_argument("num_warmup", type=int, help="The number of groups of k batches sent as warmup") parser.add_argument("num_batches_ignore", type=int, help="The number of batches to ignore after the warmup period") parser.add_argument("redundancy_mode", type=str, help="One of {none, equal, coded, replication, cheap}") args = parser.parse_args() if args.redundancy_mode in ["none", "equal"]: num_batches_per_group = args.ec_k_val mult_factor = 1. elif args.redundancy_mode in ["replication", "cheap"]: num_batches_per_group = 2 * args.ec_k_val mult_factor = 2. elif args.redundancy_mode == "coded": num_batches_per_group = args.ec_k_val + 1 mult_factor = num_batches_per_group / args.ec_k_val else: assert False, "Redundancy mode '{}' not recognized".format(args.redundancy_mode) num_warmup_batches = args.num_clients * args.num_warmup * num_batches_per_group num_ignore_batches = int(args.num_batches_ignore * mult_factor) num_filter_batches = num_warmup_batches + num_ignore_batches num_filter_groups = num_filter_batches / num_batches_per_group with open(args.infile, 'r') as infile: lines = infile.readlines() for line in lines: if "batch_id" in line: batch_id = int(line.split("batch_id=")[-1].split(",")[0]) if batch_id >= num_filter_batches: # Remove newline print(line[:-1]) else: # Not all metrics have a batch_id attached to them. # NOTE: We might be able to just use group_id for this, but I'm not # 100% certain. group_id = int(line.split("group_id=")[-1].split(",")[0]) if group_id >= num_filter_groups: # Remove newline print(line[:-1])
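# Worked example of the filtering arithmetic above (sketch): in "coded" mode
# with ec_k_val=4, num_batches_per_group = 4 + 1 = 5 and mult_factor = 1.25.
# With num_clients=2 and num_warmup=3, num_warmup_batches = 2 * 3 * 5 = 30, so
# no line with batch_id below 30 (plus the post-warmup ignore window) is
# printed.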
import requests
from bs4 import BeautifulSoup
import re
from selenium import webdriver

import model

URL = "https://sobooks.cc"
VERIFY_KEY = '2019777'


def convert_to_beautifulsoup(data):
    """
    Wrap the given data in a BeautifulSoup object.
    :param data: HTML content of the corresponding page
    :return: BeautifulSoup object built from data
    """
    bs = BeautifulSoup(data, "html.parser")
    return bs


def url_pattern():
    """
    Compiled regular expression for matching URLs.
    :return:
    """
    pattern = r'(http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?'
    pattern = re.compile(pattern)
    return pattern


def get_category_link(url):
    """
    Crawl the URL of every category in the navigation bar and collect them into a list.
    :param url:
    :return:
    """
    navbar_links = []
    data = requests.get(url).text
    bs = convert_to_beautifulsoup(data)
    navbar_contents = bs.select('.menu-item')
    for navbar_content in navbar_contents:
        pattern = url_pattern()
        navbar_link = pattern.search(str(navbar_content))
        navbar_links.append(navbar_link.group())
    return navbar_links


def get_url_content(url):
    """
    Return the content of the page at url, for parsing and extracting anything of value.
    :param url: page address
    :return: HTML content of the page at url
    """
    return requests.get(url).text


def get_book_card_content(url, data):
    """
    Get the book-card content of each page, which makes extracting the
    author names and links easier.
    :param url: page address
    :param data: HTML content of the page at url
    :return:
    """
    books_perpage = convert_to_beautifulsoup(data).select('h3')
    return books_perpage


def get_url_book(url, data):
    """
    Get the URL of every book listed on the page at url.
    :param url: page address
    :param data: HTML content of the page at url
    :return: list of the URLs of all books on that page
    """
    book_links = []
    # Each book on the page sits under an h3 tag
    books_perpage = get_book_card_content(url, data)
    for book_content in books_perpage:
        pattern = url_pattern()
        # Extract the link of each book
        book_link = pattern.search(str(book_content))
        book_links.append(book_link.group())
    return book_links


def has_next_page(url, data):
    """
    Check whether the page at url has a "next page".
    :param url: page address
    :param data: HTML content of the page at url
    :return: the URL of the next page if there is one, otherwise False
    """
    bs = BeautifulSoup(data, "html.parser")
    next_page = bs.select('.next-page')
    if next_page:
        url_next_page = url_pattern().search(str(next_page))
        return url_next_page.group()
    else:
        return False


def get_url_books_name(url, data):
    """
    Collect the titles of the books listed on the page at url.
    :param url: page address
    :param data: HTML content of the page at url
    :return: list of book titles on the page at url
    """
    books_name = []
    books_perpage = get_book_card_content(url, data)
    for book in books_perpage:
        book_name = book.select('a')[0].get('title')
        books_name.append(book_name)
    return books_name


def get_book_baidu_neturl(url):
    """
    Get the Baidu netdisk link from a book's detail page.
    :param url: URL of the book's detail page
    :return: the book's Baidu netdisk link, or None if there is none
    """
    data = requests.get(url).text
    bs = convert_to_beautifulsoup(data)
    for a_links in bs.select('a'):
        if a_links.get_text() == '็™พๅบฆ็ฝ‘็›˜':
            book_baidu_url = a_links.get('href')
            # Regex for extracting the Baidu netdisk link
            pattern = r'(http|ftp|https):\/\/pan\.[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?'
            pattern = re.compile(pattern)
            match = pattern.search(book_baidu_url)
            return match.group() if match else None
    return None


def get_book_baidu_password(url):
    """
    Get the extraction code of the Baidu netdisk link stored at url.
    :param url: detail page of the book whose extraction code is wanted
    :return: the extraction code if it exists, otherwise None
    """
    # TODO 1. Try fetching the submitted page with plain requests to obtain the extraction code
    # TODO 2. If that does not work, fall back to driving a browser with selenium
    browser = webdriver.Chrome()
    try:
        browser.get(url)
        browser.find_element_by_class_name('euc-y-s')
        secret_key = browser.find_element_by_class_name('euc-y-i')
        secret_key.send_keys(VERIFY_KEY)
        browser.find_element_by_class_name('euc-y-s').click()
        password = str(browser.find_element_by_class_name('e-secret').text)
    except Exception:
        # The original code closed the browser here and then kept querying it,
        # which raised on the next call; bail out instead.
        return None
    finally:
        browser.quit()
    if password:
        return password[-4:]
    else:
        return None


def get_book_author(url, data):
    """
    Get the list of authors on the book-list page at url.
    :param url: URL of the book-list page
    :param data: HTML content of the book-list page
    :return: list of authors on the page at url
    """
    book_authors = []
    bs = convert_to_beautifulsoup(data)
    for book_author in bs.select('div > p > a'):
        book_authors.append(book_author.text)
    return book_authors


def analy_url_page(url):
    """
    Parse the page at url, in the following steps:
    1. Extract the links of all books on the current page.
    2. Check whether the current page has a next page. If it does, continue
       with step 3; if not, start crawling from the next category, and stop
       once every category has been crawled.
    3. Fetch all books on the current page and build an object for each one
       (name, author, detail-page URL, Baidu netdisk link, extraction code).
    4. Go back to step 2.
    :param url: page address
    :return: None
    """
    while url:
        data = get_url_content(url)
        url_links_page = get_url_book(url, data)
        url_next_page = has_next_page(url, data)
        books_name = get_url_books_name(url, data)
        # Fetch the author list once per page instead of once per book
        book_authors = get_book_author(url, data)
        for i in range(len(books_name)):
            book_name = books_name[i]
            book_author = book_authors[i]
            book_info_url = url_links_page[i]
            book_baidu_url = get_book_baidu_neturl(url_links_page[i])
            book_baidu_password = get_book_baidu_password(url_links_page[i])
            book = model.Book(book_name, book_info_url, book_author, book_baidu_url, book_baidu_password)
            print(book)
        if url_next_page:
            url = url_next_page
        else:
            break


if __name__ == '__main__':
    root_url = URL
    for url in get_category_link(root_url):
        analy_url_page(url)
# Generated by Django 3.2.3 on 2021-06-01 19:32 from django.conf import settings import django.core.validators from django.db import migrations, models import django.db.models.deletion import pyuploadcare.dj.models import tinymce.models class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Profile', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('dp', pyuploadcare.dj.models.ImageField()), ('bio', tinymce.models.HTMLField(max_length=500)), ('phone_number', models.BigIntegerField()), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Posts', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50)), ('description', models.CharField(max_length=5000, null=True)), ('link', models.URLField()), ('image1', pyuploadcare.dj.models.ImageField()), ('image2', pyuploadcare.dj.models.ImageField()), ('image3', pyuploadcare.dj.models.ImageField()), ('postedon', models.DateTimeField(auto_now_add=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-id'], }, ), migrations.CreateModel( name='Likes', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('design', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])), ('usability', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])), ('creativity', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])), ('content', models.IntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])), ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='core.posts')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Comments', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('comment', models.CharField(max_length=2000)), ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='core.posts')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)), ], ), ]
""" Given a binary tree, return the level order traversal of its nodesโ€™ values. (ie, from left to right, level by level). Example : Given binary tree 3 / \ 9 20 / \ 15 7 return its level order traversal as: [ [3], [9,20], [15,7] ] Also think about a version of the question where you are asked to do a level order traversal of the tree when depth of the tree is much greater than number of nodes on a level. """ from collections import deque # Definition for a binary tree node # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: # @param A : root node of tree # @return a list of list of integers def levelOrder(self, A): level = [] q = deque() q.append(A) while q: count = len(q) temp_level = [] while count > 0: count -= 1 temp = q.popleft() temp_level.append(temp.val) if temp.left: q.append(temp.left) if temp.right: q.append(temp.right) level.append(temp_level) return level
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------

from typing import *
import binascii
import hashlib
import os

ALGORITHMS = set(hashlib.algorithms_available)
ALGORITHMS.add('crc32')


class Crc32Proxy:
    # Adapts binascii.crc32 to the hashlib update()/hexdigest() interface.
    def __init__(self):
        self._value = 0

    def update(self, buffer):
        self._value = binascii.crc32(buffer, self._value)

    def hexdigest(self):
        return "%08x" % self._value


def _create(algorithm: str):
    if algorithm == 'crc32':
        return Crc32Proxy()
    return hashlib.new(algorithm)


class Hasher:
    def __init__(self, path: str, algorithms: Tuple[str, ...], *, blocksize=1024 * 64):
        for algorithm in algorithms:
            if algorithm not in ALGORITHMS:
                raise ValueError(f'unsupported algorithm: {algorithm}')
        self._path = path
        self._algorithms = algorithms
        self._blocksize = blocksize
        self._result = None
        self._total_read = 0
        # lazy init:
        self._total_size = None
        self._stream = None
        self._hashers = None

    def __enter__(self):
        self._stream = open(self._path, 'rb')
        self._hashers = [_create(x) for x in self._algorithms]
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._stream.close()

    def read_block(self) -> bool:
        if self._result is not None:
            return False
        buffer = self._stream.read(self._blocksize)
        if buffer:
            self._total_read += len(buffer)
            for m in self._hashers:
                m.update(buffer)
        else:
            self._result = tuple(m.hexdigest() for m in self._hashers)
        return True

    @property
    def total_read(self):
        return self._total_read

    @property
    def total_size(self):
        if self._total_size is None:
            self._total_size = os.path.getsize(self._path)
        return self._total_size

    @property
    def progress(self):
        if self.total_size == 0:
            return 1.
        return self._total_read / self.total_size

    @property
    def result(self) -> Tuple[str, ...]:
        if self._result is None:
            raise RuntimeError('hashing is not finished; keep calling read_block()')
        return self._result
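# Usage sketch for the Hasher above (assumes the class is in scope): write a
# small temporary file, then hash it block by block; progress is available
# between read_block() calls for a progress bar.
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')

with Hasher(tmp.name, ('md5', 'crc32')) as h:
    while h.read_block():
        pass  # h.progress could be reported here
    print(dict(zip(('md5', 'crc32'), h.result)))

os.unlink(tmp.name)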
# Copyright (c) Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE.md file in the project root # for full license information. # ============================================================================== """ Unit tests for function extension """ from __future__ import division, print_function import numpy as np import pytest from cntk import * from cntk.train.trainer import * from cntk.learners import * from cntk.ops.functions import UserFunction from .ops_test_utils import AA class MyPlus(UserFunction): def __init__(self, arg1, arg2, name='f1'): super(MyPlus, self).__init__([arg1, arg2], name=name) self.forward_calls = 0 self.backward_calls = 0 def infer_outputs(self): return [output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def clone(self, cloned_inputs): return MyPlus(cloned_inputs[0], cloned_inputs[1]) def forward(self, arguments, device=None, outputs_to_retain=None): assert len(self.inputs)==2 result = arguments[0] + arguments[1] self.forward_calls += 1 return None, result def backward(self, state, root_gradients, input_gradients): self.backward_calls += 1 for input in input_gradients: input_gradients[input] = root_gradients def test_ext_eval_1(): dim = 4 p = parameter(shape=(dim,), init=10, name='p') i = sequence.input(dim, needs_gradient=True, name='i_var') m = user_function(MyPlus(i, constant(3))) z = m+p input_data = np.random.rand(dim) result = z.eval([input_data]) assert np.allclose(result[0][0], input_data+3+10) def test_ext_eval_2_only_param(): dim = 4 p = parameter(shape=(dim,), init=10, name='p') i = sequence.input(dim, needs_gradient=True, name='i_var') m = user_function(MyPlus(p, constant(3))) # combine does not work # z = combine([m.output]) z = m+i input_data = np.random.rand(dim) result = z.eval([input_data]) assert np.allclose(result[0][0], input_data+3+10) def test_ext_eval_3_no_input(): dim = 4 p = parameter(shape=(dim,), init=10, name='p') m = user_function(MyPlus(p, constant(3))) z = m+0 result = z.eval() # No batch dimension since we have no input assert np.allclose(result, np.zeros_like(p)+10+3) def test_ext_eval_4_a_inside_graph(): dim = 4 p_init = 10 p = parameter(shape=(dim,), init=p_init, name='p') m = user_function(MyPlus(p, constant(3))) z = p * m result = z.eval() # No batch dimension since we have no input assert np.allclose(result, ((p_init*np.ones_like(result))+3)*p_init) def test_ext_eval_4_b_inside_graph(): dim = 4 p_init = 10 p = parameter(shape=(dim,), init=p_init, name='p') z = user_function(p * MyPlus(p, constant(3))) result = z.eval() # No batch dimension since we have no input assert np.allclose(result, ((p_init*np.ones_like(result))+3)*p_init) def test_ext_eval_5_times(): dim = 2 p_init = 10 p = parameter(shape=(dim,), init=p_init, name='p') m = user_function(MyPlus(p, constant(3))) z = times(m, parameter(shape=(2,50), init=2)) result = z.eval() # No batch dimension since we have no input assert np.allclose(result, ((p_init*np.ones_like(result))+3)*2*2) def test_ext_eval_6_clone(): dim = 4 i = sequence.input(dim, needs_gradient=True, name='i_var') m = i + 3 p = parameter(shape=(dim,), init=10, name='p') z = m + p m_udf = user_function(MyPlus(i, constant(3))) z_clone = z.clone('share', {m : m_udf} ); input_data = np.random.rand(dim) result = z_clone.eval([input_data]) assert np.allclose(result[0][0], input_data+3+10) def test_ext_eval_7_placeholder(): dim = 4 p = parameter(shape=(dim,), init=10, name='p') i = sequence.input(dim, needs_gradient=True, name='i_var') pl = 
placeholder() m = user_function(MyPlus(pl, constant(3))) z = m+p z.replace_placeholder(i) input_data = np.random.rand(dim) result = z.eval([input_data]) assert np.allclose(result[0][0], input_data+3+10) def test_ext_train(): dim = 4 p = parameter(shape=(dim,), init=10) i = sequence.input(dim, needs_gradient=True, name='i_var') m = MyPlus(i, constant(3)) # keeping m unwrapped since we need to access its member variables z = user_function(m)+p momentum_time_constant = momentum_as_time_constant_schedule(1100) lr_per_sample = learning_rate_schedule(0.007, UnitType.sample) trainer = Trainer(z, (z+0, z+0), \ [momentum_sgd(z.parameters, lr_per_sample, momentum_time_constant, True)]) i = 0 while i<100: i+=1 input_data = np.random.rand(dim) trainer.train_minibatch([input_data]) assert m.forward_calls == m.backward_calls == 100 def test_udf_clone(): dim = 4 i = sequence.input(dim, needs_gradient=True, name='i_var') m_udf = user_function(MyPlus(i, constant(3))) p = parameter(shape=(dim,), init=10, name='p') z = m_udf + p z_clone = z.clone('share'); input_data = np.random.rand(dim) result = z_clone.eval([input_data]) assert np.allclose(result[0][0], input_data+3+10) @pytest.mark.parametrize("payload", [ (np.asarray([[[1,2,3.0]]]),), (77,), ("a", 2), (), (None) ]) def test_ext_backpropstate(payload): class TestBackPropState(UserFunction): def __init__(self, arg, payload, name='f1'): self.payload = payload super(TestBackPropState, self).__init__([arg]) def infer_outputs(self): return [output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def forward(self, argument, device=None, outputs_to_retain=None): return self.payload, argument def backward(self, state, root_gradients): assert state == self.payload return root_gradients dim = 4 p = parameter(shape=(dim,), init=10) in1 = input(dim, needs_gradient=True, name='i_var') m = user_function(TestBackPropState(in1, payload)) z = m+p lr_per_sample = learning_rate_schedule(0.007, UnitType.sample) trainer = Trainer(None, (z), [sgd(z.parameters, lr_per_sample)]) for i in range(100): input_data = np.random.rand(dim) trainer.train_minibatch({in1:[input_data]}) class LambdaFunc(UserFunction): def __init__(self, arg, when=lambda arg: True, execute=lambda arg: print(arg), name=''): self.when = when self.execute = execute super(LambdaFunc, self).__init__([arg], name=name) def infer_outputs(self): return [output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def forward(self, argument, device=None, outputs_to_retain=None): if self.when(argument): self.execute(argument) return None, argument def backward(self, state, root_gradients): return root_gradients def test_ext_lambdafunc(): dim = 4 class CallbackCounter(object): def __init__(self): self.count = 0 def inc(self, arg): self.count += 1 cb = CallbackCounter() p = parameter(shape=(dim,), init=1) i = input(dim, needs_gradient=True, name='i_var') k = i*p m = LambdaFunc(k, when=lambda arg: np.sum(arg)>1, execute=cb.inc) m = user_function(m) z = m+0 momentum_time_constant = momentum_as_time_constant_schedule(1100) lr_per_sample = learning_rate_schedule(0.007, UnitType.sample) trainer = Trainer(z, (z+0, z+0), \ [momentum_sgd(z.parameters, lr_per_sample, momentum_time_constant, True)]) i = 0 input_data = 0.1 * np.ones(dim) trainer.train_minibatch([input_data]) assert cb.count == 0 input_data = 0.3 * np.ones(dim) trainer.train_minibatch([input_data]) assert cb.count == 1 class PlusAndLast(UserFunction): impl_func = None def __init__(self, arg1, arg2, 
name='f1'): i1 = input(arg1.shape, arg1.dtype, name='i1', dynamic_axes=arg1.dynamic_axes) i2 = input(arg2.shape, arg2.dtype, name='i2', dynamic_axes=arg2.dynamic_axes) self.impl_func = sequence.last(i1 + sequence.broadcast_as(i2, i1)) super(PlusAndLast, self).__init__([arg1, arg2], name=name) def infer_outputs(self): impl_func_output = self.impl_func.output return [output_variable(impl_func_output.shape, impl_func_output.dtype, impl_func_output.dynamic_axes)] def forward(self, arguments, device=None, outputs_to_retain=None): _, result = self.impl_func.forward({self.impl_func.arguments[0] : arguments[0], self.impl_func.arguments[1] : arguments[1]}, [self.impl_func.output]) return None, result[self.impl_func.output] def test_udf_plus_and_last(): x = sequence.input(shape=(2,)) y = input(shape=(2,)) func = user_function(PlusAndLast(x, y)) dt_precision = np.float32 operand1 = [AA([[1., 2.], [3., 4.]], dtype=dt_precision)] operand2 = [AA([2., 2.], dtype=dt_precision)] _, result = func.forward({x : operand1, y : operand2}, [func.output]) expected_forward = AA([[[5., 6.]]], dtype=dt_precision) assert np.allclose(result[func.output], expected_forward) class MultiOutputUserFunction(UserFunction): def __init__(self, arg1, arg2, name='f1'): super(MultiOutputUserFunction, self).__init__([arg1, arg2], name=name) def infer_outputs(self): return [output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes), output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def forward(self, arguments, outputs, device=None, outputs_to_retain=None): assert len(self.inputs)==2 outputs[self.outputs[0]] = arguments[0] + 2*arguments[1] outputs[self.outputs[1]] = 2*arguments[0] + arguments[1] return None def backward(self, state, root_gradients, variables): if self.inputs[0] in variables: variables[self.inputs[0]] = root_gradients[self.outputs[0]] + 2*root_gradients[self.outputs[1]] if self.inputs[1] in variables: variables[self.inputs[1]] = 2*root_gradients[self.outputs[0]] + root_gradients[self.outputs[1]] def test_multioutput_udf(): dim = 2 x = sequence.input(dim, needs_gradient=True, name='x') y = sequence.input(dim, needs_gradient=True, name='y') op = user_function(MultiOutputUserFunction(x, y)) x_data = [AA([[1., 2.], [3., 4.]], dtype=np.float32)] y_data = [AA([[5., 6.], [7., 8.]], dtype=np.float32)] result = op.eval({x : x_data, y : y_data}) assert np.allclose(result[op.outputs[0]], x_data[0] + 2*y_data[0]) assert np.allclose(result[op.outputs[1]], 2*x_data[0] + y_data[0]) op = op.outputs[0] + op.outputs[1] gradients = op.grad({x : x_data, y : y_data}, op.arguments) assert np.allclose(gradients[op.arguments[0]], [[[3., 3.], [3., 3.]]]) assert np.allclose(gradients[op.arguments[1]], [[[3., 3.], [3., 3.]]]) def test_udf_op_name(): dim = 4 p = parameter(shape=(dim,), init=10, name='p') i = input(dim, needs_gradient=True, name='i_var') m = user_function(MyPlus(i, constant(3))) assert str(m.root_function) != '' class MyPlusWithNoGradientToRightOperand(UserFunction): def __init__(self, arg1, arg2, name='f1'): super(MyPlusWithNoGradientToRightOperand, self).__init__([arg1, arg2], name=name) def infer_outputs(self): return [output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes)] def forward(self, arguments, device=None, outputs_to_retain=None): assert len(self.inputs)==2 result = arguments[0] + arguments[1] return None, result def backward(self, state, root_gradients, input_gradients): input_gradients[self.inputs[0]] = 
root_gradients def test_udf_no_gradient_for_some_inputs(): dim = 2 x = sequence.input(dim, needs_gradient=True, name='x') y = sequence.input(dim, needs_gradient=True, name='y') op = user_function(MyPlusWithNoGradientToRightOperand(x, y)) x_data = [AA([[1., 2.], [3., 4.]], dtype=np.float32)] y_data = [AA([[5., 6.], [7., 8.]], dtype=np.float32)] gradients, result = op.grad({x : x_data, y : y_data}, op.arguments, [op.output]) assert np.allclose(gradients[op.arguments[0]], [[[1., 1.], [1., 1.]]]) assert np.allclose(gradients[op.arguments[1]], [[[0., 0.], [0., 0.]]]) assert np.allclose(result, [[[6., 8.], [10., 12.]]]) class MyPlusWithNoGradientNeededForOutput(UserFunction): def __init__(self, arg1, arg2, name='f1'): super(MyPlusWithNoGradientNeededForOutput, self).__init__([arg1, arg2], name=name) def infer_outputs(self): return [output_variable(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes, needs_gradient=False)] def forward(self, arguments, device=None, outputs_to_retain=None): assert len(self.inputs)==2 result = arguments[0] + arguments[1] return None, result def backward(self, state, root_gradients, input_gradients): raise ValueError("MyPlusWithNoGradientNeededForOutput does not need gradient for output and backward must not be called") def test_udf_output_needs_no_gradient(): dim = 2 x = sequence.input(dim, needs_gradient=True, name='x') y = sequence.input(dim, needs_gradient=True, name='y') op = user_function(MyPlusWithNoGradientNeededForOutput(x, y)) x_data = [AA([[1., 2.], [3., 4.]], dtype=np.float32)] y_data = [AA([[5., 6.], [7., 8.]], dtype=np.float32)] gradients, result = op.grad({x : x_data, y : y_data}, op.arguments, [op.output]) assert np.allclose(gradients[op.arguments[0]], [[[0., 0.], [0., 0.]]]) assert np.allclose(gradients[op.arguments[1]], [[[0., 0.], [0., 0.]]]) assert np.allclose(result, [[[6., 8.], [10., 12.]]])
import server


def main():
    s = server.Server()
    s.startServer()


if __name__ == '__main__':
    main()
import matplotlib.patches as mpatches import matplotlib.pyplot as plt styles = mpatches.ArrowStyle.get_styles() figheight = (len(styles)+.5) fig1 = plt.figure(1, (4, figheight)) fontsize = 0.3 * fig1.dpi ax = fig1.add_axes([0, 0, 1, 1], frameon=False, aspect=1.) ax.set_xlim(0, 4) ax.set_ylim(0, figheight) for i, (stylename, styleclass) in enumerate(sorted(styles.items())): y = (float(len(styles)) -0.25 - i) # /figheight p = mpatches.Circle((3.2, y), 0.2, fc="w") ax.add_patch(p) ax.annotate(stylename, (3.2, y), (2., y), #xycoords="figure fraction", textcoords="figure fraction", ha="right", va="center", size=fontsize, arrowprops=dict(arrowstyle=stylename, patchB=p, shrinkA=5, shrinkB=5, fc="w", ec="k", connectionstyle="arc3,rad=-0.05", ), bbox=dict(boxstyle="square", fc="w")) ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) plt.draw() plt.show()
#coding=utf8
"""
# Author: Kellan Fan
# Created Time : Fri 29 Jan 2021 01:32:29 PM CST
# File Name: movie.py
# Description:
"""

from app.common.pg_client import Mypostgres
from flask_restful import reqparse, abort, Resource

parser = reqparse.RequestParser()
parser.add_argument('name', type=str, required=True, help="Name cannot be blank!")


class Movie(Resource):

    def post(self):
        args = parser.parse_args()
        ret = handle_movie(args['name'])
        if ret:
            return ret
        else:
            abort(404, message="movie [{}] doesn't exist".format(args['name']))


def handle_movie(name):
    client = Mypostgres()
    # NOTE: interpolating user input into SQL is vulnerable to injection; if
    # Mypostgres.execute supports parameter binding, prefer passing the LIKE
    # pattern as a bound parameter instead of formatting it in.
    sql = "select name, content from piaohua where name like '%{}%'".format(name)
    ret = client.execute(sql)
    # The previous bare except hid real errors; an explicit emptiness check
    # covers both None and an empty result set.
    if not ret:
        return None
    return {
        'movie' + str(num): {"name": ret[num][0], "content": ret[num][1]}
        for num in range(len(ret))
    }
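# Example request against the resource above (sketch): the route registration
# is not shown in this module, so the /movie path and local dev host are
# assumptions.
#
#   curl -X POST http://127.0.0.1:5000/movie -d "name=Inception"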
#!/usr/bin/env python3 # File name: iaa.py # Description: Computes Cohen and Fleiss kappa statistics # Author: Louis de Bruijn # Date: 14-07-2020 import json import numpy as np import pandas as pd from sklearn.metrics import cohen_kappa_score from statsmodels.stats.inter_rater import fleiss_kappa def cohen_kappa_function(ann1, ann2): """Computes Cohen kappa for pair-wise annotators. :param ann1: annotations provided by first annotator :type ann1: list :param ann2: annotations provided by second annotator :type ann2: list :rtype: float :return: Cohen kappa statistic """ count = 0 for an1, an2 in zip(ann1, ann2): if an1 == an2: count += 1 A = count / len(ann1) # observed agreement A (Po) uniq = set(ann1 + ann2) E = 0 # expected agreement E (Pe) for item in uniq: cnt1 = ann1.count(item) cnt2 = ann2.count(item) count = (cnt1 / len(ann1)) * (cnt2 / len(ann2)) E += count return round((A - E) / (1 - E), 4) def fleiss_kappa_function(M): """Computes Fleiss' kappa for group of annotators. :param M: a matrix of shape (:attr:'N', :attr:'k') with 'N' = number of subjects and 'k' = the number of categories. 'M[i, j]' represent the number of raters who assigned the 'i'th subject to the 'j'th category. :type: numpy matrix :rtype: float :return: Fleiss' kappa score """ N, k = M.shape # N is # of items, k is # of categories n_annotators = float(np.sum(M[0, :])) # # of annotators tot_annotations = N * n_annotators # the total # of annotations category_sum = np.sum(M, axis=0) # the sum of each category over all items # chance agreement p = category_sum / tot_annotations # the distribution of each category over all annotations PbarE = np.sum(p * p) # average chance agreement over all categories # observed agreement P = (np.sum(M * M, axis=1) - n_annotators) / (n_annotators * (n_annotators - 1)) Pbar = np.sum(P) / N # add all observed agreement chances per item and divide by amount of items return round((Pbar - PbarE) / (1 - PbarE), 4) def main(): with open("pairwise.json", "r") as cohen_f: pairwise = json.load(cohen_f) cohen_function = cohen_kappa_function(pairwise["ann2"], pairwise["ann6"]) cohen_sklearn = cohen_kappa_score(pairwise["ann2"], pairwise["ann6"]) with open("group1.json", "r") as fleiss_f: group1 = json.load(fleiss_f) df = pd.DataFrame(group1) matrix = df.values # convert Pandas DataFrame to Numpy matrix fleiss_function = fleiss_kappa_function(matrix) fleiss_statsmodels = fleiss_kappa(matrix) print("\n--Our functions--") print("Cohen Kappa score for ann2 and ann6: {0}.".format(cohen_function)) print("Fleiss Kappa score for group 1: {0}.".format(fleiss_function)) print("\n--Imported functions--") print("Cohen Kappa score for ann2 and ann6: {0}.".format(cohen_sklearn)) print("Fleiss Kappa score for group 1: {0}.".format(fleiss_statsmodels)) if __name__ == "__main__": main()
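# Toy check (sketch, assuming the functions above are in scope): two annotators
# agreeing on 4 of 5 binary labels. Po = 0.8, Pe = 0.24 + 0.24 = 0.48, so
# kappa = (0.8 - 0.48) / 0.52 = 0.6154; the hand-rolled function and sklearn's
# implementation agree up to the rounding applied above.
ann_a = ["yes", "yes", "no", "no", "yes"]
ann_b = ["yes", "no", "no", "no", "yes"]
print(cohen_kappa_function(ann_a, ann_b))         # -> 0.6154
print(round(cohen_kappa_score(ann_a, ann_b), 4))  # -> 0.6154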
from enum import Enum class LensType(Enum): Undefined = 0 DTypeLenWithDistanceEncoder = 1 VRLensWithAntiVibrationMechanism = 2 DXLensExclusiveUseOfNikon = 4 AFSLens = 8 LensSupportingAutomaticDistortionCorrection = 16 Unknown1 = 32 Unknown2 = 64
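# The enum values are powers of two, so this appears to model a lens-type bit
# field (several flags can be set at once on a Nikon lens). A sketch (not from
# the original module) of decoding a raw value into its component flags:
def decode_lens_type(raw: int):
    if raw == 0:
        return [LensType.Undefined]
    return [flag for flag in LensType if flag.value and raw & flag.value]

print(decode_lens_type(0b0000_0110))
# -> [LensType.VRLensWithAntiVibrationMechanism, LensType.DXLensExclusiveUseOfNikon]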
from datetime import date from ....import_utils import * from ....models_dict import MODEL_REQUIREMENTS if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-audio-tfhub-speech_embedding']): import tensorflow as tf import tensorflow_hub as hub import traceback from ....base import catch_vector_errors from ....doc_utils import ModelDefinition from ..base import BaseAudio2Vec SpeechEmbeddingModelDefinition = ModelDefinition(markdown_filepath="encoders/audio/tfhub/speech_embedding.md") __doc__ = SpeechEmbeddingModelDefinition.create_docs() class SpeechEmbedding2Vec(BaseAudio2Vec): definition = SpeechEmbeddingModelDefinition def __init__(self, model_url: str = 'https://tfhub.dev/google/speech_embedding/1', signature: str = 'default'): self.model_url = model_url self.signature = signature self.model = hub.load(self.model_url).signatures[self.signature] self.model_name = self.model_url.replace( 'https://tfhub.dev/google/', '').replace('/', '_') self.vector_length = 96 @property def urls(self): return { 'https://tfhub.dev/google/speech_embedding/1': {'vector_length': 96} } @catch_vector_errors def encode(self, audio, vector_operation='mean'): """ Encode the vector. Example: >>> from vectorhub.encoders.audio import SpeechEmbedding2Vec >>> encoder = SpeechEmbedding2Vec() >>> encoder.encode(...) """ if isinstance(audio, str): audio = self.read(audio) return self._vector_operation(self.model(tf.constant([audio]))[self.signature][0], vector_operation=vector_operation)[0] @catch_vector_errors def bulk_encode(self, audios, vector_operation='mean'): # TODO: Change list comprehension to tensor. # audios = [self.read(audio) if isinstance(audio, str) else audio for audio in audios] # return self._vector_operation(self.model(tf.constant(audios))[self.signature][0], vector_operation=vector_operation) # TODO: Change list comprehension to tensor. return [self.encode(x, vector_operation=vector_operation) for x in audios]
from Homework_6.Exercise_1 import book_dict, role_dict, base_url_book, base_url_role, add_new_item, add_item_id, check_item_in_list, \ compare_dicts, check_new_item, role_update_and_check, delete_item_finally import pytest import uuid import random class TestAddNewRole: @pytest.fixture() def setup_and_teardown(self): self.book_id = add_new_item(base_url_book, book_dict) self.role_dict = role_dict role_dict["book"] = "{}{}".format(base_url_book, self.book_id) yield delete_item_finally(base_url_role, self.role_id) delete_item_finally(base_url_book, self.book_id) def test_new_role_id_type(self, setup_and_teardown): self.role_id = add_new_item(base_url_role, role_dict) assert type(self.role_id) == str def test_new_role_id_emptiness(self, setup_and_teardown): self.role_id = add_new_item(base_url_role, role_dict) assert self.role_id def test_creation_without_level(self, setup_and_teardown): wrong_role = {"name": "Mtsiri", "type": "classic", "book": "{}{}".format(base_url_book, self.book_id), "level": None } self.role_id = add_new_item(base_url_role, wrong_role) assert type(self.role_id) == str def test_creation_without_book(self, setup_and_teardown): wrong_role = {"name": "Mtsiri", "type": "classic", "book": None, "level": 122 } self.role_id = add_new_item(base_url_role, wrong_role) assert self.role_id class TestAddNewRoleExept: def test_creation_without_name(self, wrong_roles_test): (wrong_role_dict, error) = wrong_roles_test with pytest.raises(Exception) as ex: add_new_item(base_url_role, wrong_role_dict) assert error == str(ex.value) class TestAddItemIdFunc: def test_correct_addition(self, param_test): (input, expected_output) = param_test add_item_id(input, expected_output) assert input["id"] == expected_output class TestDictCompareFunc: @pytest.fixture() def setup(self): self.dict1 = {"name": "Mtsiri", "type": "classic", "book": "http://pulse-rest-testing.herokuapp.com/books/6631", "level": 1212, "id": 22} self.dict2 = {"name": "Mtsiri", "type": "classic", "book": "http://pulse-rest-testing.herokuapp.com/books/6631", "level": 1212, "id": 22} self.dict3 = {"name": "Mtsiri", "type": "classic", "book": "http://pulse-rest-testing.herokuapp.com/books/6631", "level": 1212, "id": 14} def test_compare_equal(self,setup): result = compare_dicts(self.dict1, self.dict2) assert result is None def test_compare_not_equal(self, setup): with pytest.raises(Exception) as ex: compare_dicts(self.dict2, self.dict3) assert "Dicts are not equal" == str(ex.value) class TestCheckNewItemRole: def setup_method(self): self.book_id = add_new_item(base_url_book, book_dict) self.role_dict = role_dict role_dict["book"] = "{}{}".format(base_url_book, self.book_id) self.role_id = add_new_item(base_url_role, role_dict) def teardown_method(self): delete_item_finally(base_url_book, self.book_id) delete_item_finally(base_url_role, self.role_id) def test_check_correct_data(self): result = check_new_item(base_url_role, self.role_id, role_dict) assert result is None def test_check_with_wrong_id(self): with pytest.raises(Exception) as ex: check_new_item(base_url_role, str(random.randint(4000000000, 9120000001)), role_dict) assert "Wrong request" == str(ex.value) def test_role_in_list(self): result = check_item_in_list(base_url_role, self.role_id, role_dict) assert result is None def test_role_in_list_wrong_id(self): with pytest.raises(Exception) as ex: check_item_in_list(base_url_role, str(random.randint(4000000000, 9120000001)), role_dict) assert "The item is not in the list" == str(ex.value) class TestRoleUpdate: def 
setup_method(self): self.book_id = add_new_item(base_url_book, book_dict) self.role_dict = role_dict role_dict["book"] = "{}{}".format(base_url_book, self.book_id) self.role_id = add_new_item(base_url_role, role_dict) def teardown_method(self): delete_item_finally(base_url_book, self.book_id) delete_item_finally(base_url_role, self.role_id) def test_updated_data(self): result = role_update_and_check(base_url_role, self.role_id, role_dict, new_name=str(uuid.uuid4()), new_type=str(uuid.uuid4()), new_book=role_dict["book"], new_level=random.randint(40000000, 2147483647)) assert result is None def test_update_one_parameter(self): result = role_update_and_check(base_url_role, self.role_id, role_dict, new_name="vova", new_book=role_dict["book"]) assert result is None def test_update_attempt_wrong_id(self): with pytest.raises(Exception) as ex: role_update_and_check(base_url_role, str(random.randint(4000000000, 9120000001)), role_dict, new_name=str(uuid.uuid4()), new_type=str(uuid.uuid4())) assert "Wrong request" == str(ex.value) def test_attempt_update_wrong_url(self): with pytest.raises(Exception) as ex: role_update_and_check(base_url_role+"wrong", str(random.randint(4000000000, 9120000001)), role_dict, new_name=str(uuid.uuid4()), new_type=str(uuid.uuid4())) assert "Wrong request" == str(ex.value) class TestDeleteItemFunc: @pytest.fixture() def setup(self): self.book_id = add_new_item(base_url_book, book_dict) self.role_dict = role_dict role_dict["book"] = "{}{}".format(base_url_book, self.book_id) self.role_id = add_new_item(base_url_role, role_dict) yield delete_item_finally(base_url_book, self.book_id) def test_delete_existent(self, setup): # new_test_id = add_new_item(base_url_role, role_dict) result = delete_item_finally(base_url_role, self.role_id) assert result is None def test_try_delete_with_id_none(self): with pytest.raises(Exception) as ex: delete_item_finally(base_url_role, None) assert "Item id is None" == str(ex.value) def test_try_delete_with_wrong_id(self): with pytest.raises(Exception) as ex: delete_item_finally(base_url_role, str(random.randint(4000000000, 9120000001))) assert "Wrong request status code. Item hasn't been deleted" == str(ex.value)
""" Network architecture visualizer using graphviz """ import sys from epe_darts import genotypes as gt if __name__ == '__main__': if len(sys.argv) != 2: raise ValueError("usage:\n python {} GENOTYPE".format(sys.argv[0])) genotype_str = sys.argv[1] try: genotype = gt.from_str(genotype_str) except AttributeError: raise ValueError("Cannot parse {}".format(genotype_str)) gt.plot(genotype.normal, "normal") gt.plot(genotype.reduce, "reduction")
""" This module is the geometrical part of the ToFu general package It includes all functions and object classes necessary for tomography on Tokamaks """ # Built-in import sys import os import warnings import copy # Common import numpy as np import scipy.interpolate as scpinterp import scipy.stats as scpstats import datetime as dtm import matplotlib.pyplot as plt import matplotlib as mpl # ToFu-specific from tofu import __version__ as __version__ import tofu.pathfile as tfpf import tofu.utils as utils from . import _def as _def from . import _GG as _GG from . import _core from . import _check_optics from . import _comp_optics as _comp_optics from . import _plot_optics as _plot_optics import tofu.spectro._rockingcurve as _rockingcurve __all__ = ['CrystalBragg'] _Type = 'Tor' _NTHREADS = 16 # rotate / translate instance _RETURN_COPY = False _USE_NON_PARALLELISM = True """ ############################################################################### ############################################################################### Ves class and functions ############################################################################### ############################################################################### """ class CrystalBragg(utils.ToFuObject): """ A class defining crystals for Bragg diffraction A crystal can be of Type flat, cylindrical or spherical It is characterized by its: - geometry (Type, dimensions, curvature radii and position/orientation) - Material and lattice - Bragg parameters (angle vs lambda) Parameters ---------- Id : str / tfpf.ID A name string or a pre-built tfpf.ID class to be used to identify this particular instance, if a string is provided, it is fed to tfpf.ID() dgeom : dict An array (2,N) or (N,2) defining the contour of the vacuum vessel in a cross-section, if not closed, will be closed automatically dspectral: str Flag indicating whether the vessel will be a torus ('Tor') or a linear device ('Lin') SavePath : None / str If provided, forces the default saving path of the object to the provided value """ # Fixed (class-wise) dictionary of default properties _ddef = { 'Id': { 'shot': 0, 'Exp': 'dummy', 'Diag': 'dummy', 'include': [ 'Mod', 'Cls', 'Exp', 'Diag', 'Name', 'shot', 'version', ], }, 'dgeom': {'Type': 'sph', 'Typeoutline': 'rect'}, 'dmat': {}, 'dbragg': {'braggref': np.pi/4.}, 'dmisc': {'color': 'k'}, } _dplot = {'cross':{'Elt':'P', 'dP':{'color':'k','lw':2}, 'dI':{'color':'k','ls':'--','marker':'x','ms':8,'mew':2}, 'dBs':{'color':'b','ls':'--','marker':'x','ms':8,'mew':2}, 'dBv':{'color':'g','ls':'--','marker':'x','ms':8,'mew':2}, 'dVect':{'color':'r','scale':10}}, 'hor':{'Elt':'P', 'dP':{'color':'k','lw':2}, 'dI':{'color':'k','ls':'--'}, 'dBs':{'color':'b','ls':'--'}, 'dBv':{'color':'g','ls':'--'}, 'Nstep':50}, '3d':{}} # _DEFLAMB = 3.971561e-10 # _DEFNPEAKS = 12 # _DREFLECT_DTYPES = {'specular':0, 'diffusive':1, 'ccube':2} # Does not exist beofre Python 3.6 !!! 
def __init_subclass__(cls, color='k', **kwdargs): # Python 2 super(CrystalBragg,cls).__init_subclass__(**kwdargs) # Python 3 #super().__init_subclass__(**kwdargs) cls._ddef = copy.deepcopy(CrystalBragg._ddef) cls._dplot = copy.deepcopy(CrystalBragg._dplot) cls._set_color_ddef(cls._color) @classmethod def _set_color_ddef(cls, color): cls._ddef['dmisc']['color'] = mpl.colors.to_rgba(color) def __init__(self, dgeom=None, dmat=None, dbragg=None, Id=None, Name=None, Exp=None, Diag=None, shot=None, fromdict=None, sep=None, SavePath=os.path.abspath('./'), SavePath_Include=tfpf.defInclude, color=None): # To replace __init_subclass__ for Python 2 if sys.version[0]=='2': self._dstrip = utils.ToFuObjectBase._dstrip.copy() self.__class__._strip_init() # Create a dplot at instance level self._dplot = copy.deepcopy(self.__class__._dplot) kwdargs = locals() del kwdargs['self'] # super() super(CrystalBragg,self).__init__(**kwdargs) def _reset(self): # super() super(CrystalBragg,self)._reset() self._dgeom = dict.fromkeys(self._get_keys_dgeom()) self._dmat = dict.fromkeys(self._get_keys_dmat()) self._dbragg = dict.fromkeys(self._get_keys_dbragg()) self._dmisc = dict.fromkeys(self._get_keys_dmisc()) #self._dplot = copy.deepcopy(self.__class__._ddef['dplot']) @classmethod def _checkformat_inputs_Id(cls, Id=None, Name=None, Exp=None, Diag=None, shot=None, Type=None, include=None, **kwdargs): if Id is not None: assert isinstance(Id,utils.ID) Name, Exp, Type = Id.Name, Id.Exp, Id.Type if Type is None: Type = cls._ddef['dgeom']['Type'] if Exp is None: Exp = cls._ddef['Id']['Exp'] if Diag is None: Diag = cls._ddef['Id']['Diag'] if shot is None: shot = cls._ddef['Id']['shot'] if include is None: include = cls._ddef['Id']['include'] dins = {'Name':{'var':Name, 'cls':str}, 'Exp': {'var':Exp, 'cls':str}, 'Diag': {'var':Diag, 'cls':str}, 'shot': {'var':shot, 'cls':int}, 'Type': {'var':Type, 'in':['sph']}, 'include':{'var':include, 'listof':str}} dins, err, msg = cls._check_InputsGeneric(dins) if err: raise Exception(msg) kwdargs.update({'Name':Name, 'shot':shot, 'Exp':Exp, 'Diag':Diag, 'Type':Type, 'include':include}) return kwdargs ########### # Get largs ########### @staticmethod def _get_largs_dgeom(sino=True): largs = ['dgeom'] return largs @staticmethod def _get_largs_dmat(): largs = ['dmat'] return largs @staticmethod def _get_largs_dbragg(): largs = ['dbragg'] return largs @staticmethod def _get_largs_dmisc(): largs = ['color'] return largs ########### # Get keys of dictionnaries ########### @staticmethod def _get_keys_dgeom(): lk = ['Type', 'Typeoutline', 'summit', 'center', 'extenthalf', 'surface', 'nin', 'nout', 'e1', 'e2', 'rcurve', 'move', 'move_param', 'move_kwdargs'] return lk @staticmethod def _get_keys_dmat(): lk = ['formula', 'density', 'symmetry', 'lengths', 'angles', 'cut', 'd', 'alpha', 'beta', 'nin', 'nout', 'e1', 'e2'] return lk @staticmethod def _get_keys_dbragg(): lk = ['rockingcurve', 'lambref', 'braggref'] return lk @staticmethod def _get_keys_dmisc(): lk = ['color'] return lk ########### # _init ########### def _init(self, dgeom=None, dmat=None, dbragg=None, color=None, **kwdargs): allkwds = dict(locals(), **kwdargs) largs = self._get_largs_dgeom() kwds = self._extract_kwdargs(allkwds, largs) self.set_dgeom(**kwds) largs = self._get_largs_dmat() kwds = self._extract_kwdargs(allkwds, largs) self.set_dmat(**kwds) largs = self._get_largs_dbragg() kwds = self._extract_kwdargs(allkwds, largs) self.set_dbragg(**kwds) largs = self._get_largs_dmisc() kwds = self._extract_kwdargs(allkwds, largs) 
self._set_dmisc(**kwds) self._dstrip['strip'] = 0 ########### # set dictionaries ########### def set_dgeom(self, dgeom=None): self._dgeom = _check_optics._checkformat_dgeom( dgeom=dgeom, ddef=self._ddef['dgeom'], valid_keys=self._get_keys_dgeom(), ) if self._dgeom['move'] is not None: self.set_move( move=self._dgeom['move'], param=self._dgeom['move_param'], **self._dgeom['move_kwdargs'], ) def set_dmat(self, dmat=None): self._dmat = _check_optics._checkformat_dmat( dmat=dmat, dgeom=self._dgeom, ddef=self._ddef['dmat'], valid_keys=self._get_keys_dmat() ) def set_dbragg(self, dbragg=None): self._dbragg = _check_optics._checkformat_dbragg( dbragg=dbragg, ddef=self._ddef['dbragg'], valid_keys=self._get_keys_dbragg(), dmat=self._dmat, ) def _set_color(self, color=None): color = _check_optics._checkformat_inputs_dmisc( color=color, ddef=self._ddef, ) self._dmisc['color'] = color self._dplot['cross']['dP']['color'] = color self._dplot['hor']['dP']['color'] = color # self._dplot['3d']['dP']['color'] = color def _set_dmisc(self, color=None): self._set_color(color) ########### # strip dictionaries ########### def _strip_dgeom(self, lkeep=None): lkeep = self._get_keys_dgeom() utils.ToFuObject._strip_dict(self._dgeom, lkeep=lkeep) def _strip_dmat(self, lkeep=None): lkeep = self._get_keys_dmat() utils.ToFuObject._strip_dict(self._dmat, lkeep=lkeep) def _strip_dbragg(self, lkeep=None): lkeep = self._get_keys_dbragg() utils.ToFuObject._strip_dict(self._dbragg, lkeep=lkeep) def _strip_dmisc(self, lkeep=['color']): utils.ToFuObject._strip_dict(self._dmisc, lkeep=lkeep) ########### # rebuild dictionaries ########### def _rebuild_dgeom(self, lkeep=None): lkeep = self._get_keys_dgeom() reset = utils.ToFuObject._test_Rebuild(self._dgeom, lkeep=lkeep) if reset: utils.ToFuObject._check_Fields4Rebuild(self._dgeom, lkeep=lkeep, dname='dgeom') self._set_dgeom(dgeom=self._dgeom) def _rebuild_dmat(self, lkeep=None): lkeep = self._get_keys_dmat() reset = utils.ToFuObject._test_Rebuild(self._dmat, lkeep=lkeep) if reset: utils.ToFuObject._check_Fields4Rebuild(self._dmat, lkeep=lkeep, dname='dmat') self.set_dmat(self._dmat) def _rebuild_dbragg(self, lkeep=None): lkeep = self._get_keys_dbragg() reset = utils.ToFuObject._test_Rebuild(self._dbragg, lkeep=lkeep) if reset: utils.ToFuObject._check_Fields4Rebuild(self._dbragg, lkeep=lkeep, dname='dbragg') self.set_dbragg(self._dbragg) def _rebuild_dmisc(self, lkeep=['color']): reset = utils.ToFuObject._test_Rebuild(self._dmisc, lkeep=lkeep) if reset: utils.ToFuObject._check_Fields4Rebuild(self._dmisc, lkeep=lkeep, dname='dmisc') self._set_dmisc(color=self.dmisc['color']) ########### # _strip and get/from dict ########### @classmethod def _strip_init(cls): cls._dstrip['allowed'] = [0,1] nMax = max(cls._dstrip['allowed']) doc = """ 1: Remove nothing""" doc = utils.ToFuObjectBase.strip.__doc__.format(doc,nMax) if sys.version[0]=='2': cls.strip.__func__.__doc__ = doc else: cls.strip.__doc__ = doc def strip(self, strip=0): # super() super(CrystalBragg, self).strip(strip=strip) def _strip(self, strip=0): if strip==0: self._rebuild_dgeom() self._rebuild_dmat() self._rebuild_dbragg() self._rebuild_dmisc() else: self._strip_dgeom() self._strip_dmat() self._strip_dbragg() self._strip_dmisc() def _to_dict(self): dout = {'dgeom':{'dict':self._dgeom, 'lexcept':None}, 'dmat':{'dict':self._dmat, 'lexcept':None}, 'dbragg':{'dict':self._dbragg, 'lexcept':None}, 'dmisc':{'dict':self._dmisc, 'lexcept':None}, 'dplot':{'dict':self._dplot, 'lexcept':None}} return dout def _from_dict(self, fd): 
        self._dgeom.update(**fd.get('dgeom', {}))
        self._dmat.update(**fd.get('dmat', {}))
        self._dbragg.update(**fd.get('dbragg', {}))
        self._dmisc.update(**fd.get('dmisc', {}))
        self._dplot.update(**fd.get('dplot', {}))

    # -----------
    # Properties
    # -----------

    @property
    def Type(self):
        """Return the type of structure"""
        return self._Id.Type

    @property
    def dgeom(self):
        """Return the geometry dictionary"""
        return self._dgeom

    @property
    def dmat(self):
        """Return the material-properties dictionary"""
        return self._dmat

    @property
    def dbragg(self):
        """Return the bragg-diffraction dictionary"""
        return self._dbragg

    @property
    def dmisc(self):
        """Return the miscellaneous dictionary"""
        return self._dmisc

    # @property
    # def nin(self):
    #     return self._dgeom['nin']

    # @property
    # def nout(self):
    #     return self._dgeom['nout']

    # @property
    # def e1(self):
    #     return self._dgeom['e1']

    # @property
    # def e2(self):
    #     return self._dgeom['e2']

    @property
    def summit(self):
        return self._dgeom['summit']

    @property
    def center(self):
        return self._dgeom['center']

    @property
    def ismobile(self):
        return self._dgeom['move'] not in [None, False]

    @property
    def rockingcurve(self):
        if self._dbragg.get('rockingcurve') is not None:
            if self._dbragg['rockingcurve'].get('type') is not None:
                return self._dbragg['rockingcurve']
        raise Exception("rockingcurve was not set!")

    # --------------------------------------
    # methods for getting unit vectors basis
    # --------------------------------------

    def get_unit_vectors(self, use_non_parallelism=None):
        """ Return the unit vectors (direct orthonormal basis)

        Depending on:
            use_non_parallelism: True  => return the geometrical basis
            use_non_parallelism: False => return the mesh basis
        """
        if use_non_parallelism is None:
            use_non_parallelism = _USE_NON_PARALLELISM

        if use_non_parallelism is True:
            nout = self._dmat['nout']
            e1 = self._dmat['e1']
            e2 = self._dmat['e2']
        else:
            nout = self._dgeom['nout']
            e1 = self._dgeom['e1']
            e2 = self._dgeom['e2']
        return nout, e1, e2, use_non_parallelism

    # -----------------
    # methods for color
    # -----------------

    def set_color(self, col):
        self._set_color(col)

    def get_color(self):
        return self._dmisc['color']

    # -----------------
    # methods for printing
    # -----------------

    def get_summary(self, sep='  ', line='-', just='l',
                    table_sep=None, verb=True, return_=False):
        """ Summary description of the object content """

        # -----------------------
        # Build material
        col0 = [
            'formula', 'symmetry', 'cut', 'density',
            'd (A)',
            'bragg({:9.6} A) (deg)'.format(self._dbragg['lambref']*1e10),
            'Type', 'outline', 'surface (cm²)', 'rcurve',
            'rocking curve',
        ]
        ar0 = [self._dmat['formula'], self._dmat['symmetry'],
               str(self._dmat['cut']), str(self._dmat['density']),
               '{0:5.3f}'.format(self._dmat['d']*1.e10),
               str(self._dbragg['braggref']*180./np.pi),
               self._dgeom['Type'], self._dgeom['Typeoutline'],
               '{0:5.1f}'.format(self._dgeom['surface']*1.e4),
               '{0:6.3f}'.format(self._dgeom['rcurve'])]
        try:
            ar0.append(self.rockingcurve['type'])
        except Exception as err:
            ar0.append('None')

        # -----------------------
        # Build geometry
        col1 = ['half-extent', 'summit', 'center', 'nout', 'e1',
                'alpha', 'beta']
        ar1 = [
            str(np.round(self._dgeom['extenthalf'], decimals=3)),
            str(np.round(self._dgeom['summit'], decimals=2)),
            str(np.round(self._dgeom['center'], decimals=2)),
            str(np.round(self._dmat['nout'], decimals=3)),
            str(np.round(self._dmat['e1'], decimals=3)),
            str(np.round(self._dmat['alpha'], decimals=6)),
            str(np.round(self._dmat['beta'], decimals=6)),
        ]
        if self._dgeom.get('move') not in [None, False]:
            col1 += ['move', 'param']
            ar1 += [self._dgeom['move'],
                    str(np.round(self._dgeom['move_param'], decimals=5))]
        if self._dmisc.get('color') is not None:
            col1.append('color')
            ar1.append(str(self._dmisc['color']))

        lcol = [col0, col1]
        lar = [ar0, ar1]
        return self._get_summary(lar, lcol,
                                 sep=sep, line=line, table_sep=table_sep,
                                 verb=verb, return_=return_)

    # -----------------
    # methods for moving
    # -----------------

    def _update_or_copy(self, dgeom, pinhole=None,
                        return_copy=None,
                        name=None, diag=None, shot=None):
        if return_copy is None:
            return_copy = _RETURN_COPY
        for kk, vv in self._dgeom.items():
            if kk not in dgeom.keys():
                dgeom[kk] = vv
        if return_copy is True:
            if name is None:
                name = self.Id.Name + 'copy'
            if diag is None:
                diag = self.Id.Diag
            if shot is None:
                # was 'diag = self.Id.shot', which overwrote diag and
                # left shot undefined
                shot = self.Id.shot
            return self.__class__(dgeom=dgeom,
                                  dbragg=self._dbragg,
                                  dmat=self._dmat,
                                  color=self._dmisc['color'],
                                  Exp=self.Id.Exp,
                                  Diag=diag,
                                  Name=name,
                                  shot=shot,
                                  SavePath=self.Id.SavePath)
        else:
            dgeom0 = self.dgeom
            try:
                self.set_dgeom(dgeom=dgeom)
                self._dmat = _check_optics._checkformat_dmat(
                    dmat={
                        k0: v0 for k0, v0 in self._dmat.items()
                        if k0 not in ['nin', 'nout', 'e1', 'e2']
                    },
                    dgeom=self._dgeom,
                    ddef=self._ddef['dmat'],
                    valid_keys=self._get_keys_dmat()
                )
            except Exception as err:
                # Make sure instance does not move
                self.set_dgeom(dgeom=dgeom0)
                msg = (str(err)
                       + "\nAn exception occurred during updating\n"
                       + "  => instance unmoved")
                raise Exception(msg)

    def _rotate_or_translate(self, func, **kwdargs):
        pts = np.array([self._dgeom['summit'], self._dgeom['center']]).T
        if 'rotate' in func.__name__:
            vect = np.array([
                self._dgeom['nout'],
                self._dgeom['e1'],
                self._dgeom['e2'],
            ]).T
            pts, vect = func(pts=pts, vect=vect, **kwdargs)
            return {'summit': pts[:, 0], 'center': pts[:, 1],
                    'nout': vect[:, 0], 'nin': -vect[:, 0],
                    'e1': vect[:, 1], 'e2': vect[:, 2]}
        else:
            pts = func(pts=pts, **kwdargs)
            return {'summit': pts[:, 0], 'center': pts[:, 1]}

    def translate_in_cross_section(self, distance=None, direction_rz=None,
                                   phi=None,
                                   return_copy=None,
                                   diag=None, name=None, shot=None):
        """ Translate the instance in the cross-section """
        if phi is None:
            phi = np.arctan2(*self.summit[1::-1])
            msg = ("Poloidal plane was not explicitly specified\n"
                   + "  => phi set to self.summit's phi ({})".format(phi))
            warnings.warn(msg)
        dgeom = self._rotate_or_translate(
            self._translate_pts_poloidal_plane,
            phi=phi, direction_rz=direction_rz, distance=distance)
        return self._update_or_copy(dgeom,
                                    return_copy=return_copy,
                                    diag=diag, name=name, shot=shot)

    def translate_3d(self, distance=None, direction=None,
                     return_copy=None,
                     diag=None, name=None, shot=None):
        """ Translate the instance in provided direction """
        dgeom = self._rotate_or_translate(
            self._translate_pts_3d,
            direction=direction, distance=distance)
        return self._update_or_copy(dgeom,
                                    return_copy=return_copy,
                                    diag=diag, name=name, shot=shot)

    def rotate_in_cross_section(self, angle=None, axis_rz=None,
                                phi=None,
                                return_copy=None,
                                diag=None, name=None, shot=None):
        """ Rotate the instance in the cross-section """
        if phi is None:
            phi = np.arctan2(*self.summit[1::-1])
            msg = ("Poloidal plane was not explicitly specified\n"
                   + "  => phi set to self.summit's phi ({})".format(phi))
            warnings.warn(msg)
        dgeom = self._rotate_or_translate(
            self._rotate_pts_vectors_in_poloidal_plane,
            axis_rz=axis_rz, angle=angle, phi=phi)
        return self._update_or_copy(dgeom,
                                    return_copy=return_copy,
                                    diag=diag, name=name, shot=shot)

    def rotate_around_torusaxis(self, angle=None,
                                return_copy=None,
                                diag=None, name=None, shot=None):
        """ Rotate the instance around the torus axis """
        dgeom = self._rotate_or_translate(
self._rotate_pts_vectors_around_torusaxis, angle=angle) return self._update_or_copy(dgeom, return_copy=return_copy, diag=diag, name=name, shot=shot) def rotate_around_3daxis(self, angle=None, axis=None, return_copy=None, diag=None, name=None, shot=None): """ Rotate the instance around the provided 3d axis """ dgeom = self._rotate_or_translate( self._rotate_pts_vectors_around_3daxis, axis=axis, angle=angle) return self._update_or_copy(dgeom, return_copy=return_copy, diag=diag, name=name, shot=shot) def set_move(self, move=None, param=None, **kwdargs): """ Set the default movement parameters A default movement can be set for the instance, it can be any of the pre-implemented movement (rotations or translations) This default movement is the one that will be called when using self.move() Specify the type of movement via the name of the method (passed as a str to move) Specify, for the geometry of the instance at the time of defining this default movement, the current value of the associated movement parameter (angle / distance). This is used to set an arbitrary difference for user who want to use absolute position values The desired incremental movement to be performed when calling self.move will be deduced by substracting the stored param value to the provided param value. Just set the current param value to 0 if you don't care about a custom absolute reference. kwdargs must be a parameters relevant to the chosen method (axis, direction...) e.g.: self.set_move(move='rotate_around_3daxis', param=0., axis=([0.,0.,0.], [1.,0.,0.])) self.set_move(move='translate_3d', param=0., direction=[0.,1.,0.]) """ move, param, kwdargs = self._checkformat_set_move(move, param, kwdargs) self._dgeom['move'] = move self._dgeom['move_param'] = param if isinstance(kwdargs, dict) and len(kwdargs) == 0: kwdargs = None self._dgeom['move_kwdargs'] = kwdargs def move(self, param): """ Set new position to desired param according to default movement Can only be used if default movement was set before See self.set_move() """ param = self._move(param, dictname='_dgeom') self._dgeom['move_param'] = param # ----------------- # methods for rocking curve # ----------------- def get_rockingcurve_func(self, lamb=None, n=None): """ Return the rocking curve function Also return the wavelength (lamb) (in meters) for which it was computed and the associated reference bragg angle (in rad) """ drock = self.rockingcurve if drock['type'] == 'tabulated-1d': if lamb is not None and lamb != drock['lamb']: msg = ("rocking curve was tabulated only for:\n" + "\tlamb = {} m\n".format(lamb) + " => Please let lamb=None") raise Exception(msg) lamb = drock['lamb'] bragg = self._checkformat_bragglamb(lamb=lamb, n=n) func = scpinterp.interp1d(drock['dangle'] + bragg, drock['value'], kind='linear', bounds_error=False, fill_value=0, assume_sorted=True) elif drock['type'] == 'tabulated-2d': lmin, lmax = drock['lamb'].min(), drock['lamb'].max() if lamb is None: lamb = drock['lamb'] if lamb < lmin or lamb > lmax: msg = ("rocking curve was tabulated only in interval:\n" + "\tlamb in [{}; {}] m\n".format(lmin, lmax) + " => Please set lamb accordingly") raise Exception(msg) bragg = self._checkformat_bragglamb(lamb=lamb, n=n) def func(angle, lamb=lamb, bragg=bragg, drock=drock): return scpinterp.interp2d(drock['dangle']+bragg, drock['lamb'], drock['value'], kind='linear', bounds_error=False, fill_value=0, assume_sorted=True)(angle, lamb) else: # TBC raise NotImplementedError def func(angle, d=d, delta_bragg=delta_bragg, Rmax=drock['Rmax'], 
                     sigma=drock['sigma']):
                # NB: this draft is unreachable (it follows the
                # raise NotImplementedError above) and 'd', 'delta_bragg'
                # and 'bragg' are not defined in this scope
                core = sigma**2/((angle - (bragg+delta_bragg))**2 + sigma**2)
                if Rmax is None:
                    return core/(sigma*np.pi)
                else:
                    return Rmax*core
        return func, lamb, bragg

    def plot_rockingcurve(self, lamb=None, n=None, sigma=None,
                          npts=None, color=None, ang_units=None,
                          dmargin=None, fs=None, ax=None, legend=None):
        # raises a clear Exception if no rocking curve was set
        drock = self.rockingcurve
        func, lamb, bragg = self.get_rockingcurve_func(lamb=lamb, n=n)
        axtit = 'Rocking curve for ' + self.Id.Name
        return _plot_optics.CrystalBragg_plot_rockingcurve(
            func=func, bragg=bragg, lamb=lamb,
            sigma=sigma, npts=npts,
            ang_units=ang_units, axtit=axtit, color=color,
            fs=fs, ax=ax, legend=legend)

    def compute_rockingcurve(
        self,
        ih=None, ik=None, il=None, lamb=None,
        use_non_parallelism=None, na=None,
        alpha_limits=None,
        therm_exp=None, plot_therm_exp=None,
        plot_asf=None, plot_power_ratio=None,
        plot_asymmetry=None, plot_cmaps=None,
        verb=None, returnas=None,
    ):
        return _rockingcurve.compute_rockingcurve(
            ih=ih, ik=ik, il=il, lamb=lamb,
            use_non_parallelism=use_non_parallelism, na=na,
            alpha_limits=alpha_limits,
            therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
            plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
            plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
            # pass the user-provided values through (was verb=None,
            # returnas=None, which silently discarded them)
            verb=verb, returnas=returnas,
        )

    def plot_var_temp_changes_wavelengths(
        self,
        ih=None, ik=None, il=None, lambdas=None,
        use_non_parallelism=None, na=None, alpha_limits=None,
        therm_exp=None, plot_therm_exp=None,
        plot_asf=None, plot_power_ratio=None,
        plot_asymmetry=None, plot_cmaps=None,
        quantity=None,
        curv_radius=None, pixel_size=None,
    ):
        return _rockingcurve.plot_var_temp_changes_wavelengths(
            ih=ih, ik=ik, il=il, lambdas=lambdas,
            use_non_parallelism=use_non_parallelism, na=na,
            alpha_limits=alpha_limits,
            therm_exp=therm_exp, plot_therm_exp=plot_therm_exp,
            plot_asf=plot_asf, plot_power_ratio=plot_power_ratio,
            plot_asymmetry=plot_asymmetry, plot_cmaps=plot_cmaps,
            quantity=quantity,
            curv_radius=curv_radius, pixel_size=pixel_size,
        )

    # -----------------
    # methods for surface and contour sampling
    # -----------------

    def sample_outline_plot(self, use_non_parallelism=None, res=None):
        if self._dgeom['Type'] == 'sph':
            if self._dgeom['Typeoutline'] == 'rect':
                nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
                    use_non_parallelism=use_non_parallelism,
                )
                outline = _comp_optics.CrystBragg_sample_outline_plot_sphrect(
                    self._dgeom['summit'] - nout*self._dgeom['rcurve'],
                    nout, e1, e2,
                    self._dgeom['rcurve'], self._dgeom['extenthalf'], res,
                )
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError
        return outline

    # -----------------
    # methods for checking bragg / lamb inputs
    # -----------------

    def _checkformat_bragglamb(self, bragg=None, lamb=None, n=None):
        lc = [lamb is not None, bragg is not None]
        if not any(lc):
            lamb = self._dbragg['lambref']
            lc[0] = True
        assert np.sum(lc) == 1, "Provide lamb xor bragg!"
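        # Hedged usage note: exactly one of (lamb, bragg) may be given, e.g.
        #   self._checkformat_bragglamb(lamb=3.96e-10)   # from a wavelength
        #   self._checkformat_bragglamb(bragg=np.pi/4)   # from a bragg angle
        # (the values above are purely illustrative)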
if lc[0]: bragg = self.get_bragg_from_lamb( np.atleast_1d(lamb), n=n, ) else: bragg = np.atleast_1d(bragg) return bragg def _checkformat_get_Rays_from(self, phi=None, bragg=None): assert phi is not None assert bragg is not None bragg = np.atleast_1d(bragg) phi = np.atleast_1d(phi) nrays = max(phi.size, bragg.size) if not phi.shape == bragg.shape: if phi.size == 1: phi = np.full(bragg.shape, phi[0]) elif bragg.size == 1: bragg = np.full(phi.shape, bragg[0]) else: msg = "phi and bragg/lamb must have the same shape!\n" msg += " phi.shape: %s\n"%str(phi.shape) msg += " bragg/lamb.shape: %s\n"%str(bragg.shape) raise Exception(msg) return phi, bragg def _get_rays_from_cryst( self, phi=None, bragg=None, lamb=None, n=None, dtheta=None, psi=None, ntheta=None, npsi=None, use_non_parallelism=None, include_summit=None, grid=None, ): # Get phi, bragg bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb) phi, bragg = self._checkformat_get_Rays_from(phi=phi, bragg=bragg) # assert phi.ndim == 1 # Get local summits, nout, e1, e2 pts_start, nout, e1, e2 = self.get_local_noute1e2( dtheta=dtheta, psi=psi, use_non_parallelism=use_non_parallelism, ntheta=ntheta, npsi=npsi, include_summit=include_summit, ) nin = -nout # reshape for broadcast if grid is True: nin = nin[..., None] e1 = e1[..., None] e2 = e2[..., None] else: assert bragg.shape == nin.shape[1:] # Compute start point (D) and unit vectors (us) vect = ( np.sin(bragg)*nin + np.cos(bragg)*(np.cos(phi)*e1 + np.sin(phi)*e2) ) return pts_start, vect def get_rays_from_cryst( self, phi=None, bragg=None, lamb=None, n=None, dtheta=None, psi=None, use_non_parallelism=None, ntheta=None, npsi=None, include_summit=None, det=None, config=None, length=None, returnas=None, return_xixj=None, grid=None, ): """ Return rays stemming from the crystal The rays are defined by a start point (on the crystal surface) and either an end point or a unit vector Start points ------------ The start point is the crystal summit by default But that can be changed using: - ('dtheta', 'psi'): can be arbitrary but with same shape up to 4 dimensions - ('ntheta', 'npsi', 'include_summit'): will be used to compute the envelop (contour) of the crystal, as 2 1d arrays These arguments are fed to self.get_local_noute1e2() which will compute the start points and return them as shape (3, psi.shape) End point or unit vector ------------------------ End point are computed automatically if: - 'config' is provided: ray-tracing is done like for any camera - 'det' is provided: xi and xj can be computed Returning format ---------------- The rays can be returned as: - '(pts, vect, length)': a tuple of: - pts: array of start points on the crystal (only the summit by default) - vect: array - length: - '(pts, vect)': a tuple with only pts and vect - 'pts': a tuple, where both start and end points are returned All arrays represent (X, Y, Z) cartesian coordinates in the tokamak's frame Optionally, can return the (xi, xj) coordinates of points if a detector (det) is provided. """ # ----------- # Check input if returnas is None: returnas = 'pts' if return_xixj is None: return_xixj = False lret = ['(pts, vect, length)', '(pts, vect)', 'pts'] # , object] if returnas not in lret: msg = ( "Arg returnas must be in:\n" + "\t- '(pts, vect, length)': starting points, unit vector," + " length\n" + "\t- 'pts': starting and ending points\n" # + "\t- object: CamLOS1D instance\n" ) raise Exception(msg) det = self._checkformat_det(det) if length is None: length = 10. 
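        # Illustrative call (editor's sketch; det is a user-supplied dict,
        # values purely hypothetical):
        #   pts_start, pts_end = self.get_rays_from_cryst(
        #       phi=0., lamb=3.96e-10, det=det, returnas='pts')
        # i.e. start points on the crystal and end points on the detector.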
if grid is None: try: grid = bragg.shape != dtheta.shape except Exception as err: grid = True # ----------- # Get starting point and vectors pts_start, vect = self._get_rays_from_cryst( phi=phi, bragg=bragg, lamb=lamb, n=n, dtheta=dtheta, psi=psi, use_non_parallelism=use_non_parallelism, ntheta=ntheta, npsi=npsi, include_summit=include_summit, grid=grid, ) if returnas == '(pts, vect)': return pts_start, vect # ----------- # Get length (minimum between conf, det, length) vshape = vect.shape dk = { k0: np.full(vshape[1:], np.nan) for k0 in ['config', 'det', 'length'] } xi, xj = None, None if config is not None: # Here insert ray-tracing from config! if vshape != pts_start.shape: if len(vshape) == 3 and len(pts_start.shape) == 2: D = np.reshape( np.repeat(pts_start[..., None], vshape[-1], axis=-1), (3, -1), ) u = vect.reshape((3, -1)) else: msg = ( "Not treated case!\n" f"\t- pts_start.shape: {pts_start.shape}\n" f"\t- vect.shape: {vshape}\n" ) raise Exception(msg) else: if len(vshape) > 2: D = pts_start.reshape((3, -1)) u = vect.reshape((3, -1)) else: D = pts_start u = vect rays = _core.Rays( dgeom=(D, u), config=config, strict=False, Name='dummy', Diag='dummy', Exp='dummy', ) if u.shape != vshape: kout = rays.dgeom['kOut'].reshape(vshape[1:]) else: kout = rays.dgeom['kOut'] dk['config'] = kout if det is not None and det is not False: shape = tuple([3] + [1 for ii in range(vect.ndim-1)]) cent = det['cent'].reshape(shape) nout = det['nout'].reshape(shape) if grid is True: k = ( np.sum((cent-pts_start[..., None])*nout, axis=0) / np.sum(vect*nout, axis=0) ) else: k = ( np.sum((cent-pts_start)*nout, axis=0) / np.sum(vect*nout, axis=0) ) dk['det'][k >= 0.] = k[k >= 0.] if return_xixj is True: if grid: pts_end = pts_start[..., None] + dk['det'][None, ...]*vect else: pts_end = pts_start + dk['det'][None, ...]*vect ei = det['ei'].reshape(shape) ej = det['ej'].reshape(shape) xi = np.sum((pts_end - cent)*ei, axis=0) xj = np.sum((pts_end - cent)*ej, axis=0) if length is not None: dk['length'][:] = length k = np.nanmin([vv for vv in dk.values() if vv is not None], axis=0) # ----------- # return if returnas == 'pts': if grid: pts_end = pts_start[..., None] + k[None, ...]*vect if return_xixj: return pts_start, pts_end, xi, xj else: return pts_start, pts_end else: pts_end = pts_start + k[None, ...]*vect if return_xixj: return pts_start, pts_end, xi, xj else: return pts_start, pts_end elif returnas == '(pts, vect, length)': if return_xixj: return pts_start, vect, k, xi, xj else: return pts_start, vect, k # ----------------- # methods for crystal splitting # ----------------- def split(self, direction=None, nb=None): # ------------ # check inputs if direction is None: direction = 'e1' if direction not in ['e1', 'e2']: msg = ( "Arg direction must be either:\n" "\t- 'e1': split along vector 'e1' (~horizontally)\n" "\t- 'e2': split along vector 'e2' (~vertically)\n" f"You provided: {direction}" ) raise Exception(msg) if nb is None: nb = 2 if not (isinstance(nb, int) and nb > 1): msg = ( "Arg nb must be a int > 1 !\n" "It specifies the number of equal parts desired\n" f"You provided: {nb}" ) raise Exception(msg) # --------------- # split edges = np.linspace(-1, 1, nb+1) mid = 0.5*(edges[1:] + edges[:-1])[None, :] if direction == 'e2': dtheta = mid*self._dgeom['extenthalf'][1] psi = np.zeros((1, nb), dtype=float) extenthalf = [ self._dgeom['extenthalf'][0], self._dgeom['extenthalf'][1]/nb, ] else: dtheta = np.zeros((1, nb), dtype=float) psi = mid*self._dgeom['extenthalf'][0] extenthalf = [ 
self._dgeom['extenthalf'][0]/nb, self._dgeom['extenthalf'][1], ] nouts = ( np.cos(dtheta)*( self._dgeom['nout'][:, None]*np.cos(psi) + self._dgeom['e1'][:, None]*np.sin(psi) ) + np.sin(dtheta)*self._dgeom['e2'][:, None] ) e1s = ( -self._dgeom['nout'][:, None]*np.sin(psi) + self._dgeom['e1'][:, None]*np.cos(psi) ) e2s = np.array([ nouts[1, :]*e1s[2, :] - nouts[2, :]*e1s[1, :], nouts[2, :]*e1s[0, :] - nouts[0, :]*e1s[2, :], nouts[0, :]*e1s[1, :] - nouts[1, :]*e1s[0, :], ]) # ----------- # Construct list of instances lobj = [ self.__class__( dgeom={ 'rcurve': self._dgeom['rcurve'], 'center': self._dgeom['center'], 'nout': nouts[:, ii], 'e1': e1s[:, ii], 'e2': e2s[:, ii], 'extenthalf': extenthalf, }, dmat={ k0: v0 for k0, v0 in self._dmat.items() if k0 not in ['nin', 'nout', 'e1', 'e2'] }, dbragg=dict(self._dbragg), Name=f"{self.Id.Name}{ii}", Exp=self.Id.Exp, ) for ii in range(nb) ] return lobj # ----------------- # methods for general plotting # ----------------- def plot( self, dcryst=None, phi=None, bragg=None, lamb=None, pts=None, n=None, config=None, det=None, length=None, dtheta=None, psi=None, ntheta=None, npsi=None, include_summit=None, dax=None, proj=None, res=None, element=None, color=None, ddet=None, dleg=None, draw=True, dmargin=None, use_non_parallelism=None, grid=None, rays_npts=None, rays_color=None, fs=None, wintit=None, tit=None, ): """ Plot the crystal in desired projeection The projection is 3d, cross-section or horizontal Optionaly add rays reflected on cryst at: - lamb / phi: desired wavelength and incidence angle and either: - psi, dtheta : desired pts on the crystal surface - pts: emitted from desired pts (e.g.: in the plasma) (need to be refresh with get_rays_from_cryst method if new pts are wanted) Parameters ---------- dax: None / dict dict of axes to be used, with keys: - 'cross': axe where to plot cross-section view - 'hor': axe where to plot horizontal (from top) view - '3d': axe where to plot 3d view if None, a new figure and axes are created proj: None / str key indicating which plot to make: - 'cross': cross-section projection - 'hor': horizontal projection - 'all': cross-section + horizontal view - '3d': 3d view element: None / str char string where each letter indicates an element to plot - 'o': outline (edges of crystal) - 's': summit (geometrical center of the crystal) - 'c': center (of the sphere of curvature) - 'r': rowland circle (plotted in e1 direction) - 'v': local unit vectors e1, e2, nout If None, default to 'oscvr' res: None / float Resolution for the discretization of the outline dcryst: None / dict dict of dict for plotting the various elements of the crystal: - 'outline': dict of properties fed to plot() - 'cent': dict of properties fed to plot() - 'summit': dict of properties fed to plot() - 'rowland': dict of properties fed to plot() - 'vectors': dict of properties fed to quiver() ddet: None / dict dict of dict for plotting the various elements of the det: - 'outline': dict of properties fed to plot() - 'cent': dict of properties fed to plot() - 'vectors': dict of properties fed to quiver() color: None / str / tuple color to be used for plotting Overwrites all colors in dcryst and ddet det: None / dict Optionnal associated detector to be plotted, as a dict with keys: - 'cent': 1d array of cartesian coordinates of the center - 'nout': 1d array of cartesian coordinates of unit vector oriented towards the crystal - 'ei': 1d array of cartesian coordinates of unit vector - 'ej': 1d array of cartesian coordinates of unit vector - 'outline': 2d array 
of outline coordinates in (ei, ej) dleg: None / dict dict of properties to be passed to plt.legend() if False legend is not plotted use_non_parallelism: None / str Return the unit vectors (direct orthonormal basis) Depending on: - use_non_parallelism: True => return the geometrical basis - use_non_parallelism: False => return the mesh basis """ if det is None: det = False det = self._checkformat_det(det) lc = [ dtheta is not None or psi is not None or phi is not None, pts is not None ] if np.sum(lc) == 2: msg = ( "For ray tracing, please provide either:\n" + "\t- dtheta, psi, phi, lamb/bragg\n" + "\t- pts, lamb/bragg\n" ) raise Exception(msg) # Add rays? if lc[0]: # Get one way # pts.shape = (3, nlamb, npts, ndtheta) pts_summit, pts1 = self.get_rays_from_cryst( phi=phi, lamb=lamb, bragg=bragg, n=n, use_non_parallelism=use_non_parallelism, dtheta=dtheta, psi=psi, ntheta=ntheta, npsi=npsi, include_summit=include_summit, config=config, det=det, returnas='pts', return_xixj=False, grid=grid, ) # Get the other way pts2, xi, xj = self.get_rays_from_cryst( phi=phi+np.pi, lamb=lamb, bragg=bragg, n=n, use_non_parallelism=use_non_parallelism, dtheta=dtheta, psi=psi, ntheta=ntheta, npsi=npsi, include_summit=include_summit, config=config, det=det, returnas='pts', return_xixj=True, grid=grid, )[1:] elif lc[1]: c0 = ( isinstance(pts, np.ndarray) and pts.ndim == 2 and pts.shape[0] == 3 ) if not c0: msg = ("Arg pts must be a (3, npts) np.array!") raise Exception(msg) # pts.shape = (nlamb, npts, ndtheta) dtheta, psi, phi, bragg, _, _ = self.calc_raytracing_from_lambpts( pts=pts, lamb=lamb, ndtheta=ntheta, ) pts_summit, pts2, xi, xj = self.get_rays_from_cryst( phi=phi+np.pi, lamb=None, bragg=bragg, n=n, use_non_parallelism=use_non_parallelism, dtheta=dtheta, psi=psi, ntheta=ntheta, npsi=npsi, include_summit=include_summit, config=config, det=det, returnas='pts', return_xixj=True, grid=grid, ) pts1 = np.repeat( np.repeat( np.repeat( pts[:, None, :], dtheta.shape[0], axis=1, )[..., None], dtheta.shape[2], axis=-1, )[..., None], 2, axis=-1, ) else: pts_summit, pts1, pts2, xi, xj = None, None, None, None, None return _plot_optics.CrystalBragg_plot( cryst=self, dcryst=dcryst, det=det, ddet=ddet, dax=dax, proj=proj, res=res, element=element, color=color, pts_summit=pts_summit, pts1=pts1, pts2=pts2, xi=xi, xj=xj, rays_color=rays_color, rays_npts=rays_npts, dleg=dleg, draw=draw, fs=fs, dmargin=dmargin, use_non_parallelism=use_non_parallelism, wintit=wintit, tit=tit, ) # ----------------- # methods for generic first-approx # ----------------- def get_phi_from_magaxis_summit( self, axis_r, axis_z, axis_npts=None, lamb=None, lamb_tol=None, bragg=None, n=None, use_non_parallelism=None, ): """ Return phi of a magnteic axis (at lamb with tolerance) axis_r and axis_z must be np.ndarrays of the same shape The magnetic axis is discretized toroidally in axis_npts (def: 1000) The pts closest to the chosen lamb are picked If no pts is found within tolerance, an error is raised """ # -------------------- # Check / format input if axis_npts is None: axis_npts = 1000 axis_r = np.atleast_1d(axis_r) axis_z = np.atleast_1d(axis_z) assert axis_r.shape == axis_z.shape if lamb_tol is None: lamb_tol = 0.01e-10 bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) lamb = self.get_lamb_from_bragg(bragg=bragg, n=n) # -------------- # Disretize axis shaperz = axis_r.shape phi_ax = np.full(shaperz, np.nan) # Compute phi theta_cryst = np.arctan2( self._dgeom['summit'][1], self._dgeom['summit'][0], ) theta_ax = theta_cryst + 
np.pi/2*np.linspace(-1, 1, axis_npts)
        shapetheta = np.r_[[1 for ii in shaperz], axis_npts]
        theta_ax = theta_ax.reshape(shapetheta)

        axis_x = (axis_r[..., None] * np.cos(theta_ax)).ravel()
        axis_y = (axis_r[..., None] * np.sin(theta_ax)).ravel()
        axis_z = (np.repeat(axis_z[..., None], axis_npts, axis=-1)).ravel()

        # ----------------------------------------------
        # Compute bragg, phi, lamb of each point on axis
        (
            bragg_ax_full, phi_ax_full, lamb_ax_full,
        ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
            pts=np.array([axis_x, axis_y, axis_z]),
            dtheta=None, psi=None,
            ntheta=None, npsi=None,
            n=None,
            use_non_parallelism=use_non_parallelism,
            grid=None,
            return_lamb=True,
        )

        # -------------------------------------
        # Select points on axis closest to lamb

        # lamb_ax_full = self.get_lamb_from_bragg(bragg_ax_full)
        shape_full = tuple(np.r_[shaperz, axis_npts])
        lamb_ax_full = lamb_ax_full.reshape(shape_full)
        phi_ax_full = phi_ax_full.reshape(shape_full)
        dlamb = np.abs(lamb_ax_full - lamb)
        indok = np.any(dlamb <= lamb_tol, axis=-1)
        indmin = np.nanargmin(dlamb[indok, :], axis=-1)
        indtup = tuple([iii for iii in indok.nonzero()] + [indmin])
        phi_ax[indok] = phi_ax_full[indtup]
        return phi_ax

    def get_bragg_from_lamb(self, lamb=None, n=None):
        """ Bragg's law: n*lamb = 2*d*sin(bragg) """
        if self._dmat['d'] is None:
            msg = "Interplane distance d not set!\n"
            msg += "  => self.set_dmat({'d':...})"
            raise Exception(msg)
        if lamb is None:
            lamb = self._dbragg['lambref']
        return _comp_optics.get_bragg_from_lamb(
            np.atleast_1d(lamb), self._dmat['d'], n=n,
        )

    def get_lamb_from_bragg(self, bragg=None, n=None):
        """ Bragg's law: n*lamb = 2*d*sin(bragg) """
        if self._dmat['d'] is None:
            msg = "Interplane distance d not set!\n"
            msg += "  => self.set_dmat({'d':...})"
            raise Exception(msg)
        if bragg is None:
            bragg = self._dbragg['braggref']
        return _comp_optics.get_lamb_from_bragg(np.atleast_1d(bragg),
                                                self._dmat['d'], n=n)

    def update_non_parallelism(self, alpha=None, beta=None):
        """ Compute new values of the unit vectors nout, e1 and e2 in the
        dmat basis, accounting for non-parallelism, and update the dmat
        dict accordingly
        """
        if alpha is None:
            alpha = 0
        if beta is None:
            beta = 0
        (
            self._dmat['nin'], self._dmat['nout'],
            self._dmat['e1'], self._dmat['e2'],
        ) = _comp_optics.get_vectors_from_angles(
            alpha, beta,
            self._dgeom['nout'], self._dgeom['e1'], self._dgeom['e2'],
        )
        self._dmat['alpha'], self._dmat['beta'] = alpha, beta

    def calc_meridional_sagital_focus(
        self,
        rcurve=None,
        bragg=None,
        alpha=None,
        use_non_parallelism=None,
        verb=None,
    ):
        """ Compute the sagittal and meridional focus distances.

        Optionally accounts for non-parallelism, set beforehand via the
        update_non_parallelism method.

        Parameters
        ----------
        rcurve:     float
            in dgeom dict., curvature radius of the crystal.
        bragg:      float
            in dbragg dict., reference bragg angle of the crystal.
        alpha:      float
            in dmat dict., amplitude of the non-parallelism, as an angle
            defined by the user, in radians.
use_non_parallelism: str Need to be True to use new alpha angle Return ------ merid_ref: float Distance crystal-meridional focus (m), for a perfect crystal sagit_ref: float Distance crystal-sagital focus (m), for a perfect crystal merid_unp: float Distance crystal-meridional focus (m), using non_parallelism sagit_unp: float Distance crystal-sagital focus (m), using non_parallelism """ # Check inputs if rcurve is None: rcurve = self._dgeom['rcurve'] if bragg is None: bragg = self._dbragg['braggref'] if use_non_parallelism is True: alpha = self._dmat['alpha'] if use_non_parallelism is False: alpha = 0.0 # Compute return _comp_optics.calc_meridional_sagital_focus( rcurve=rcurve, bragg=bragg, alpha=alpha, use_non_parallelism=use_non_parallelism, verb=verb, ) def get_rowland_dist_from_lambbragg(self, bragg=None, lamb=None, n=None): """ Return the array of dist from cryst summit to pts on rowland """ bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) if np.all(np.isnan(bragg)): msg = ("There is no available bragg angle!\n" + " => Check the vlue of self.dmat['d'] vs lamb") raise Exception(msg) return _comp_optics.get_rowland_dist_from_bragg( bragg=bragg, rcurve=self._dgeom['rcurve'], ) def get_detector_ideal( self, bragg=None, lamb=None, rcurve=None, n=None, ddist=None, di=None, dj=None, dtheta=None, dpsi=None, tilt=None, lamb0=None, lamb1=None, dist01=None, use_non_parallelism=None, tangent_to_rowland=None, plot=False, ): """ Return approximate ideal detector geometry Assumes infinitesimal and ideal crystal Returns a dict containing the position and orientation of a detector if it was placed ideally on the rowland circle, centered on the desired bragg angle (in rad) or wavelength (in m) The detector can be tangential to the Rowland circle or perpendicular to the line between the crystal and the detector Assumes detector center matching lamb (m) / bragg (rad) The detector can be translated towards / away from the crystal to make sure the distance between 2 spectral lines (lamb0 and lamb1) on the detector's plane matches a desired distance (dist01, in m) Finally, a desired offset (translation) can be added via (ddist, di, dj), in m Similarly, an extra rotation can be added via (dtheta, dpsi, tilt) Detector is described by center position and (nout, ei, ej) unit vectors By convention, nout = np.cross(ei, ej) Vectors (ei, ej) define an orthogonal frame in the detector's plane All coordinates are 3d (X, Y, Z in the tokamak's frame) Return: ------- det: dict dict of detector geometrical characteristics: 'cent': np.ndarray (3,) array of (x, y, z) coordinates of detector center 'nout': np.ndarray (3,) array of (x, y, z) coordinates of unit vector perpendicular to detector' surface oriented towards crystal 'ei': np.ndarray (3,) array of (x, y, z) coordinates of unit vector defining first coordinate in detector's plane 'ej': np.ndarray (3,) array of (x, y, z) coordinates of unit vector defining second coordinate in detector's plane 'outline': np.darray (2, N) array to build detector's contour where the last point is identical to the first. 
            (for example for WEST X2D spectrometer:
             x*np.r_[-1,-1,1,1,-1], y*np.r_[-1,1,1,-1,-1])
        """
        # ---------------------
        # Check / format inputs
        if rcurve is None:
            rcurve = self._dgeom['rcurve']
        bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n)
        if np.all(np.isnan(bragg)):
            msg = ("There is no available bragg angle!\n"
                   + "  => Check the value of self.dmat['d'] vs lamb")
            raise Exception(msg)

        lc = [lamb0 is not None, lamb1 is not None, dist01 is not None]
        if any(lc) and not all(lc):
            msg = (
                "Arg lamb0, lamb1 and dist01 must be provided together:\n"
                + "\t- lamb0: line0 wavelength ({})\n".format(lamb0)
                + "\t- lamb1: line1 wavelength ({})\n".format(lamb1)
                + "\t- dist01: distance (m) on detector between lines "
                + "({})".format(dist01)
            )
            raise Exception(msg)
        bragg01 = None
        if all(lc):
            bragg01 = self._checkformat_bragglamb(
                lamb=np.r_[lamb0, lamb1], n=n,
            )

        # split into 2 different conditions because of dmat
        lc = [rcurve is None, self._dgeom['summit'] is None]
        if any(lc):
            msg = (
                "Some missing fields in dgeom for computation:"
                # was "\n\t-".join(['rcurve'] + 'summit'), a TypeError
                # (list + str); both field names are now listed
                + "\n\t-" + "\n\t-".join(['rcurve', 'summit'])
            )
            raise Exception(msg)

        nout, e1, e2, use_non_parallelism = self.get_unit_vectors(
            use_non_parallelism=use_non_parallelism,
        )
        lc = [cc is None for cc in [nout, e1, e2]]
        if any(lc):
            msg = "Fields 'nout', 'e1', 'e2' missing!"
            raise Exception(msg)

        # Compute crystal-centered parameters in (nout, e1, e2)
        (det_dist, n_crystdet_rel,
         det_nout_rel, det_ei_rel) = _comp_optics.get_approx_detector_rel(
             rcurve, bragg,
             bragg01=bragg01, dist01=dist01,
             tangent_to_rowland=tangent_to_rowland)

        # Deduce absolute position in (x, y, z)
        det_cent, det_nout, det_ei, det_ej = _comp_optics.get_det_abs_from_rel(
            det_dist, n_crystdet_rel, det_nout_rel, det_ei_rel,
            self._dgeom['summit'], nout, e1, e2,
            ddist=ddist, di=di, dj=dj,
            dtheta=dtheta, dpsi=dpsi, tilt=tilt)

        if plot:
            dax = self.plot()
            p0 = np.repeat(det_cent[:, None], 3, axis=1)
            vv = np.vstack((det_nout, det_ei, det_ej)).T
            dax['cross'].plot(np.hypot(det_cent[0], det_cent[1]),
                              det_cent[2], 'xb')
            dax['hor'].plot(det_cent[0], det_cent[1], 'xb')
            dax['cross'].quiver(np.hypot(p0[0, :], p0[1, :]), p0[2, :],
                                np.hypot(vv[0, :], vv[1, :]), vv[2, :],
                                units='xy', color='b')
            dax['hor'].quiver(p0[0, :], p0[1, :], vv[0, :], vv[1, :],
                              units='xy', color='b')
        return {'cent': det_cent, 'nout': det_nout,
                'ei': det_ei, 'ej': det_ej}

    def _checkformat_det(self, det=None):
        lc = [det is None, det is False, isinstance(det, dict)]
        msg = ("det must be:\n"
               + "\t- False: no det provided\n"
               + "\t- None: use default approx det from:\n"
               + "\t        self.get_detector_ideal()\n"
               + "\t- dict: a dictionary of 3d (x,y,z) coordinates of a point"
               + " (local frame center) and 3 unit vectors forming a direct "
               + "orthonormal basis attached to the detector's frame\n"
               + "\t\t\t\t- 'cent': detector center\n"
               + "\t\t\t\t- 'nout': unit vector perpendicular to surface, "
               + "in direction of the crystal\n"
               + "\t\t\t\t- 'ei': unit vector, first coordinate on surface\n"
               + "\t\t\t\t- 'ej': unit vector, second coordinate on surface\n"
               + "    You provided: {}".format(det))
        if not any(lc):
            raise Exception(msg)
        if lc[0]:
            det = self.get_detector_ideal(lamb=self._dbragg['lambref'])
        elif lc[2]:
            lk = ['cent', 'nout', 'ei', 'ej']
            c0 = (isinstance(det, dict)
                  and all([(kk in det.keys()
                            and hasattr(det[kk], '__iter__')
                            and np.atleast_1d(det[kk]).size == 3
                            and not np.any(np.isnan(det[kk])))
                           for kk in lk]))
            if not c0:
                raise Exception(msg)
            for k0 in lk:
                det[k0] = np.atleast_1d(det[k0]).ravel()
        return det

    def get_local_noute1e2(
        self,
        dtheta=None, psi=None,
ntheta=None, npsi=None, use_non_parallelism=None, include_summit=None, ): """ Return (vout, ve1, ve2) associated to pts on the crystal's surface All points on the spherical crystal's surface are identified by (dtheta, psi) coordinates, where: - theta = np.pi/2 + dtheta (dtheta=0 default) for the center (for the diffracted beam), from frame's basis vector ez - psi = 0 for the center, positive in direction of e1 They are the spherical coordinates from a sphere centered on the crystal's center of curvature. Args (dtheta, psi) can be: - arbitrary: same shape and dimension up to 4 - 'envelop': will be computed to represent the crystal contour will be returned as 2 1d arrays Return the pts themselves and the 3 perpendicular local unit vectors (nout, e1, e2), where nout is towards the outside of the sphere and nout = np.cross(e1, e2) In all cases, the output have shape (3, psi.shape) Return: ------- summ: np.ndarray coordinates of the points on the surface vout: np.ndarray coordinates of outward unit vector ve1: np.ndarray coordinates of first tangential unit vector ve2: np.ndarray coordinates of second tangential unit vector All are cartesian (X, Y, Z) coordinates in the tokamak's frame """ # Get local basis at crystal summit nout, e1, e2, use_non_parallelism = self.get_unit_vectors( use_non_parallelism=use_non_parallelism, ) nin = -nout # Get vectors at any points from psi & dtheta vout, ve1, ve2 = _comp_optics.CrystBragg_get_noute1e2_from_psitheta( nout, e1, e2, psi=psi, dtheta=dtheta, e1e2=True, sameshape=False, extenthalf_psi=self._dgeom['extenthalf'][0], extenthalf_dtheta=self._dgeom['extenthalf'][1], ntheta=ntheta, npsi=npsi, include_summit=include_summit, ) vin = -vout # cent no longer dgeom['center'] because no longer a fixed point cent = self._dgeom['summit'] + self._dgeom['rcurve']*nin reshape = np.r_[3, [1 for ii in range(vout.ndim - 1)]] cent = cent.reshape(reshape) # Redefining summit according to nout at each point at crystal summ = cent + self._dgeom['rcurve']*vout return summ, vout, ve1, ve2 def calc_xixj_from_braggphi( self, phi=None, bragg=None, lamb=None, n=None, dtheta=None, psi=None, det=None, use_non_parallelism=None, strict=None, return_strict=None, data=None, plot=True, dax=None, ): """ Assuming crystal's summit as frame origin According to [1], this assumes a local frame centered on the crystal These calculations are independent from the tokamak's frame: The origin of the local frame is the crystal's summit The (O, ez) axis is the crystal's normal The crystal is tangent to (O, ex, ey) [1] tofu/Notes_Upgrades/SpectroX2D/SpectroX2D_EllipsesOnPlane.pdf Parameters: ----------- Z: float Detector's plane intersection with (O, ez) axis n: np.ndarray (3,) array containing local (x,y,z) coordinates of the plane's normal vector """ if return_strict is None: return_strict = False # Check / format inputs bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) phi = np.atleast_1d(phi) # Check / get det det = self._checkformat_det(det) # Get local summit nout, e1, e2 if non-centered if dtheta is None: dtheta = 0. if psi is None: psi = 0. # Probably to update with use_non_parallelism? 
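        # Editor's note: with the defaults (dtheta=0, psi=0) only the summit
        # is used; array inputs, e.g. psi=np.linspace(-1, 1, 5)*extenthalf[0]
        # (extenthalf standing for self._dgeom['extenthalf'], shown only for
        # illustration), sample the reflection across the crystal surface.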
# Get back summit & vectors at any point at the crystal surface, # according to parallelism properties summit, nout, e1, e2 = self.get_local_noute1e2( dtheta=dtheta, psi=psi, use_non_parallelism=use_non_parallelism, ntheta=None, npsi=None, include_summit=False, ) # Compute xi, xj, strict = _comp_optics.calc_xixj_from_braggphi( det_cent=det['cent'], det_nout=det['nout'], det_ei=det['ei'], det_ej=det['ej'], det_outline=det.get('outline'), summit=summit, nout=nout, e1=e1, e2=e2, bragg=bragg, phi=phi, strict=strict, ) if plot: dax = _plot_optics.CrystalBragg_plot_approx_detector_params( bragg, xi, xj, data, dax, ) if return_strict is True: return xi, xj, strict else: return xi, xj def plot_line_on_det_tracing( self, lamb=None, n=None, nphi=None, det=None, johann=None, use_non_parallelism=None, lpsi=None, ldtheta=None, strict=None, ax=None, dleg=None, rocking=None, fs=None, dmargin=None, wintit=None, tit=None, ): """ Visualize the de-focusing by ray-tracing of chosen lamb Possibility to plot few wavelength' arcs on the same plot. Args: - lamb: array of min size 1, in 1e-10 [m] - det: dict - xi_bounds: np.min & np.max of _XI - xj_bounds: np.min & np.max of _XJ (from "inputs_temp/XICS_allshots_C34.py" l.649) - johann: True or False """ # Check / format inputs if lamb is None: lamb = self._dbragg['lambref'] lamb = np.atleast_1d(lamb).ravel() nlamb = lamb.size if johann is None: johann = lpsi is not None or ldtheta is not None if rocking is None: rocking = False if det is None or det.get('outline') is None: msg = ("Please provide det as a dict with 'outline'!") raise Exception(msg) # Get local basis nout, e1, e2, use_non_parallelism = self.get_unit_vectors( use_non_parallelism=use_non_parallelism, ) nin = -nout # Compute lamb / phi _, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi( xi=det['outline'][0, :], xj=det['outline'][1, :], det=det, dtheta=0, psi=0, use_non_parallelism=use_non_parallelism, n=n, grid=True, return_lamb=False, ) phimin, phimax = np.nanmin(phi), np.nanmax(phi) phimin, phimax = phimin-(phimax-phimin)/10, phimax+(phimax-phimin)/10 # Get reference ray-tracing bragg = self._checkformat_bragglamb(lamb=lamb, n=n) if nphi is None: nphi = 100 phi = np.linspace(phimin, phimax, nphi) xi = np.full((nlamb, nphi), np.nan) xj = np.full((nlamb, nphi), np.nan) for ll in range(nlamb): xi[ll, :], xj[ll, :] = self.calc_xixj_from_braggphi( bragg=np.full(phi.shape, bragg[ll]), phi=phi, dtheta=0., psi=0., n=n, det=det, use_non_parallelism=use_non_parallelism, strict=strict, plot=False, ) # Get johann-error raytracing (multiple positions on crystal) xi_er, xj_er = None, None if johann and not rocking: if lpsi is None: lpsi = np.linspace(-1., 1., 15) if ldtheta is None: ldtheta = np.linspace(-1., 1., 15) lpsi, ldtheta = np.meshgrid(lpsi, ldtheta) lpsi = lpsi.ravel() ldtheta = ldtheta.ravel() lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi] ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta] npsi = lpsi.size assert npsi == ldtheta.size xi_er = np.full((nlamb, npsi*nphi), np.nan) xj_er = np.full((nlamb, npsi*nphi), np.nan) for l in range(nlamb): for ii in range(npsi): i0 = np.arange(ii*nphi, (ii+1)*nphi) xi_er[l, i0], xj_er[l, i0] = self.calc_xixj_from_braggphi( phi=phi, bragg=bragg[l], lamb=None, n=n, dtheta=ldtheta[ii], psi=lpsi[ii], det=det, plot=False, use_non_parallelism=use_non_parallelism, strict=strict, ) # Get rocking curve error if rocking: pass # Plot return _plot_optics.CrystalBragg_plot_line_tracing_on_det( lamb, xi, xj, xi_er, xj_er, det=det, ax=ax, dleg=dleg, johann=johann, 
rocking=rocking, fs=fs, dmargin=dmargin, wintit=wintit, tit=tit) def calc_johannerror( self, xi=None, xj=None, err=None, det=None, n=None, lpsi=None, ldtheta=None, lambda_interval_min=None, lambda_interval_max=None, use_non_parallelism=None, plot=True, fs=None, cmap=None, vmin=None, vmax=None, tit=None, wintit=None, ): """ Plot the johann error The johann error is the error (scattering) induced by defocalization due to finite crystal dimensions There is a johann error on wavelength (lamb => loss of spectral resolution) and on directionality (phi) If provided, lpsi and ldtheta are taken as normalized variations with respect to the crystal summit and to its extenthalf. Typical values are: - lpsi = [-1, 1, 1, -1] - ldtheta = [-1, -1, 1, 1] They must have the same len() First affecting a reference lambda according to: - pixel's position - crystal's summit Then, computing error on bragg and phi angles on each pixels by computing lambda and phi from the crystal's outline Provide lambda_interval_min/max to ensure the given wavelength interval is detected over the whole surface area. A True/False boolean is then returned. """ # Check xi, xj once before to avoid doing it twice if err is None: err = 'abs' if lambda_interval_min is None: lambda_interval_min = 3.93e-10 if lambda_interval_max is None: lambda_interval_max = 4.00e-10 xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj) # Check / format inputs bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi( xi=xii, xj=xjj, det=det, dtheta=0, psi=0, use_non_parallelism=use_non_parallelism, n=n, grid=True, return_lamb=True, ) # Only one summit was selected bragg, phi, lamb = bragg[..., 0], phi[..., 0], lamb[..., 0] # Check lambda interval into lamb array c0 = ( np.min(lamb) < lambda_interval_min and np.max(lamb) > lambda_interval_max ) if c0: test_lambda_interv = True else: test_lambda_interv = False # Get err from multiple ldtheta, lpsi if lpsi is None: lpsi = np.r_[-1., 0., 1., 1., 1., 0., -1, -1] lpsi = self._dgeom['extenthalf'][0]*np.r_[lpsi] if ldtheta is None: ldtheta = np.r_[-1., -1., -1., 0., 1., 1., 1., 0.] 
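        # Editor's note: the 8 default (lpsi, ldtheta) pairs trace the
        # crystal outline (4 corners + 4 edge midpoints) in normalized
        # units; the rescaling to physical half-extents happens just below.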
ldtheta = self._dgeom['extenthalf'][1]*np.r_[ldtheta] npsi = lpsi.size assert npsi == ldtheta.size ( braggerr, phierr, lamberr, ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi( xi=xii, xj=xjj, det=det, dtheta=ldtheta, psi=lpsi, use_non_parallelism=use_non_parallelism, n=n, grid=True, return_lamb=True, ) err_lamb = np.nanmax(np.abs(lamb[..., None] - lamberr), axis=-1) err_phi = np.nanmax(np.abs(phi[..., None] - phierr), axis=-1) # absolute vs relative error if 'rel' in err: if err == 'rel': err_lamb = 100.*err_lamb / (np.nanmax(lamb) - np.nanmin(lamb)) err_phi = 100.*err_phi / (np.nanmax(phi) - np.nanmin(phi)) elif err == 'rel2': err_lamb = 100.*err_lamb / np.mean(lamb) err_phi = 100.*err_phi / np.mean(phi) err_lamb_units = '%' err_phi_units = '%' else: err_lamb_units = 'm' err_phi_units = 'rad' if plot is True: ax = _plot_optics.CrystalBragg_plot_johannerror( xi, xj, lamb, phi, err_lamb, err_phi, err_lamb_units=err_lamb_units, err_phi_units=err_phi_units, cmap=cmap, vmin=vmin, vmax=vmax, fs=fs, tit=tit, wintit=wintit, ) return ( err_lamb, err_phi, err_lamb_units, err_phi_units, test_lambda_interv, ) def plot_focal_error_summed( self, dist_min=None, dist_max=None, di_min=None, di_max=None, ndist=None, ndi=None, lamb=None, bragg=None, xi=None, xj=None, err=None, use_non_parallelism=None, tangent_to_rowland=None, n=None, plot=None, pts=None, det_ref=None, plot_dets=None, nsort=None, dcryst=None, lambda_interval_min=None, lambda_interval_max=None, contour=None, fs=None, ax=None, cmap=None, vmin=None, vmax=None, return_ax=None, ): """ Using the calc_johannerror method, computing the sum of the focalization error over the whole detector for different positions characterized by the translations ddist and di in the equatorial plane (dist_min, dist_max, ndist) (di_min, di_max, ndi). Parameters: ----------- - lamb/bragg : float Automatically set to crystal's references - xi, xj : np.ndarray pixelization of the detector (from "inputs_temp/XICS_allshots_C34.py" l.649) - alpha, beta : float Values of Non Parallelism references angles - use_non_parallelism : str - tangent_to_rowland : str - plot_dets : str Possibility to plot the nsort- detectors with the lowest summed focalization error, next to the Best Approximate Real detector dict(np.load('det37_CTVD_incC4_New.npz', allow_pickle=True)) - nsort : float Number of best detector's position to plot - lambda_interv_min/max : float To ensure the given wavelength interval is detected over the whole surface area. A True/False boolean is then returned. 
""" # Check / format inputs if dist_min is None: dist_min = -0.15 if dist_max is None: dist_max = 0.15 if di_min is None: di_min = -0.40 if di_max is None: di_max = 0.40 if ndist is None: ndist = 21 if ndi is None: ndi = 21 if err is None: err = 'rel' if plot is None: plot = True if plot_dets is None: plot_dets = det_ref is not None if nsort is None: nsort = 5 if return_ax is None: return_ax = True if lambda_interval_min is None: lambda_interval_min = 3.93e-10 if lambda_interval_max is None: lambda_interval_max = 4.00e-10 l0 = [dist_min, dist_max, ndist, di_min, di_max, ndi] c0 = any([l00 is not None for l00 in l0]) if not c0: msg = ( "Please give the ranges of ddist and di translations\n" "\t to compute the different detector's position\n" "\t Provided:\n" "\t\t- dist_min, dist_max, ndist: ({}, {}, {})\n".format( dist_min, dist_max, ndist, ) + "\t\t- di_min, di_max, ndi: ({}, {}, {})\n".format( di_min, di_max, ndi, ) ) raise Exception(msg) # ------------ # Compute local coordinates of det_ref ( ddist0, di0, dj0, dtheta0, dpsi0, tilt0, ) = self._get_local_coordinates_of_det( bragg=bragg, lamb=lamb, det_ref=det_ref, use_non_parallelism=use_non_parallelism, ) # angle between nout vectors from get_det_approx() & ## get_det_approx(tangent=False) det1 = self.get_detector_ideal( lamb=lamb, bragg=bragg, use_non_parallelism=use_non_parallelism, tangent_to_rowland=True, ) det2 = self.get_detector_ideal( lamb=lamb, bragg=bragg, use_non_parallelism=use_non_parallelism, tangent_to_rowland=False, ) cos_angle_nout = np.sum( det1['nout'] * det2['nout'] ) / ( np.linalg.norm(det1['nout'] * np.linalg.norm(det2['nout'])) ) angle_nout = np.arccos(cos_angle_nout) # Compute ddist = np.linspace(dist_min, dist_max, int(ndist)) di = np.linspace(di_min, di_max, int(ndi)) error_lambda = np.full((di.size, ddist.size), np.nan) test_lamb_interv = np.zeros((di.size, ddist.size), dtype='bool') end = '\r' for ii in range(ddist.size): for jj in range(di.size): # print progression if ii == ndist-1 and jj == ndi-1: end = '\n' msg = ( "Computing mean focal error for det " f"({ii+1}, {jj+1})/({ndist}, {ndi})" ).ljust(60) print(msg, end=end, flush=True) # Get det dpsi0bis = float(dpsi0) if tangent_to_rowland: dpsi0bis = dpsi0 - angle_nout det = self.get_detector_ideal( ddist=ddist[ii], di=di[jj], dj=dj0, dtheta=dtheta0, dpsi=dpsi0bis, tilt=tilt0, lamb=lamb, bragg=bragg, use_non_parallelism=use_non_parallelism, tangent_to_rowland=False, ) # Integrate error ( error_lambda_temp, test_lamb_interv[jj, ii], ) = self.calc_johannerror( xi=xi, xj=xj, det=det, err=err, lambda_interval_min=lambda_interval_min, lambda_interval_max=lambda_interval_max, plot=False, )[::4] error_lambda[jj, ii] = np.nanmean(error_lambda_temp) if 'rel' in err: units = '%' else: units = 'm' if plot: ax = _plot_optics.CrystalBragg_plot_focal_error_summed( cryst=self, dcryst=dcryst, lamb=lamb, bragg=bragg, error_lambda=error_lambda, ddist=ddist, di=di, ddist0=ddist0, di0=di0, dj0=dj0, dtheta0=dtheta0, dpsi0=dpsi0, tilt0=tilt0, angle_nout=angle_nout, det_ref=det_ref, units=units, plot_dets=plot_dets, nsort=nsort, tangent_to_rowland=tangent_to_rowland, use_non_parallelism=use_non_parallelism, pts=pts, test_lamb_interv=test_lamb_interv, contour=contour, fs=fs, ax=ax, cmap=cmap, vmin=vmin, vmax=vmax, ) if return_ax: return error_lambda, ddist, di, test_lamb_interv, ax else: return error_lambda, ddist, di, test_lamb_interv def _get_local_coordinates_of_det( self, bragg=None, lamb=None, det_ref=None, use_non_parallelism=None, ): """ Computation of translation (ddist, 
di, dj) and angular (dtheta, dpsi, tilt)
        properties of an arbitrary detector chosen by the user.
        """
        # ------------
        # check inputs
        if det_ref is None:
            msg = (
                "You need to provide your arbitrary detector\n"
                + "\t in order to compute its spatial properties!\n"
                + "\t You provided: {}".format(det_ref)
            )
            raise Exception(msg)

        # Checkformat det
        det_ref = self._checkformat_det(det=det_ref)

        # ------------
        # get approx detect
        det_approx = self.get_detector_ideal(
            bragg=bragg, lamb=lamb,
            tangent_to_rowland=False,
            use_non_parallelism=use_non_parallelism,
        )

        # ------------
        # get vector delta between centers
        delta = det_ref['cent'] - det_approx['cent']
        ddist = np.sum(delta * (-det_approx['nout']))
        di = np.sum(delta * det_approx['ei'])
        dj = np.sum(delta * det_approx['ej'])

        # ---------------
        # get angles from unit vectors
        dtheta, dpsi, tilt = None, None, None

        # use formulas in _comp_optics.get_det_abs_from_rel()
        sindtheta = np.sum(det_approx['ej'] * det_ref['nout'])
        costheta_cospsi = np.sum(det_approx['nout'] * det_ref['nout'])
        costheta_sinpsi = np.sum(det_approx['ei'] * det_ref['nout'])

        costheta = np.sqrt(costheta_cospsi**2 + costheta_sinpsi**2)
        dtheta = np.arctan2(sindtheta, costheta)
        dpsi = np.arctan2(
            costheta_sinpsi / costheta,
            costheta_cospsi / costheta,
        )

        # ---------
        # tilt
        det_ei2 = (
            np.cos(dpsi)*det_approx['ei'] - np.sin(dpsi)*det_approx['nout']
        )
        det_ej2 = np.cross(det_ref['nout'], det_ei2)
        costilt = np.sum(det_ref['ei']*det_ei2)
        sintilt = np.sum(det_ref['ei']*det_ej2)
        tilt = np.arctan2(sintilt, costilt)

        return ddist, di, dj, dtheta, dpsi, tilt

    def get_lambbraggphi_from_ptsxixj_dthetapsi(
        self,
        pts=None,
        xi=None, xj=None, det=None,
        dtheta=None, psi=None,
        ntheta=None, npsi=None,
        n=None,
        use_non_parallelism=None,
        grid=None,
        return_lamb=None,
    ):
        """ Return the lamb, bragg and phi for provided pts and dtheta/psi

        if grid = True:
            compute all pts / dtheta/psi combinations
            => return (npts, ndtheta) arrays
        else:
            each pts is associated to a single dtheta/psi
            => assumes npts == ndtheta == npsi
            => return (npts,) arrays
        """
        # Check / Format inputs
        if return_lamb is None:
            return_lamb = True
        det = self._checkformat_det(det)

        # Get local basis
        summ, vout, ve1, ve2 = self.get_local_noute1e2(
            dtheta=dtheta, psi=psi,
            ntheta=ntheta, npsi=npsi,
            use_non_parallelism=use_non_parallelism,
            include_summit=True,
        )

        # Derive bragg, phi
        bragg, phi = _comp_optics.calc_braggphi_from_xixjpts(
            pts=pts,
            xi=xi, xj=xj, det=det,
            summit=summ, nin=-vout, e1=ve1, e2=ve2,
            grid=grid,
        )

        # Derive lamb
        if return_lamb is True:
            lamb = self.get_lamb_from_bragg(bragg=bragg, n=n)
            return bragg, phi, lamb
        else:
            return bragg, phi

    def get_lamb_avail_from_pts(
        self,
        pts=None,
        n=None, ndtheta=None,
        det=None, nlamb=None,
        klamb=None,
        use_non_parallelism=None,
        strict=None,
        return_phidtheta=None,
        return_xixj=None,
    ):
        """ Return the wavelengths accessible from plasma points on the crystal

        For a given plasma point, only a certain lambda interval can be
        bragg-diffracted on the crystal (due to Bragg's law and the
        crystal's dimensions)

        Beware, for a given pts and lamb, there can be up to 2 sets of
        solutions
        All non-valid solutions are set to nans, such that most of the time
        there is only one

        For a set of given:
            - pts: (3, npts) array, (x, y, z) coordinates

        Using:
            - nlamb: sampling of the lamb interval (default: 100)
            - ndtheta: sampling of dtheta on the crystal surface (default: 20)
            - det: (optional) a detector dict, for xi and xj

        Returns:
            - lamb: (npts, nlamb) array of sampled valid wavelength interval
            - phi: (npts, nlamb, ndtheta, 2) array of phi
            - dtheta: (npts, nlamb,
ndtheta, 2) array of dtheta - psi: (npts, nlamb, ndtheta, 2) array of psi And optionally (return_xixj=True and det provided as dict): - xi: (npts, nlamb, ndtheta, 2) array of xi - xj: (npts, nlamb, ndtheta, 2) array of xj The result is computed with or w/o taking into account non-parallelism """ # Check / format if ndtheta is None: ndtheta = 20 if nlamb is None: nlamb = 100 assert nlamb >= 2, "nlamb must be >= 2" if return_phidtheta is None: return_phidtheta = True if return_xixj is None: return_xixj = det is not None if det is None: return_xixj = False if det is None: strict = False # Get lamb min / max bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi( pts=pts, dtheta='envelop', psi='envelop', ntheta=None, npsi=None, n=n, grid=True, use_non_parallelism=use_non_parallelism, return_lamb=True, ) lambmin = np.nanmin(lamb, axis=1) lambmax = np.nanmax(lamb, axis=1) if klamb is None: klamb = np.linspace(0, 1, nlamb) elif not (isinstance(klamb, np.ndarray) and klamb.ndim == 1): msg = "Please provide klamb as a 1d vector!" raise Exception(msg) nlamb = klamb.size lamb = lambmin[:, None] + (lambmax-lambmin)[:, None]*klamb return _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj( cryst=self, lamb=lamb, n=n, ndtheta=ndtheta, pts=pts, use_non_parallelism=use_non_parallelism, return_phidtheta=return_phidtheta, return_xixj=return_xixj, strict=strict, det=det, ) def _calc_dthetapsiphi_from_lambpts( self, pts=None, bragg=None, lamb=None, n=None, ndtheta=None, use_non_parallelism=None, grid=None, ): # Check / Format inputs pts = _comp_optics._checkformat_pts(pts) npts = pts.shape[1] bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) # get nout, e1, e2 nout, e1, e2, use_non_parallelism = self.get_unit_vectors( use_non_parallelism=use_non_parallelism ) # Compute dtheta, psi, indnan (nlamb, npts, ndtheta) # In general there are 2 solutions! (only close to rowland in practice) dtheta, psi, indok, grid = _comp_optics.calc_dthetapsiphi_from_lambpts( pts, bragg, summit=self._dgeom['summit'], # To be updated (non-paralellism)? 
rcurve=self._dgeom['rcurve'], nout=nout, e1=e1, e2=e2, extenthalf=self._dgeom['extenthalf'], ndtheta=ndtheta, grid=grid, ) # reshape bragg for matching dtheta.shape if grid is True: bragg = np.repeat( np.repeat( np.repeat(bragg[:, None], npts, axis=-1)[..., None], dtheta.shape[2], axis=-1, )[..., None], 2, axis=-1, ) pts = pts[:, None, :, None, None] else: bragg = np.repeat( np.repeat(bragg[:, None], dtheta.shape[1], axis=1)[..., None], 2, axis=-1, ) pts = pts[..., None, None] bragg[~indok] = np.nan # Get corresponding phi and re-check bragg, for safety bragg2, phi = self.get_lambbraggphi_from_ptsxixj_dthetapsi( pts=pts, dtheta=dtheta, psi=psi, grid=False, use_non_parallelism=use_non_parallelism, return_lamb=False, ) c0 = ( bragg2.shape == bragg.shape and np.allclose(bragg, bragg2, equal_nan=True) ) if not c0: try: plt.figure() plt.plot(bragg, bragg2, '.') except Exception as err: pass msg = ( "Inconsistency detected in bragg angle computations:\n" + "\t- from the points and lamb\n" + "\t- from the points and (dtheta, psi)\n" + "\nContext:\n" + "\t- use_non_parallelism: {}\n".format(use_non_parallelism) + "\t- bragg.shape = {}\n".format(bragg.shape) + "\t- bragg2.shape = {}\n".format(bragg2.shape) ) raise Exception(msg) return dtheta, psi, phi, bragg def calc_raytracing_from_lambpts( self, lamb=None, bragg=None, pts=None, xi_bounds=None, xj_bounds=None, nphi=None, det=None, n=None, ndtheta=None, johann=False, lpsi=None, ldtheta=None, rocking=False, strict=None, plot=None, fs=None, dmargin=None, wintit=None, tit=None, proj=None, legend=None, draw=None, returnas=None, ): """ Visualize the de-focusing by ray-tracing of chosen lamb If plot, 3 different plots can be produced: - det: plots the intersection of rays with detector plane - '2d': plots the geometry of the rays in 2d cross and hor - '3d': plots the geometry of the rays in 3d Specify the plotting option by setting plot to any of these (or a list) """ # Check / format inputs if returnas is None: returnas = 'data' if plot is None or plot is True: plot = ['det', '3d'] if isinstance(plot, str): plot = plot.split('+') assert all([ss in ['det', '2d', '3d'] for ss in plot]) assert returnas in ['data', 'ax'] pts = _comp_optics._checkformat_pts(pts) npts = pts.shape[1] # Get dtheta, psi and phi from pts/lamb dtheta, psi, phi, bragg = self._calc_dthetapsiphi_from_lambpts( pts=pts, lamb=lamb, bragg=bragg, n=n, ndtheta=ndtheta, ) ndtheta = dtheta.shape[-1] # assert dtheta.shape == (nlamb, npts, ndtheta) # Check / get det det = self._checkformat_det(det) # Compute xi, xj of reflexion (phi -> phi + np.pi) xi, xj = self.calc_xixj_from_braggphi( bragg=bragg, phi=phi+np.pi, n=n, dtheta=dtheta, psi=psi, det=det, strict=strict, plot=False, ) # Plot to be checked - unnecessary ? 
plot = False
        if plot is not False:
            ptscryst, ptsdet = None, None
            if '2d' in plot or '3d' in plot:
                ptscryst = self.get_local_noute1e2(dtheta, psi)[0]
                ptsdet = (det['cent'][:, None, None, None]
                          + xi[None, ...]*det['ei'][:, None, None, None]
                          + xj[None, ...]*det['ej'][:, None, None, None])
            ax = _plot_optics.CrystalBragg_plot_raytracing_from_lambpts(
                xi=xi, xj=xj, lamb=lamb,
                xi_bounds=xi_bounds, xj_bounds=xj_bounds,
                pts=pts, ptscryst=ptscryst, ptsdet=ptsdet,
                det_cent=det['cent'], det_nout=det['nout'],
                det_ei=det['ei'], det_ej=det['ej'],
                cryst=self, proj=plot, fs=fs, dmargin=dmargin,
                wintit=wintit, tit=tit, legend=legend, draw=draw)
            if returnas == 'ax':
                return ax
        return dtheta, psi, phi, bragg, xi, xj

    def _calc_spect1d_from_data2d(self, data, lamb, phi,
                                  nlambfit=None, nphifit=None,
                                  nxi=None, nxj=None,
                                  spect1d=None, mask=None, vertsum1d=None):
        if nlambfit is None:
            nlambfit = nxi
        if nphifit is None:
            nphifit = nxj
        return _comp_optics._calc_spect1d_from_data2d(
            data, lamb, phi,
            nlambfit=nlambfit, nphifit=nphifit,
            spect1d=spect1d, mask=mask, vertsum1d=vertsum1d,
        )

    def plot_data_vs_lambphi(
        self,
        xi=None, xj=None, data=None, mask=None,
        det=None, dtheta=None, psi=None, n=None,
        use_non_parallelism=None,
        nlambfit=None, nphifit=None,
        magaxis=None, npaxis=None,
        dlines=None, spect1d='mean',
        lambmin=None, lambmax=None,
        xjcut=None, dxj=None,
        plot=True, fs=None, tit=None, wintit=None,
        cmap=None, vmin=None, vmax=None,
        returnas=None,
    ):
        # Check / format inputs
        assert data is not None
        if returnas is None:
            returnas = 'spect'
        lreturn = ['ax', 'spect']
        if returnas not in lreturn:
            msg = ("Arg returnas must be in {}\n:".format(lreturn)
                   + "\t- 'spect': return a 1d vertically averaged spectrum\n"
                   + "\t- 'ax'   : return a list of axes instances")
            raise Exception(msg)

        xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
        nxi = xi.size if xi is not None else np.unique(xii).size
        nxj = xj.size if xj is not None else np.unique(xjj).size

        # Compute lamb / phi
        bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
            xi=xii, xj=xjj, det=det,
            dtheta=dtheta, psi=psi,
            use_non_parallelism=use_non_parallelism,
            n=n,
            grid=True,
            return_lamb=True,
        )

        # Compute lambfit / phifit and spectrum1d
        (spect1d, lambfit, phifit,
         vertsum1d, phiminmax) = self._calc_spect1d_from_data2d(
            data, lamb, phi,
            nlambfit=nlambfit, nphifit=nphifit, nxi=nxi, nxj=nxj,
            spect1d=spect1d, mask=mask, vertsum1d=True
        )

        # Get phiref from mag axis
        lambax, phiax = None, None
        if magaxis is not None:
            if npaxis is None:
                npaxis = 1000
            thetacryst = np.arctan2(self._dgeom['summit'][1],
                                    self._dgeom['summit'][0])
            thetaax = thetacryst + np.pi/2*np.linspace(-1, 1, npaxis)
            pts = np.array([magaxis[0]*np.cos(thetaax),
                            magaxis[0]*np.sin(thetaax),
                            np.full((npaxis,), magaxis[1])])
            braggax, phiax = self.calc_braggphi_from_pts(pts)
            lambax = self.get_lamb_from_bragg(braggax)
            phiax = np.arctan2(np.sin(phiax-np.pi), np.cos(phiax-np.pi))
            ind = ((lambax >= lambfit[0]) & (lambax <= lambfit[-1])
                   & (phiax >= phifit[0]) & (phiax <= phifit[-1]))
            lambax, phiax = lambax[ind], phiax[ind]
            ind = np.argsort(lambax)
            lambax, phiax = lambax[ind], phiax[ind]

        # Get lamb / phi for xj
        lambcut, phicut, spectcut = None, None, None
        if xjcut is not None:
            if dxj is None:
                dxj = 0.002
            xjcut = np.sort(np.atleast_1d(xjcut).ravel())
            xicutf = np.tile(xi, (xjcut.size, 1))
            xjcutf = np.repeat(xjcut[:, None], nxi, axis=1)
            (
                braggcut, phicut, lambcut,
            ) = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
                xi=xicutf, xj=xjcutf, det=det,
                dtheta=0, psi=0,
                use_non_parallelism=use_non_parallelism,
                n=1,
                grid=True,
                return_lamb=True,
            )
            indxj =
[(np.abs(xj-xjc) <= dxj).nonzero()[0] for xjc in xjcut] spectcut = np.array([np.nanmean(data[ixj, :], axis=0) for ixj in indxj]) # plot ax = None if plot: ax = _plot_optics.CrystalBragg_plot_data_vs_lambphi( xi, xj, bragg, lamb, phi, data, lambfit=lambfit, phifit=phifit, spect1d=spect1d, vertsum1d=vertsum1d, lambax=lambax, phiax=phiax, lambmin=lambmin, lambmax=lambmax, phiminmax=phiminmax, xjcut=xjcut, lambcut=lambcut, phicut=phicut, spectcut=spectcut, cmap=cmap, vmin=vmin, vmax=vmax, dlines=dlines, tit=tit, wintit=wintit, fs=fs) if returnas == 'spect': return spect1d, lambfit elif returnas == 'ax': return ax def get_plasmadomain_at_lamb( self, config=None, struct=None, domain=None, res=None, det=None, xixj_lim=None, strict=None, bragg=None, lamb=None, # for available lamb determination ndtheta=None, nlamb=None, n=None, use_non_parallelism=None, # plotting plot=None, dax=None, plot_as=None, lcolor=None, return_dax=None, ): """ Return pts in the plasma domain and a mask The mask is True only for points for which the desired wavelength is accesible from the crystal (and from the detector if strict=True and det is provided) More than one value of lamb can be provided (nlamb >= 1) pts is returned as a (3, npts) array lambok is returned as a (nlamb, npts) array """ # ------------ # check inputs struct = _check_optics._check_config_get_Ves( config=config, struct=struct, ) bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) lamb = self.get_lamb_from_bragg(bragg=bragg, n=n) # To be refined if xjlim is narrow if ndtheta is None: ndtheta = 5 # To be refined if xilim is narrow if nlamb is None: nlamb = 11 if strict is None: strict = True if plot is None: plot = True if return_dax is None: return_dax = plot is True # ------------- # sample volume ( pts, dV, ind, (resR, resZ, resPhi), ) = config.dStruct['dObj']['Ves'][struct].get_sampleV( res=res, domain=domain, returnas='(R, Z, Phi)', ) # ------------------------------ # check access from crystal only ptsXYZ = np.array([ pts[0, :]*np.cos(pts[2, :]), pts[0, :]*np.sin(pts[2, :]), pts[1, :], ]) lamb_access = self.get_lamb_avail_from_pts( pts=ptsXYZ, nlamb=2, use_non_parallelism=use_non_parallelism, return_phidtheta=False, return_xixj=False, strict=False, ) lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool) for ii, ll in enumerate(lamb): lambok[ii, :] = ( (lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1]) ) # --------------- # refactor pts and lambok indok = np.any(lambok, axis=0) pts = pts[:, indok] ptsXYZ = ptsXYZ[:, indok] lambok = lambok[:, indok] # --------------- # check strict if strict is True: # det vs detbis if xixj_lim detbis = dict(det) if xixj_lim is not None: detbis['outline'] = np.array([ np.r_[ xixj_lim[0][0], xixj_lim[0][1]*np.r_[1, 1], xixj_lim[0][0], ], np.r_[ xixj_lim[1][0]*np.r_[1, 1], xixj_lim[1][1]*np.r_[1, 1], ], ]) detbis['outline'] = np.concatenate( (detbis['outline'], detbis['outline'][:, 0:1]), axis=1, ) # intersection with detbis for kk, ll in enumerate(lamb): lambi = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj( cryst=self, lamb=np.full((lambok[kk, :].sum(), 1), ll), n=n, ndtheta=ndtheta, pts=ptsXYZ[:, lambok[kk, :]], use_non_parallelism=use_non_parallelism, return_phidtheta=False, return_xixj=False, strict=strict, det=detbis, ) lambok[kk, lambok[kk, :]] = ~np.isnan(lambi[:, 0]) # ------- # return if plot: dax = _plot_optics.CrystalBragg_plot_plasma_domain_at_lamb( cryst=self, det=det, xixj_lim=xixj_lim, config=config, lamb=lamb, pts=pts, reseff=[resR, resZ, resPhi], lambok=lambok, dax=dax, 
plot_as=plot_as, lcolor=lcolor, ) # --------------- # return if return_dax is True: return pts, lambok, dax else: return pts, lambok def calc_signal_from_emissivity( self, emis=None, config=None, struct=None, domain=None, res=None, det=None, xixj_lim=None, strict=None, bragg=None, lamb=None, binning=None, # for available lamb determination ndtheta=None, nlamb=None, n=None, use_non_parallelism=None, # plotting plot=None, vmin=None, vmax=None, vmin_bin=None, vmax_bin=None, cmap=None, dax=None, fs=None, dmargin=None, tit=None, return_dax=None, ): """ Return pts in the plasma domain and a mask The mask is True only for points for which the desired wavelength is accesible from the crystal (and from the detector if strict=True and det is provided) More than one value of lamb can be provided (nlamb >= 1) pts is returned as a (3, npts) array lambok is returned as a (nlamb, npts) array """ # ------------ # check inputs ( struct, lamb, binning, ) = _check_optics._check_calc_signal_from_emissivity( emis=emis, config=config, struct=struct, lamb=lamb, det=det, binning=binning, ) bragg = self._checkformat_bragglamb(bragg=bragg, lamb=lamb, n=n) lamb = self.get_lamb_from_bragg(bragg=bragg, n=n) # To be refined if xjlim is narrow if ndtheta is None: ndtheta = 5 # To be refined if xilim is narrow if nlamb is None: nlamb = 11 if strict is None: strict = True if plot is None: plot = True if return_dax is None: return_dax = plot is True # ------------- # sample volume ( pts, dV, ind, (resR, resZ, resPhi), ) = config.dStruct['dObj']['Ves'][struct].get_sampleV( res=res, domain=domain, returnas='(R, Z, Phi)', ) # ------------------------------ # check access from crystal only ptsXYZ = np.array([ pts[0, :]*np.cos(pts[2, :]), pts[0, :]*np.sin(pts[2, :]), pts[1, :], ]) lamb_access = self.get_lamb_avail_from_pts( pts=ptsXYZ, nlamb=2, use_non_parallelism=use_non_parallelism, return_phidtheta=False, return_xixj=False, strict=False, ) lambok = np.zeros((lamb.size, pts.shape[1]), dtype=bool) for ii, ll in enumerate(lamb): lambok[ii, :] = ( (lamb_access[:, 0] <= ll) & (ll <= lamb_access[:, 1]) ) # --------------- # refactor pts and lambok indok = np.any(lambok, axis=0) pts = pts[:, indok] ptsXYZ = ptsXYZ[:, indok] lambok = lambok[:, indok] # --------------- # check strict # det vs detbis if xixj_lim detbis = dict(det) if xixj_lim is not None: detbis['outline'] = np.array([ np.r_[ xixj_lim[0][0], xixj_lim[0][1]*np.r_[1, 1], xixj_lim[0][0], ], np.r_[ xixj_lim[1][0]*np.r_[1, 1], xixj_lim[1][1]*np.r_[1, 1], ], ]) detbis['outline'] = np.concatenate( (detbis['outline'], detbis['outline'][:, 0:1]), axis=1, ) # intersection with detbis shape = tuple(np.r_[pts.shape[1], lamb.size, ndtheta, 2]) xi = np.full(shape, np.nan) xj = np.full(shape, np.nan) val = np.full(shape, np.nan) for kk, ll in enumerate(lamb): ( lambi, xii, xji, ) = _comp_optics._get_lamb_avail_from_pts_phidtheta_xixj( cryst=self, lamb=np.full((lambok[kk, :].sum(), 1), ll), n=n, ndtheta=ndtheta, pts=ptsXYZ[:, lambok[kk, :]], use_non_parallelism=use_non_parallelism, return_phidtheta=False, return_xixj=True, strict=True, det=detbis, ) iok = ~np.isnan(lambi[:, 0]) iokf = lambok[kk, :].nonzero()[0][iok] lambok[kk, lambok[kk, :]] = iok xi[iokf, kk, :, :] = xii[iok, 0, :, :] xj[iokf, kk, :, :] = xji[iok, 0, :, :] val[iokf, kk, :, :] = emis( r=pts[0, iokf], z=pts[1, iokf], phi=pts[2, iokf], lamb=lamb[kk:kk+1], t=None, )[:, 0, None, None] # ------- # Optional binning binned = None if binning is not False: iok = np.isfinite(val) binned = scpstats.binned_statistic_2d( 
xi[iok].ravel(), xj[iok].ravel(), val[iok].ravel(), statistic='mean', bins=binning, expand_binnumbers=False, )[0] # ------- # return if plot: dax = _plot_optics.CrystalBragg_plot_signal_from_emissivity( cryst=self, det=det, xixj_lim=xixj_lim, config=config, lamb=lamb, pts=pts, reseff=[resR, resZ, resPhi], xi=xi, xj=xj, val=val, lambok=lambok, binning=binning, binned=binned, # plotting vmin=vmin, vmax=vmax, vmin_bin=vmin_bin, vmax_bin=vmax_bin, cmap=cmap, dax=dax, fs=fs, dmargin=dmargin, tit=tit, ) # --------------- # return if return_dax is True: return pts, val, xi, xj, binned, dax else: return pts, val, xi, xj, binned @staticmethod def fit1d_dinput( dlines=None, dconstraints=None, dprepare=None, data=None, lamb=None, mask=None, domain=None, pos=None, subset=None, same_spectrum=None, same_spectrum_dlamb=None, focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None, valid_return_fract=None, ): """ Return a formatted dict of lines and constraints To be fed to _fit12d.multigausfit1d_from_dlines() Provides a user-friendly way of defining constraints """ import tofu.spectro._fit12d as _fit12d return _fit12d.fit1d_dinput( dlines=dlines, dconstraints=dconstraints, dprepare=dprepare, data=data, lamb=lamb, mask=mask, domain=domain, pos=pos, subset=subset, same_spectrum=same_spectrum, same_spectrum_dlamb=same_spectrum_dlamb, focus=focus, valid_fraction=valid_fraction, valid_nsigma=valid_nsigma, focus_half_width=focus_half_width, valid_return_fract=valid_return_fract) def fit1d( self, # Input data kwdargs data=None, lamb=None, dinput=None, dprepare=None, dlines=None, dconstraints=None, mask=None, domain=None, subset=None, pos=None, same_spectrum=None, same_spectrum_dlamb=None, focus=None, valid_fraction=None, valid_nsigma=None, focus_half_width=None, # Optimization kwdargs dx0=None, dscales=None, x0_scale=None, bounds_scale=None, method=None, tr_solver=None, tr_options=None, max_nfev=None, xtol=None, ftol=None, gtol=None, loss=None, verbose=None, chain=None, jac=None, showonly=None, # Results extraction kwdargs amp=None, coefs=None, ratio=None, Ti=None, width=None, vi=None, shift=None, pts_lamb_total=None, pts_lamb_detail=None, # Saving and plotting kwdargs save=None, name=None, path=None, plot=None, fs=None, dmargin=None, tit=None, wintit=None, returnas=None, ): # ---------------------- # Get dinput for 1d fitting from dlines, dconstraints, dprepare... 
if dinput is None:
            dinput = self.fit1d_dinput(
                dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
                data=data, lamb=lamb, mask=mask, domain=domain,
                pos=pos, subset=subset,
                focus=focus, valid_fraction=valid_fraction,
                valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
                same_spectrum=same_spectrum,
                same_spectrum_dlamb=same_spectrum_dlamb)

        # ----------------------
        # return
        import tofu.spectro._fit12d as _fit12d
        return _fit12d.fit1d(
            # Input data kwdargs
            data=data, lamb=lamb,
            dinput=dinput, dprepare=dprepare,
            dlines=dlines, dconstraints=dconstraints,
            mask=mask, domain=domain, subset=subset, pos=pos,
            # Optimization kwdargs
            method=method, tr_solver=tr_solver, tr_options=tr_options,
            xtol=xtol, ftol=ftol, gtol=gtol,
            max_nfev=max_nfev, loss=loss, chain=chain,
            dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,
            jac=jac, verbose=verbose,
            save=save, name=name, path=path,
            amp=amp, coefs=coefs, ratio=ratio,
            Ti=Ti, width=width,
            vi=vi, shift=shift,
            pts_lamb_total=pts_lamb_total,
            pts_lamb_detail=pts_lamb_detail,
            plot=plot, fs=fs, wintit=wintit, tit=tit)

    @staticmethod
    def fit1d_extract(
        dfit1d=None,
        amp=None, coefs=None, ratio=None,
        Ti=None, width=None,
        vi=None, shift=None,
        pts_lamb_total=None, pts_lamb_detail=None,
    ):
        import tofu.spectro._fit12d as _fit12d
        return _fit12d.fit1d_extract(
            dfit1d=dfit1d,
            amp=amp, coefs=coefs, ratio=ratio,
            Ti=Ti, width=width,
            vi=vi, shift=shift,
            pts_lamb_total=pts_lamb_total,
            pts_lamb_detail=pts_lamb_detail)

    def fit1d_from2d(self):
        """ Useful for optimizing detector or crystal position

        Given a set of 2d images on a detector
        Transform the 2d (xi, xj) image into (lamb, phi)
        Slice nphi 1d spectra
        Fit them using a dict of reference lines (dlines)
        Optionally provide constraints for the fitting
        Return the vertical profiles of the wavelength shift of each line
        To be used as input for a cost function and optimization

        1d fitting is used instead of 2d because:
            - faster (for optimization)
            - does not require a choice of nbsplines
            - easier to understand and decide for user
        """
        # Check / format inputs
        if lphi is None:
            msg = ("Arg lphi must be provided !")
            raise Exception(msg)

        # ----------------------
        # Prepare input data
        # (geometrical transform, domain, binning, subset, noise...)
if dprepare is None:
            dprepare = self.fit2d_prepare(
                data=data, xi=xi, xj=xj, n=n,
                det=det, dtheta=dtheta, psi=psi,
                mask=mask, domain=domain,
                pos=pos, binning=binning,
                nbsplines=False, subset=False,
                lphi=lphi, lphi_tol=lphi_tol)

        # ----------------------
        # Get dinput for 2d fitting from dlines, and dconstraints
        if dinput is None:
            dinput = self.fit2d_dinput(
                dlines=dlines, dconstraints=dconstraints,
                deg=deg, knots=knots, nbsplines=nbsplines,
                domain=dprepare['domain'],
                dataphi1d=dprepare['dataphi1d'], phi1d=dprepare['phi1d'])

        # ----------------------
        # fit
        out = self.fit1d(
            xi=None, xj=None, data=None, mask=None,
            det=None, dtheta=None, psi=None, n=None,
            nlambfit=None, nphifit=None,
            lambmin=None, lambmax=None,
            dlines=None, spect1d=None,
            dconstraints=None, dx0=None,
            same_spectrum=None, dlamb=None,
            double=None,
            dscales=None, x0_scale=None, bounds_scale=None,
            method=None, max_nfev=None,
            xtol=None, ftol=None, gtol=None,
            loss=None, verbose=0, chain=None,
            jac=None, showonly=None,
            plot=None, fs=None, dmargin=None,
            tit=None, wintit=None, returnas=None,
        )
        pass

    def fit2d_dinput(
        self,
        dlines=None, dconstraints=None, dprepare=None,
        data=None, xi=None, xj=None, n=None, det=None,
        dtheta=None, psi=None,
        use_non_parallelism=None,
        mask=None, domain=None, pos=None, binning=None, subset=None,
        # lphi=None, lphi_tol=None,
        deg=None, knots=None, nbsplines=None,
        focus=None, valid_fraction=None, valid_nsigma=None,
        focus_half_width=None, valid_return_fract=None,
    ):
        """ Return a formatted dict of lines and constraints

        To be fed to _fit12d.multigausfit2d_from_dlines()
        Provides a user-friendly way of defining constraints
        """
        import tofu.spectro._fit12d as _fit12d

        if dprepare is None:
            # ----------------------
            # Geometrical transform
            xi, xj, (xii, xjj) = _comp_optics._checkformat_xixj(xi, xj)
            nxi = xi.size if xi is not None else np.unique(xii).size
            nxj = xj.size if xj is not None else np.unique(xjj).size

            # Compute lamb / phi
            bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
                xi=xii, xj=xjj, det=det,
                dtheta=dtheta, psi=psi,
                use_non_parallelism=use_non_parallelism,
                n=n,
                grid=True,
                return_lamb=True,
            )

            # ----------------------
            # Prepare input data (domain, binning, subset, noise...)
dprepare = _fit12d.multigausfit2d_from_dlines_prepare(
                data, lamb, phi,
                mask=mask, domain=domain,
                pos=pos, binning=binning,
                nbsplines=nbsplines, subset=subset,
                nxi=nxi, nxj=nxj,
            )   # , lphi=lphi, lphi_tol=lphi_tol)

        return _fit12d.fit2d_dinput(
            dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
            deg=deg, knots=knots, nbsplines=nbsplines,
            focus=focus, valid_fraction=valid_fraction,
            valid_nsigma=valid_nsigma, focus_half_width=focus_half_width,
            valid_return_fract=valid_return_fract)

    def fit2d(
        self,
        # Input data kwdargs
        data=None, xi=None, xj=None, det=None,
        dtheta=None, psi=None, n=None,
        dinput=None, dprepare=None, dlines=None, dconstraints=None,
        mask=None, domain=None, subset=None, pos=None, binning=None,
        focus=None, valid_fraction=None, valid_nsigma=None,
        focus_half_width=None,
        deg=None, knots=None, nbsplines=None,
        # Optimization kwdargs
        dx0=None, dscales=None, x0_scale=None, bounds_scale=None,
        method=None, tr_solver=None, tr_options=None,
        max_nfev=None, xtol=None, ftol=None, gtol=None,
        loss=None, verbose=None, chain=None,
        jac=None, showonly=None,
        predeclare=None, debug=None,
        # Results extraction kwdargs
        amp=None, coefs=None, ratio=None,
        Ti=None, width=None,
        vi=None, shift=None,
        pts_lamb_total=None, pts_lamb_detail=None,
        # Saving and plotting kwdargs
        save=None, name=None, path=None,
        plot=None, fs=None, dmargin=None,
        tit=None, wintit=None, returnas=None,
    ):
        # npts=None, dax=None,
        # spect1d=None, nlambfit=None,
        # plotmode=None, angunits=None, indspect=None,
        # cmap=None, vmin=None, vmax=None):
        """ Perform 2d fitting of a 2d spectrometer image

        Fit the spectrum by a sum of Gaussians
        Modulate each Gaussian's parameters by bsplines in the spatial
        direction

        data must be provided in shape (nt, nxi, nxj), where:
            - nt is the number of time steps
            - nxi is the nb. of pixels in the horizontal / spectral direction
            - nxj is the nb. of pixels in the vertical / spatial direction
        """

        # ----------------------
        # Geometrical transform in dprepare
        if dinput is None:
            dinput = self.fit2d_dinput(
                dlines=dlines, dconstraints=dconstraints, dprepare=dprepare,
                data=data, xi=xi, xj=xj, n=n,
                det=det, dtheta=dtheta, psi=psi,
                mask=mask, domain=domain,
                pos=pos, binning=binning, subset=subset,
                deg=deg, knots=knots, nbsplines=nbsplines,
                focus=focus, valid_fraction=valid_fraction,
                valid_nsigma=valid_nsigma,
                focus_half_width=focus_half_width)

        # ----------------------
        # return
        import tofu.spectro._fit12d as _fit12d
        return _fit12d.fit2d(
            dinput=dinput, dprepare=dprepare,
            dlines=dlines, dconstraints=dconstraints,
            data=data, mask=mask,
            nxi=dinput['dprepare']['nxi'], nxj=dinput['dprepare']['nxj'],
            domain=domain, pos=pos, binning=binning, subset=subset,
            deg=deg, knots=knots, nbsplines=nbsplines,
            method=method, tr_solver=tr_solver, tr_options=tr_options,
            xtol=xtol, ftol=ftol, gtol=gtol,
            max_nfev=max_nfev, loss=loss, chain=chain,
            dx0=dx0, x0_scale=x0_scale, bounds_scale=bounds_scale,
            jac=jac, verbose=verbose,
            save=save, name=name, path=path, plot=plot)

    @staticmethod
    def fit2d_extract(dfit2d=None,
                      amp=None, Ti=None, vi=None,
                      pts_phi=None, npts_phi=None,
                      pts_lamb_phi_total=None, pts_lamb_phi_detail=None):
        import tofu.spectro._fit12d as _fit12d
        return _fit12d.fit2d_extract_data(
            dfit2d=dfit2d,
            amp=amp, Ti=Ti, vi=vi,
            pts_phi=pts_phi, npts_phi=npts_phi,
            pts_lamb_phi_total=pts_lamb_phi_total,
            pts_lamb_phi_detail=pts_lamb_phi_detail)

    def fit2d_plot(self, dfit2d=None, ratio=None,
                   amp=None, Ti=None, vi=None,
                   pts_lamb_phi_total=None, pts_lamb_phi_detail=None,
                   dax=None, plotmode=None, angunits=None,
                   cmap=None, vmin=None, vmax=None,
                   dmargin=None, tit=None, wintit=None, fs=None):
        dout = self.fit2d_extract(
            dfit2d,
            amp=amp, Ti=Ti, vi=vi,
            pts_lamb_phi_total=pts_lamb_phi_total,
            pts_lamb_phi_detail=pts_lamb_phi_detail)
        return _plot_optics.CrystalBragg_plot_data_fit2d(
            dfit2d=dfit2d, dout=dout, ratio=ratio,
            dax=dax, plotmode=plotmode, angunits=angunits,
            cmap=cmap, vmin=vmin, vmax=vmax,
            dmargin=dmargin, tit=tit, wintit=wintit, fs=fs)

    def noise_analysis(
        self,
        data=None, xi=None, xj=None, n=None,
        det=None, dtheta=None, psi=None,
        use_non_parallelism=None,
        mask=None, valid_fraction=None, nxerrbin=None,
        margin=None, domain=None, nlamb=None,
        deg=None, knots=None, nbsplines=None,
        loss=None, max_nfev=None,
        xtol=None, ftol=None, gtol=None,
        method=None, tr_solver=None, tr_options=None,
        verbose=None, plot=None,
        ms=None, dcolor=None,
        dax=None, fs=None, dmargin=None,
        wintit=None, tit=None, sublab=None,
        save_fig=None, name_fig=None, path_fig=None,
        fmt=None, return_dax=None,
    ):

        # ----------------------
        # Geometrical transform
        bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
            xi=xi, xj=xj, det=det,
            dtheta=dtheta, psi=psi,
            use_non_parallelism=use_non_parallelism,
            n=n,
            grid=True,
            return_lamb=True,
        )

        import tofu.spectro._fit12d as _fit12d
        return _fit12d.noise_analysis_2d(
            data, lamb, phi,
            mask=mask, valid_fraction=valid_fraction,
            margin=margin, nxerrbin=nxerrbin,
            nlamb=nlamb, deg=deg, knots=knots, nbsplines=nbsplines,
            loss=loss, max_nfev=max_nfev,
            xtol=xtol, ftol=ftol, gtol=gtol,
            method=method, tr_solver=tr_solver, tr_options=tr_options,
            verbose=verbose, plot=plot,
            ms=ms, dcolor=dcolor,
            dax=dax, fs=fs, dmargin=dmargin,
            wintit=wintit, tit=tit, sublab=sublab,
            save_fig=save_fig, name_fig=name_fig,
            path_fig=path_fig, fmt=fmt, return_dax=return_dax)

    @staticmethod
    def noise_analysis_plot(
        dnoise=None, margin=None, valid_fraction=None,
        ms=None, dcolor=None,
        dax=None, fs=None, dmargin=None,
        wintit=None, tit=None, sublab=None,
        save=None, name=None, path=None,
fmt=None,
    ):
        import tofu.spectro._plot as _plot_spectro
        return _plot_spectro.plot_noise_analysis(
            dnoise=dnoise, margin=margin, valid_fraction=valid_fraction,
            ms=ms, dcolor=dcolor,
            dax=dax, fs=fs, dmargin=dmargin,
            wintit=wintit, tit=tit, sublab=sublab,
            save=save, name=name, path=path, fmt=fmt)

    def noise_analysis_scannbs(
        self,
        data=None, xi=None, xj=None, n=None,
        det=None, dtheta=None, psi=None,
        use_non_parallelism=None,
        mask=None, nxerrbin=None,
        domain=None, nlamb=None,
        deg=None, knots=None, nbsplines=None, lnbsplines=None,
        loss=None, max_nfev=None,
        xtol=None, ftol=None, gtol=None,
        method=None, tr_solver=None, tr_options=None,
        verbose=None, plot=None,
        ms=None, dax=None, fs=None, dmargin=None,
        wintit=None, tit=None, sublab=None,
        save_fig=None, name_fig=None, path_fig=None,
        fmt=None, return_dax=None,
    ):

        # ----------------------
        # Geometrical transform
        bragg, phi, lamb = self.get_lambbraggphi_from_ptsxixj_dthetapsi(
            xi=xi, xj=xj, det=det,
            dtheta=0, psi=0,
            use_non_parallelism=use_non_parallelism,
            n=n,
            grid=True,
            return_lamb=True,
        )

        import tofu.spectro._fit12d as _fit12d
        return _fit12d.noise_analysis_2d_scannbs(
            data, lamb, phi,
            mask=mask, nxerrbin=nxerrbin, nlamb=nlamb,
            deg=deg, knots=knots, nbsplines=nbsplines,
            lnbsplines=lnbsplines,
            loss=loss, max_nfev=max_nfev,
            xtol=xtol, ftol=ftol, gtol=gtol,
            method=method, tr_solver=tr_solver, tr_options=tr_options,
            verbose=verbose, plot=plot,
            ms=ms, dax=dax, fs=fs, dmargin=dmargin,
            wintit=wintit, tit=tit, sublab=sublab,
            save_fig=save_fig, name_fig=name_fig,
            path_fig=path_fig, fmt=fmt, return_dax=return_dax)

    @staticmethod
    def noise_analysis_scannbs_plot(
        dnoise_scan=None, ms=None,
        dax=None, fs=None, dmargin=None,
        wintit=None, tit=None, sublab=None,
        save=None, name=None, path=None, fmt=None,
    ):
        import tofu.spectro._plot as _plot_spectro
        return _plot_spectro.plot_noise_analysis_scannbs(
            dnoise=dnoise_scan, ms=ms,
            dax=dax, fs=fs, dmargin=dmargin,
            wintit=wintit, tit=tit, sublab=sublab,
            save=save, name=name, path=path, fmt=fmt)
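
# ---------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the class above):
# how the wavelength-availability query is typically chained, assuming a
# CrystalBragg-like instance `cryst` already exists. All names and the
# numerical values below are illustrative assumptions.
#
#   import numpy as np
#
#   # plasma points as a (3, npts) array of (x, y, z) coordinates
#   pts = np.array([[2.0, 2.1], [0.0, 0.1], [0.0, 0.05]])
#
#   # ideal detector for a reference wavelength (value is a placeholder)
#   det = cryst.get_detector_ideal(lamb=3.95e-10)
#
#   # (npts, 2) array of [lambmin, lambmax] accessible from each point,
#   # mirroring the internal call made by get_plasmadomain_at_lamb()
#   lamb = cryst.get_lamb_avail_from_pts(
#       pts=pts, nlamb=2, det=det,
#       return_phidtheta=False, return_xixj=False, strict=False,
#   )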
import tempfile from requre.helpers.simple_object import Simple from requre.storage import PersistentObjectStorage from requre.utils import run_command, StorageMode from tests.testbase import BaseClass class StoreFunctionOutput(BaseClass): @staticmethod @Simple.decorator_plain() def run_command_wrapper(cmd, error_message=None, cwd=None, fail=True, output=False): return run_command( cmd=cmd, error_message=error_message, cwd=cwd, fail=fail, output=output ) def test_a(self): self.assertIn("bin", Simple.decorator_plain()(run_command)("ls /", output=True)) self.assertIn("bin", Simple.decorator_plain()(run_command)("ls /", output=True)) def test_run_command_true(self): """ Test if session recording is able to store and return output from command via decorating run_command """ output = self.run_command_wrapper(cmd=["true"]) self.assertTrue(output) PersistentObjectStorage().cassette.dump() PersistentObjectStorage().cassette.mode = StorageMode.read before = str(PersistentObjectStorage().cassette.storage_object) output = self.run_command_wrapper(cmd=["true"]) after = str(PersistentObjectStorage().cassette.storage_object) self.assertTrue(output) self.assertIn("True", before) self.assertNotIn("True", after) self.assertGreater(len(before), len(after)) def test_run_command_output(self): """ check if wrapper returns proper string values in calls """ self.file_name = tempfile.mktemp() with open(self.file_name, "w") as fd: fd.write("ahoj\n") output = self.run_command_wrapper(cmd=["cat", self.file_name], output=True) self.assertIn("ahoj", output) PersistentObjectStorage().cassette.dump() PersistentObjectStorage().cassette.mode = StorageMode.read with open(self.file_name, "a") as fd: fd.write("cao\n") output = self.run_command_wrapper(cmd=["cat", self.file_name], output=True) self.assertIn("ahoj", output) self.assertNotIn("cao", output) PersistentObjectStorage().cassette.mode = StorageMode.write output = self.run_command_wrapper(cmd=["cat", self.file_name], output=True) self.assertIn("cao", output) PersistentObjectStorage().cassette.dump() PersistentObjectStorage().cassette.mode = StorageMode.read output = self.run_command_wrapper(cmd=["cat", self.file_name], output=True) self.assertIn("cao", output)
# Button mapping # Button indices start at 0 at the bottom of the remote, opposite the IR blaster # i.e. The unlit button is 0 # The indices must be from 0 to 5 only Button_HDMI = 5 Button_ChannelUp = 4 Button_ChannelDown = 3 Button_TvOnOff = 2 Button_Mute = 1 Button_Extra = 0 # Volume control # 1200 ticks per dial revolution TicksPerVolumeCommand = 12 DeadzoneTicks = 30 DeadzoneResetTimeMilliseconds = 200 PositiveVolumeDirection = "CW" # Can be "CW" or "CCW" for clockwise and counterclockwise # LEDs ButtonBrightnessPercent_ON = 60 # Percent between 0 and 100 ButtonBrightnessPercent_DIM = 15 # Percent between 0 and 100 ButtonDimTimeoutSeconds = 10 # Number of seconds before buttons dim after no activity VolumeDialBrightensLEDs = 1 # Controls whether volume dial activity brightens buttons - Can be 0 or 1 ButtonOffWhenPressedTimeSeconds = 0.5 # Number of seconds a button LED turns off for when pressed - Set to 0 to disable
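
# Hedged worked example (not read by any firmware; the helper name is
# hypothetical): combining the constants above, 1200 ticks per revolution
# and 12 ticks per command give 100 volume commands per full dial turn.
# A polling loop might translate accumulated encoder ticks like this:
#
#   def ticks_to_volume_steps(accumulated_ticks):
#       # Ignore small wiggles inside the deadzone (assumed semantics)
#       if abs(accumulated_ticks) < DeadzoneTicks:
#           return 0
#       # Truncate toward zero so CW and CCW behave symmetrically
#       steps = int(accumulated_ticks / TicksPerVolumeCommand)
#       # Flip sign if the positive direction is counterclockwise
#       return steps if PositiveVolumeDirection == "CW" else -steps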
from maju.api import api from maju.config import config_db from maju.api.healthcheck.view import ns as healthcheck from maju.api.playlist.view import ns as playlist from maju.api.statistics.view import ns as statistics from flask import Flask, Blueprint from flask_cors import CORS def create_app(config_filename=None): app = Flask(__name__) CORS(app, resources={r"/*": {"origins": "*"}}) connect_string = config_db()['database_uri'] app.config['SQLALCHEMY_DATABASE_URI'] = connect_string app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True app.config['RESTPLUS_VALIDATE'] = True app.config['RESTPLUS_MASK_SWAGGER'] = False blueprint = Blueprint('login', __name__) api.init_app(blueprint) api.add_namespace(healthcheck, "/healthcheck") api.add_namespace(playlist, "/playlist") api.add_namespace(statistics, "/statistics") app.register_blueprint(blueprint) app.teardown_appcontext(shutdown_session) return app def shutdown_session(exception=None): from maju.database import session session.remove()
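
# Hedged usage sketch: running the app factory locally. The host/port
# values below are illustrative and not taken from this project's
# configuration.
#
# if __name__ == '__main__':
#     app = create_app()
#     app.run(host='0.0.0.0', port=5000)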
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ---------------------------------------------------------------- # test_openingBraces.py # # test for openingBraces rule # ---------------------------------------------------------------- # copyright (c) 2014 - Domen Ipavec # Distributed under The MIT License, see LICENSE # ---------------------------------------------------------------- import unittest from cssqc.parser import CSSQC from cssqc.qualityWarning import QualityWarning class Test_openingBraces(unittest.TestCase): def parse(self, data): c = CSSQC({"openingBraces": "exact"}) c.parse(data) return c def test_opening_braces(self): sample = '''div { margin: 0; } span{ color: blue; } .class1 { color: #fff; } .class2 { margin: 0; } .class3 { margin: 0; padding: 0 }''' c = self.parse(sample) self.assertEqual(c.warnings, [ QualityWarning('openingBraces', 4), QualityWarning('openingBraces', 8), QualityWarning('openingBraces', 12) ])
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2016-10-05 19:41 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Pago', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('number', models.CharField(max_length=16)), ('expire_month', models.IntegerField()), ('expire_year', models.IntegerField()), ('cvv2', models.CharField(max_length=3)), ('first_name', models.CharField(max_length=100)), ('last_name', models.CharField(max_length=100)), ('total', models.DecimalField(decimal_places=2, max_digits=7)), ], ), ]
#!/usr/bin/env python3 # Import the API module import bosdyn.client # Create an sdk object (the name is arbitrary) sdk = bosdyn.client.create_standard_sdk('understanding-spot') # Create a connection to the robot robot = sdk.create_robot('192.168.50.3') # Get the client ID id_client = robot.ensure_client('robot-id') spot_id = id_client.get_id() print(f'Spot Id:\n{spot_id}') # Log into the robot robot.authenticate('student_HSD', 'dgHGcrD43SCgl') # Get the robot state state_client = robot.ensure_client('robot-state') spot_state = state_client.get_robot_state() print(f'Spot State:\n{spot_state}') # Create an estop client and get the estop status estop_client = robot.ensure_client('estop') spot_estop_status = estop_client.get_status() print(f'Spot estop status:\n{spot_estop_status}') # Create an EStop end point estop_endpoint = bosdyn.client.estop.EstopEndpoint(client=estop_client, name='my_estop', estop_timeout=9.0) estop_endpoint.force_simple_setup() print('Spot estopped') # Spot will be estopped at this point # To clear the estop, you must establish a keep-alive estop_keep_alive = bosdyn.client.estop.EstopKeepAlive(estop_endpoint) spot_estop_status = estop_client.get_status() print(f'Spot estop status:\n{spot_estop_status}') # List current leases lease_client = robot.ensure_client('lease') spot_lease_list = lease_client.list_leases() print(f'Spot lease list:\n{spot_lease_list}') # To obtain a lease lease_keep_alive = bosdyn.client.lease.LeaseKeepAlive(lease_client) lease = lease_client.acquire() spot_lease_list = lease_client.list_leases() print(f'Spot lease list:\n{spot_lease_list}') # Powering Spot on robot.power_on(timeout_sec=20) spot_is_on = robot.is_powered_on() print(f'Spot is powered { "up" if spot_is_on else "down" }') # Establish timesync robot.time_sync.wait_for_sync() # Making Spot roll over print('Spot rolling to the right in 5 seconds. PLEASE STAND CLEAR.') from bosdyn.client.robot_command import RobotCommandClient, blocking_stand command_client = robot.ensure_client(RobotCommandClient.default_service_name) import time time.sleep(5.0) # Belly-rub from bosdyn.client.robot_command import RobotCommandBuilder belly_rub = RobotCommandBuilder.battery_change_pose_command(dir_hint=1) # 1 = right / 2 = left command_client.robot_command(belly_rub) time.sleep(7.0) # EStop (cut_immediately=False will cause Spot to sit down before powering off # cut_immediately=True will cause power to be cut immediately, and Spot will # collapse) robot.power_off(cut_immediately=False)
from PyInstaller.compat import is_darwin import os if is_darwin: # Assume we're using homebrew to install GDAL and collect data files # accordingly. from PyInstaller.utils.hooks import get_homebrew_path from PyInstaller.utils.hooks import collect_system_data_files datas = collect_system_data_files( path=os.path.join(get_homebrew_path('gdal'), 'share', 'gdal'), destdir='gdal-data')
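
# Hedged usage note: PyInstaller picks up a file like this automatically
# when it is named hook-<module>.py and placed on the hook search path,
# e.g. (the directory name is illustrative):
#
#   pyinstaller --additional-hooks-dir=hooks your_app.py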
import _pickle import json INDIR = '/data/bugliarello.e/data/gqa/lxmert' OUTDIR = '/data/bugliarello.e/data/gqa/cache' f = json.load(open(INDIR + '/trainval_label2ans.json')) _pickle.dump(f, open(OUTDIR + '/trainval_label2ans.pkl', 'wb')) a2l = json.load(open(INDIR + '/trainval_ans2label.json')) _pickle.dump(a2l, open(OUTDIR + '/trainval_ans2label.pkl', 'wb')) f = json.load(open(INDIR + '/train.json')) fht = [] for e in f: fht.append({'image_id': int(e['img_id']), 'labels': [a2l[k] for k,v in e['label'].items()], 'scores': [v for k,v in e['label'].items()], 'question_id': e['question_id'], 'question': e['sent']}) _pickle.dump(fht, open(OUTDIR + '/train_target.pkl', 'wb')) f = json.load(open(INDIR + '/valid.json')) fhv = [] for e in f: fhv.append({'image_id': int(e['img_id']), 'labels': [a2l[k] for k,v in e['label'].items()], 'scores': [v for k,v in e['label'].items()], 'question_id': e['question_id'], 'question': e['sent']}) _pickle.dump(fhv, open(OUTDIR + '/val_target.pkl', 'wb')) fh = fht + fhv _pickle.dump(fh, open(OUTDIR + '/trainval_target.pkl', 'wb'))
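
# Hedged sanity check (optional; names mirror the files written above):
# read one target back and confirm the record layout.
#
#   check = _pickle.load(open(OUTDIR + '/train_target.pkl', 'rb'))
#   assert {'image_id', 'labels', 'scores', 'question_id', 'question'} \
#       <= set(check[0])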
#***************************************************************** # terraform-provider-vcloud-director # Copyright (c) 2017 VMware, Inc. All Rights Reserved. # SPDX-License-Identifier: BSD-2-Clause #***************************************************************** import grpc import errors import logging from lxml import etree from pyvcloud.vcd.vm import VM from pyvcloud.vcd.org import Org from pyvcloud.vcd.vdc import VDC from pyvcloud.vcd.vapp import VApp from vcd_client_ref import VCDClientRef from pyvcloud.vcd.client import TaskStatus from pyvcloud.vcd.client import EntityType from proto import vapp_vm_pb2 as vapp_vm_pb2 from proto import vapp_vm_pb2_grpc as vapp_vm_pb2_grpc class VappVmServicer(vapp_vm_pb2_grpc.VappVmServicer): def __init__(self, pyPluginServer): self.py_plugin_server = pyPluginServer vref = VCDClientRef() self.client = vref.get_ref() def Create(self, request, context): logging.basicConfig(level=logging.DEBUG) logging.info("__INIT__Create[VappVmServicer]") source_catalog_name = request.source_catalog_name if len(source_catalog_name) > 0: res = self.CreateFromCatalog(request, context) else: res = self.CreateFromVapp(request, context) return res def CreateFromCatalog(self, request, context): vapp_vm = VappVm(context) res = vapp_vm.create_from_catalog(request) logging.info("__DONE__Create[VappVmServicer]") return res def CreateFromVapp(self, request, context): vapp_vm = VappVm(context) res = vapp_vm.create_from_vapp(request) logging.info("__DONE__Create[VappVmServicer]") return res def Delete(self, request, context): logging.info("__INIT__Delete[VappVmServicer]") vapp_vm = VappVm(context) res = vapp_vm.delete(request) logging.info("__DONE__Delete[VappVmServicer]") return res # def Update(self, request, context): # logging.info("__INIT__Update[VappVmServicer]") # target_vm_name = request.target_vm_name # is_enabled = request.is_enabled # vapp_vm = VappVm(target_vm_name=target_vm_name) # res = vapp_vm.update() # logging.info("__DONE__Update[VappVmServicer]") # return res def Read(self, request, context): logging.info("__INIT__Read[VappVmServicer]") vapp_vm = VappVm(context) res = vapp_vm.read(request) logging.info("__DONE__Read[VappVmServicer]") return res def ModifyCPU(self, request, context): logging.info("__INIT__ModifyCPU[VappVmServicer]") vapp_vm = VappVm(context) res = vapp_vm.modify_cpu(request) logging.info("__DONE__ModifyCPU[VappVmServicer]") return res def ModifyMemory(self, request, context): logging.info("__INIT__ModifyMemory[VappVmServicer]") vapp_vm = VappVm(context) res = vapp_vm.modify_memory(request) logging.info("__DONE__ModifyMemory[VappVmServicer]") return res def PowerOn(self, request, context): logging.info("__INIT__PowerOn[VappVmServicer]") vapp_vm = VappVm(context) res = vapp_vm.power_on(request) logging.info("__DONE__PowerOn[VappVmServicer]") return res def PowerOff(self, request, context): logging.info("__INIT__PowerOff[VappVmServicer]") vapp_vm = VappVm(context) res = vapp_vm.power_off(request) logging.info("__DONE__PowerOff[VappVmServicer]") return res class VappVm: def __init__(self, context): vref = VCDClientRef() self.client = vref.get_ref() self.context = context def get_vapp_resource(self, vdc_name, vapp_name): org_resource = Org(self.client, resource=self.client.get_org()) vdc_resource = VDC( self.client, resource=org_resource.get_vdc(vdc_name)) vapp_resource_href = vdc_resource.get_resource_href( name=vapp_name, entity_type=EntityType.VAPP) return self.client.get_resource(vapp_resource_href) def create_from_catalog(self, request): 
logging.info("__INIT__create[VappVm] source_catalog_name[%s]", request.source_catalog_name) res = vapp_vm_pb2.CreateVappVmResult() res.created = False logged_in_org = self.client.get_org() org = Org(self.client, resource=logged_in_org) try: vdc_resource = org.get_vdc(request.target_vdc) vdc = VDC( self.client, name=request.target_vdc, resource=vdc_resource) vapp_resource = vdc.get_vapp(request.target_vapp) vapp = VApp( self.client, name=request.target_vapp, resource=vapp_resource) catalog_item = org.get_catalog_item(request.source_catalog_name, request.source_template_name) source_vapp_resource = self.client.get_resource( catalog_item.Entity.get('href')) specs = [{ 'source_vm_name': request.source_vm_name, 'vapp': source_vapp_resource, 'target_vm_name': request.target_vm_name, 'hostname': request.hostname, 'network': request.network, 'ip_allocation_mode': request.ip_allocation_mode, # 'storage_profile': request.storage_profile }] create_vapp_vm_resp = vapp.add_vms( specs, power_on=request.power_on, all_eulas_accepted=request.all_eulas_accepted) task_monitor = self.client.get_task_monitor() task = task_monitor.wait_for_status( task=create_vapp_vm_resp, timeout=60, poll_frequency=2, fail_on_statuses=None, expected_target_statuses=[ TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR, TaskStatus.CANCELED ], callback=None) st = task.get('status') if st != TaskStatus.SUCCESS.value: raise errors.VappVmCreateError( etree.tostring(task, pretty_print=True)) message = 'status : {0} '.format(st) logging.info(message) res.created = True except Exception as e: errmsg = '''__ERROR_create[VappVm] failed for vm {0}. __ErrorMessage__ {1}''' logging.warn(errmsg.format(request.target_vm_name, str(e))) self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT) self.context.set_details(errmsg) return res logging.info("__DONE__create[VappVm]") return res def create_from_vapp(self, request): logging.info("__INIT__create[VappVm] source_catalog_name[%s]", request.source_vapp) res = vapp_vm_pb2.CreateVappVmResult() res.created = False source_vapp_resource = self.get_vapp_resource( request.target_vdc, vapp_name=request.source_vapp) target_vapp_resource = self.get_vapp_resource( request.target_vdc, vapp_name=request.target_vapp) specs = [{ 'vapp': source_vapp_resource, 'source_vm_name': request.source_vm_name, 'target_vm_name': request.target_vm_name, 'hostname': request.hostname, 'password': request.password, 'password_auto': request.password_auto, 'password_reset': request.password_reset, 'cust_script': request.cust_script, 'network': request.network, # 'storage_profile': request.storage_profile }] try: vapp = VApp(self.client, resource=target_vapp_resource) create_vapp_vm_resp = vapp.add_vms( specs, power_on=request.power_on, all_eulas_accepted=request.all_eulas_accepted) task_monitor = self.client.get_task_monitor() task = task_monitor.wait_for_status( task=create_vapp_vm_resp, timeout=60, poll_frequency=2, fail_on_statuses=None, expected_target_statuses=[ TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR, TaskStatus.CANCELED ], callback=None) st = task.get('status') if st != TaskStatus.SUCCESS.value: raise errors.VappVmCreateError( etree.tostring(task, pretty_print=True)) message = 'status : {0} '.format(st) logging.info(message) res.created = True except Exception as e: errmsg = '''__ERROR_create[VappVm] failed for vm {0}. 
__ErrorMessage__ {1}''' logging.warn(errmsg.format(request.target_vm_name, str(e))) self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT) self.context.set_details(errmsg) return res logging.info("__DONE__create[VappVm]") return res # def update(self): # logging.info("__INIT__update[VappVm]") # res = vapp_vm_pb2.UpdateUserResult() # res.updated = False # logged_in_org = self.client.get_org() # org = Org(self.client, resource=logged_in_org) # name = self.name # is_enabled = self.is_enabled # try: # result = org.update_user(name, is_enabled) # res.updated = True # except Exception as e: # error_message = '__ERROR_update[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}'.format( # self.name, str(e)) # logging.warn(error_message) # self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT) # self.context.set_details(error_message) # return res # logging.info("__DONE__update[VappVm]") # return res def read(self, request): logging.info("__INIT__read[VappVm]") res = vapp_vm_pb2.ReadVappVmResult() res.present = False org_resource = self.client.get_org() org = Org(self.client, resource=org_resource) try: vdc_resource = org.get_vdc(request.target_vdc) vdc = VDC( self.client, name=request.target_vdc, resource=vdc_resource) vapp_resource = vdc.get_vapp(request.target_vapp) vapp = VApp( self.client, name=request.target_vapp, resource=vapp_resource) read_vapp_vm_resp = vapp.get_vm(request.target_vm_name) vm = VM(client=self.client, href=None, resource=read_vapp_vm_resp) res.present = True except Exception as e: errmsg = '__ERROR_read[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}' logging.warn(errmsg.format(request.target_vm_name, str(e))) return res logging.info("__DONE__read[VappVm]") return res def delete(self, request): logging.info("__INIT__delete[VappVm]") res = vapp_vm_pb2.DeleteVappVmResult() res.deleted = False org_resource = self.client.get_org() org = Org(self.client, resource=org_resource) try: vdc_resource = org.get_vdc(request.target_vdc) vdc = VDC( self.client, name=request.target_vdc, resource=vdc_resource) vapp_resource = vdc.get_vapp(request.target_vapp) vapp = VApp( self.client, name=request.target_vapp, resource=vapp_resource) # Before deleting power_off vm # self.power_off(request.target_vdc, request.target_vapp) # Before deleting undeploy vm self.undeploy(request) vms = [request.target_vm_name] delete_vapp_vm_resp = vapp.delete_vms(vms) task = self.client.get_task_monitor().wait_for_status( task=delete_vapp_vm_resp, timeout=60, poll_frequency=2, fail_on_statuses=None, expected_target_statuses=[ TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR, TaskStatus.CANCELED ], callback=None) st = task.get('status') if st != TaskStatus.SUCCESS.value: raise errors.VappVmDeleteError( etree.tostring(task, pretty_print=True)) message = 'delete vapp_vm status : {0} '.format(st) logging.info(message) res.deleted = True except Exception as e: res.deleted = False errmsg = '__ERROR_delete[VappVm] failed for VappVm {0}. 
__ErrorMessage__ {1}'
            logging.warn(errmsg.format(request.target_vm_name, str(e)))
            self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            self.context.set_details(errmsg)
            return res

        logging.info("__DONE__delete[VappVm]")
        return res

    def power_off(self, request):
        logging.info("__INIT__power_off[VappVm]")
        res = vapp_vm_pb2.PowerOffVappVmResult()
        res.powered_off = False
        org_resource = self.client.get_org()
        org = Org(self.client, resource=org_resource)

        try:
            vdc_resource = org.get_vdc(request.target_vdc)
            vdc = VDC(
                self.client, name=request.target_vdc, resource=vdc_resource)
            vapp_resource = vdc.get_vapp(request.target_vapp)
            vapp = VApp(
                self.client, name=request.target_vapp, resource=vapp_resource)
            vapp_vm_resource = vapp.get_vm(request.target_vm_name)
            vm = VM(self.client, resource=vapp_vm_resource)

            power_off_response = vm.undeploy()
            task = self.client.get_task_monitor().wait_for_status(
                task=power_off_response,
                timeout=60,
                poll_frequency=2,
                fail_on_statuses=None,
                expected_target_statuses=[
                    TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                    TaskStatus.CANCELED
                ],
                callback=None)

            st = task.get('status')
            if st != TaskStatus.SUCCESS.value:
                raise errors.VappVmCreateError(
                    etree.tostring(task, pretty_print=True))
            message = 'status : {0} '.format(st)
            logging.info(message)
            res.powered_off = True

        except Exception as e:
            errmsg = '__ERROR_power_off[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}'
            logging.warn(errmsg.format(request.target_vm_name, str(e)))

        logging.info("__DONE__power_off[VappVm]")
        return res

    def power_on(self, request):
        logging.info("__INIT__power_on[VappVm]")
        res = vapp_vm_pb2.PowerOnVappVmResult()
        res.powered_on = False
        org_resource = self.client.get_org()
        org = Org(self.client, resource=org_resource)

        try:
            vdc_resource = org.get_vdc(request.target_vdc)
            vdc = VDC(
                self.client, name=request.target_vdc, resource=vdc_resource)
            vapp_resource = vdc.get_vapp(request.target_vapp)
            vapp = VApp(
                self.client, name=request.target_vapp, resource=vapp_resource)
            vapp_vm_resource = vapp.get_vm(request.target_vm_name)
            vm = VM(self.client, resource=vapp_vm_resource)

            power_on_response = vm.power_on()
            task = self.client.get_task_monitor().wait_for_status(
                task=power_on_response,
                timeout=60,
                poll_frequency=2,
                fail_on_statuses=None,
                expected_target_statuses=[
                    TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                    TaskStatus.CANCELED
                ],
                callback=None)

            st = task.get('status')
            if st != TaskStatus.SUCCESS.value:
                raise errors.VappVmPowerOnError(
                    etree.tostring(task, pretty_print=True))
            message = 'status : {0} '.format(st)
            logging.info(message)
            res.powered_on = True

        except Exception as e:
            errmsg = '__ERROR_power_on[VappVm] failed for VappVm {0}.
__ErrorMessage__ {1}' logging.warn(errmsg.format(request.target_vm_name, str(e))) self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT) self.context.set_details(errmsg) logging.info("__DONE__power_on[VappVm]") return res def undeploy(self, request): logging.info("__INIT__undeploy[VappVm]") undeploy = False org_resource = self.client.get_org() org = Org(self.client, resource=org_resource) try: vdc_resource = org.get_vdc(request.target_vdc) vdc = VDC( self.client, name=request.target_vdc, resource=vdc_resource) vapp_resource = vdc.get_vapp(request.target_vapp) vapp = VApp( self.client, name=request.target_vapp, resource=vapp_resource) vapp_vm_resource = vapp.get_vm(request.target_vm_name) vm = VM(self.client, resource=vapp_vm_resource) undeploy_response = vm.undeploy() task = self.client.get_task_monitor().wait_for_status( task=undeploy_response, timeout=60, poll_frequency=2, fail_on_statuses=None, expected_target_statuses=[ TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR, TaskStatus.CANCELED ], callback=None) st = task.get('status') if st != TaskStatus.SUCCESS.value: raise errors.VappVmCreateError( etree.tostring(task, pretty_print=True)) message = 'status : {0} '.format(st) logging.info(message) undeploy = True except Exception as e: errmsg = '__ERROR_undeploy[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}' logging.warn(errmsg.format(request.target_vm_name, str(e))) logging.info("__DONE__undeploy[VappVm]") return undeploy def modify_cpu(self, request): logging.info("__INIT__modify_cpu[VappVm]") res = vapp_vm_pb2.ModifyVappVmCPUResult() res.modified = False org_resource = self.client.get_org() org = Org(self.client, resource=org_resource) try: vdc_resource = org.get_vdc(request.target_vdc) vdc = VDC( self.client, name=request.target_vdc, resource=vdc_resource) vapp_resource = vdc.get_vapp(request.target_vapp) vapp = VApp( self.client, name=request.target_vapp, resource=vapp_resource) vapp_vm_resource = vapp.get_vm(request.target_vm_name) vm = VM(self.client, resource=vapp_vm_resource) self.undeploy(request) modify_cpu_response = vm.modify_cpu(request.virtual_cpus, request.cores_per_socket) task = self.client.get_task_monitor().wait_for_status( task=modify_cpu_response, timeout=60, poll_frequency=2, fail_on_statuses=None, expected_target_statuses=[ TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR, TaskStatus.CANCELED ], callback=None) st = task.get('status') if st != TaskStatus.SUCCESS.value: raise errors.VappVmModifyCPUError( etree.tostring(task, pretty_print=True)) message = 'status : {0} '.format(st) logging.info(message) self.power_on(request) res.modified = True except Exception as e: errmsg = '__ERROR_modify_cpu[VappVm] failed for VappVm {0}. 
__ErrorMessage__ {1}'
            logging.warn(errmsg.format(request.target_vm_name, str(e)))
            self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            self.context.set_details(errmsg)

        logging.info("__DONE__modify_cpu[VappVm]")
        return res

    def modify_memory(self, request):
        logging.info("__INIT__modify_memory[VappVm]")
        res = vapp_vm_pb2.ModifyVappVmMemoryResult()
        res.modified = False
        org_resource = self.client.get_org()
        org = Org(self.client, resource=org_resource)

        try:
            vdc_resource = org.get_vdc(request.target_vdc)
            vdc = VDC(
                self.client, name=request.target_vdc, resource=vdc_resource)
            vapp_resource = vdc.get_vapp(request.target_vapp)
            vapp = VApp(
                self.client, name=request.target_vapp, resource=vapp_resource)
            vapp_vm_resource = vapp.get_vm(request.target_vm_name)
            vm = VM(self.client, resource=vapp_vm_resource)

            self.undeploy(request)
            modify_memory_response = vm.modify_memory(request.memory)
            task = self.client.get_task_monitor().wait_for_status(
                task=modify_memory_response,
                timeout=60,
                poll_frequency=2,
                fail_on_statuses=None,
                expected_target_statuses=[
                    TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                    TaskStatus.CANCELED
                ],
                callback=None)

            st = task.get('status')
            if st != TaskStatus.SUCCESS.value:
                raise errors.VappVmModifyMemoryError(
                    etree.tostring(task, pretty_print=True))
            message = 'status : {0} '.format(st)
            logging.info(message)
            self.power_on(request)
            res.modified = True

        except Exception as e:
            errmsg = '__ERROR_modify_memory[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}'
            logging.warn(errmsg.format(request.target_vm_name, str(e)))
            self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            self.context.set_details(errmsg)

        logging.info("__DONE__modify_memory[VappVm]")
        return res

    def reload_vm(self, request):
        logging.info("__INIT__reload_vm[VappVm]")
        org_resource = self.client.get_org()
        org = Org(self.client, resource=org_resource)

        try:
            vdc_resource = org.get_vdc(request.target_vdc)
            vdc = VDC(
                self.client, name=request.target_vdc, resource=vdc_resource)
            vapp_resource = vdc.get_vapp(request.target_vapp)
            vapp = VApp(
                self.client, name=request.target_vapp, resource=vapp_resource)
            vapp_vm_resource = vapp.get_vm(request.target_vm_name)
            vm = VM(self.client, resource=vapp_vm_resource)

            reload_vm_response = vm.reload()
            task = self.client.get_task_monitor().wait_for_status(
                task=reload_vm_response,
                timeout=60,
                poll_frequency=2,
                fail_on_statuses=None,
                expected_target_statuses=[
                    TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                    TaskStatus.CANCELED
                ],
                callback=None)

            st = task.get('status')
            if st != TaskStatus.SUCCESS.value:
                raise errors.VappVmReloadError(
                    etree.tostring(task, pretty_print=True))
            message = 'status : {0} '.format(st)
            logging.info(message)

        except Exception as e:
            errmsg = '__ERROR_reload_vm[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}'
            logging.warn(errmsg.format(request.target_vm_name, str(e)))
            raise errors.VappVmReloadError(
                errmsg.format(request.target_vm_name, str(e)))

        logging.info("__DONE__reload_vm[VappVm]")
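
# Hedged refactoring sketch (illustration only): every method above repeats
# the same task-monitor polling block, which could be factored into a helper
# such as the hypothetical one below (it uses only the calls already present
# in this file).
#
#   def _wait_for_task(self, task_resp, error_cls):
#       task = self.client.get_task_monitor().wait_for_status(
#           task=task_resp,
#           timeout=60,
#           poll_frequency=2,
#           fail_on_statuses=None,
#           expected_target_statuses=[
#               TaskStatus.SUCCESS,
#               TaskStatus.ABORTED,
#               TaskStatus.ERROR,
#               TaskStatus.CANCELED,
#           ],
#           callback=None)
#       if task.get('status') != TaskStatus.SUCCESS.value:
#           raise error_cls(etree.tostring(task, pretty_print=True))
#       return task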
"""Primary class defining conversion of experiment-specific behavior.""" from nwb_conversion_tools import BaseDataInterface class MyEcephysBehaviorInterface(BaseRecordingExtractorInterface): """My behavior interface specific to the ecephys experiments.""" def __init__(self): # Point to data pass def get_metadata(self): # Automatically retrieve as much metadata as possible pass def run_conversion(self): # All the custom code to write through PyNWB pass
""" 1914. Cyclically Rotating a Grid Medium You are given an m x n integer matrix gridโ€‹โ€‹โ€‹, where m and n are both even integers, and an integer k. The matrix is composed of several layers, which is shown in the below image, where each color is its own layer: A cyclic rotation of the matrix is done by cyclically rotating each layer in the matrix. To cyclically rotate a layer once, each element in the layer will take the place of the adjacent element in the counter-clockwise direction. An example rotation is shown below: Return the matrix after applying k cyclic rotations to it. Example 1: Input: grid = [[40,10],[30,20]], k = 1 Output: [[10,20],[40,30]] Explanation: The figures above represent the grid at every state. Example 2: Input: grid = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]], k = 2 Output: [[3,4,8,12],[2,11,10,16],[1,7,6,15],[5,9,13,14]] Explanation: The figures above represent the grid at every state. Constraints: m == grid.length n == grid[i].length 2 <= m, n <= 50 Both m and n are even integers. 1 <= grid[i][j] <= 5000 1 <= k <= 109 """ # V0 # V1 # https://www.jianshu.com/p/b0b3391af7b3 class Solution: def rotateGrid(self, grid, k): m = len(grid) n = len(grid[0]) result = [[0] * n for _ in range(m)] for i in range(min(m // 2, n // 2)): circle = [] # Left column circle += [(j, i) for j in range(i, m-i)] # Bottom row circle += [(m-1-i, j) for j in range(i+1, n-i)] # Right column circle += [(j, n-1-i) for j in range(m-2-i, i-1, -1)] # Top row circle += [(i, j) for j in range(n-2-i, i, -1)] for index, (x, y) in enumerate(circle): target_x, target_y = circle[(index+k) % len(circle)] result[target_x][target_y] = grid[x][y] return result # V1' # https://blog.csdn.net/Quincuntial/article/details/119783736 # V1'' # https://leetcode.com/problems/cyclically-rotating-a-grid/discuss/1299584/Python-Easy-to-understand-matrix-rotation import math class Solution: def rotateGrid(self, grid, k): def recurData(arr, i, j, m, n): start = arr[i][n-1] # If i or j lies outside the matrix if (i >= m or j >= n): return # Print First Row for p in range(n-1, i, -1): #print(start) #print(arr[i][p]) temp = arr[i][p] arr[i][p] = start start = temp #print(arr[i][p], end=" ") # Print First Column, if Last and # First Column are not same if ((n - 1) != j): for p in range(i, m-1): temp = arr[p][j] arr[p][j] = start start = temp # Print Last Row, if Last and # First Row are not same if ((m - 1) != i): for p in range(j, n-1): temp = arr[m - 1][p] arr[m - 1][p] = start start = temp # Print Last Column for p in range(m-1, i, -1): temp = arr[p][n - 1] arr[p][n - 1] = start start = temp arr[i][n-1] = start #r = k % (2*(m-1)+2*(n-1)) #recurData(arr, i + 1, j + 1, m - 1, n - 1) #recurData(grid, 0, 0, len(grid), len(grid[0])) x, y = len(grid), len(grid[0]) level = math.ceil(min(x, y)/2) for l in range(int(level)): m, n = x-2*l, y-2*l r = k % (2*m+2*n-4) #print(r) for i in range(r): recurData(grid, l, l, x-l, y-l) return grid # V1''' # https://leetcode.com/problems/cyclically-rotating-a-grid/discuss/1316844/Python-or-Faster-Than-96-or-With-Comments class Solution: def assign(self, temp, rows, cols, i, j, arr, topL, topR, bottomR, bottomL): ix = 0 # top row while j < topR[1]: temp[i][j] = arr[ix] ix += 1 j += 1 # last column while i < bottomR[0]: temp[i][j] = arr[ix] ix += 1 i += 1 # last row while j > bottomL[1]: temp[i][j] = arr[ix] ix += 1 j -= 1 # first column while i > topR[0]: temp[i][j] = arr[ix] ix += 1 i -= 1 def rotateGrid(self, grid, k): rows, cols, i, j = len(grid), len(grid[0]), 0, 0 # Marking the 4 
points, which will act as boundaries topLeft, topRight, bottomRight, bottomLeft = [0,0],[0,cols-1],[rows-1, cols-1],[rows-1, 0] temp = [[-1 for _ in range(cols)] for __ in range(rows) ] while topLeft[0] < rows//2 and topLeft[0] < cols//2: arr = [] # top row while j < topRight[1]: arr.append(grid[i][j]) j += 1 # last column while i < bottomRight[0]: arr.append(grid[i][j]) i += 1 # last row while j > bottomLeft[1]: arr.append(grid[i][j]) j -= 1 # first column while i > topRight[0]: arr.append(grid[i][j]) i -= 1 n = len(arr) arr = arr[k % n:] + arr[:k % n] # Taking modulus value self.assign(temp, rows, cols, i, j, arr,topLeft, topRight, bottomRight, bottomLeft ) i += 1 j += 1 topLeft[0] += 1 topLeft[1] += 1 topRight[0] += 1 topRight[1] -= 1 bottomRight[0] -= 1 bottomRight[1] -= 1 bottomLeft[0] -= 1 bottomLeft[1] += 1 return temp # V1'''' # https://leetcode.com/problems/cyclically-rotating-a-grid/discuss/1359971/This-was-a-tough-question!-Horrible-python-solution-that-works class Solution(object): def plus(self, num): return num + 1 def minus(self, num): return num - 1 def nothing(self, num): return num def get_all_for_layer(self, layer=0): try: cycle = [] x, y = layer, layer currentPosition = self.grid[x][y] order = [(self.plus, self.nothing), (self.nothing, self.plus), (self.minus, self.nothing), (self.nothing, self.minus)] while (x, y) not in cycle: try: if (x, y) in self.visited or x < 0 or y < 0: x = max(0, x) y = max(0, y) raise Exception("AYYY") self.grid[order[0][0](x)][order[0][1](y)] if (x, y) not in self.visited and len(cycle) == 0 or ([x, y] != cycle[0] and [x, y] != cycle[-1]): cycle.append([x, y]) if (order[0][0](x), order[0][1](y)) in self.visited: raise Exception("AYYY") x, y = order[0][0](x), order[0][1](y) except Exception as exp: order.pop(0) if len(order) == 0: break return cycle except: return [] def new_values_for_layer(self, grid, layer, amount_rotated): values = self.get_all_for_layer(layer) og_mapping = [{'prev': x} for x in values] if len(values) == 0: return values for i in range(amount_rotated % len(values)): x = values.pop(-1) values.insert(0, x) for i, val in enumerate(values): og_mapping[i]['new'] = val v = og_mapping[i] self.visited.add(tuple(v['new'])) prev_x, prev_y = v['prev'] new_x, new_y = v['new'] self.og_grid[prev_x][prev_y] = self.grid[new_x][new_y] return og_mapping def rotateGrid(self, grid, k): self.visited = set() self.grid = grid self.og_grid = [list(x) for x in grid] vals = [] gridLayers = max(len(grid), len(grid[0])) / 2 for layer in xrange(gridLayers): self.new_values_for_layer(grid, layer, k) return self.og_grid # V1''''' # https://leetcode.com/problems/cyclically-rotating-a-grid/discuss/1379013/simple-python-solution-beat-97.9 class Solution: def rotateGrid(self, grid: List[List[int]], k: int) -> List[List[int]]: m, n = len(grid), len(grid[0]) def get_list(layer, mm, nn): res = [] for i in range(nn): res.append(grid[layer][layer + i]) for i in range(1, mm - 1): res.append(grid[layer + i][layer + nn - 1]) for i in range(nn - 1, -1, -1): res.append(grid[layer + mm - 1][layer + i]) for i in range(mm - 2, 0, -1): res.append(grid[layer + i][layer]) return res def set_list(li, layer, mm, nn): idx = 0 for i in range(nn): grid[layer][layer + i] = li[idx] idx += 1 for i in range(1, mm - 1): grid[layer + i][layer + nn - 1] = li[idx] idx += 1 for i in range(nn - 1, -1, -1): grid[layer + mm - 1][layer + i] = li[idx] idx += 1 for i in range(mm - 2, 0, -1): grid[layer + i][layer] = li[idx] idx += 1 def helper(layer, step): mm, nn = m - 2 * layer, n - 2 * 
layer li = get_list(layer, mm, nn) li = li[step:] + li[:step] set_list(li, layer, mm, nn) for i in range(min(m, n) // 2): helper(i, k % (2 * (m + n - 4 * i - 2))) return grid # V2
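# V2' (added sketch): a compact reference implementation of the layer-rotation
# idea from V1, plus a quick self-check against the two examples in the problem
# statement above.  The class name Solution2 is used only to avoid shadowing the
# quoted solutions; run this block on its own, since the quoted solutions above
# assume the LeetCode runtime.
class Solution2:
    def rotateGrid(self, grid, k):
        m, n = len(grid), len(grid[0])
        result = [[0] * n for _ in range(m)]
        for i in range(min(m, n) // 2):
            # collect the layer's cells in counter-clockwise order
            circle = []
            circle += [(j, i) for j in range(i, m - i)]
            circle += [(m - 1 - i, j) for j in range(i + 1, n - i)]
            circle += [(j, n - 1 - i) for j in range(m - 2 - i, i - 1, -1)]
            circle += [(i, j) for j in range(n - 2 - i, i, -1)]
            # each cell moves k steps along its layer
            for idx, (x, y) in enumerate(circle):
                tx, ty = circle[(idx + k) % len(circle)]
                result[tx][ty] = grid[x][y]
        return result

if __name__ == '__main__':
    assert Solution2().rotateGrid([[40, 10], [30, 20]], 1) == [[10, 20], [40, 30]]
    assert Solution2().rotateGrid(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2
    ) == [[3, 4, 8, 12], [2, 11, 10, 16], [1, 7, 6, 15], [5, 9, 13, 14]]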
r"""Functions related to player motion under gravity. Typically you would want to compute *K* using the :py:meth:`strafe_K` or :py:func:`strafe_K_std` function, which is needed by many of the functions in this module. All functions in this module ignore the :math:`\Theta = 0` strafing corresponding to :math:`L - k_e \tau MA > \lVert\mathbf{v}\rVert`. The user is responsible of verifying this assumption, which is not hard to do. In addition, these routines in this module assume: - continuous time - no anglemod - strafing along a perfect straight line These assumptions make implementations much less complex, less error prone, and less information needed to perform the computations. Consult the Half-Life physics documentation at https://www.jwchong.com/hl/. """ import math import scipy.optimize as opt from pystrafe import common jumpspeed = 268.32815729997476 jumpspeedlj = 299.33259094191531 def strafe_K(L, tau, M, A): """Compute *K* based on strafing parameters. This function does not accept negative parameters. >>> strafe_K(30, 0.001, 320, 10) 181760.0 """ if L < 0 or tau < 0 or M < 0 or A < 0: raise ValueError('parameters must be > 0') L = min(L, M) LtauMA = L - tau * M * A if LtauMA <= 0: return L * L / tau return M * A * (L + LtauMA) def strafe_K_std(tau): r"""Compute K based on Half-Life default settings and airstrafing. The default settings refer to :math:`L = 30`, :math:`M = 320`, and :math:`A = 10`. This function is therefore equivalent to calling :py:func:`strafe_K` with these values as the parameters. For example, >>> strafe_K_std(0.01) == strafe_K(30, 0.01, 320, 10) True """ return strafe_K(30, tau, 320, 10) def strafe_speedxf(t, speed, K): """Compute the speed after strafing for *t* seconds. >>> K = strafe_K(30, 0.01, 320, 100) >>> '{:.10g}'.format(strafe_speedxf(3.5, 10, K)) '561.337688' >>> strafe_speedxf(338.13, 387.4, 0) 387.4 >>> strafe_speedxf(0, 505, 505) 505.0 """ if K < 0: raise ValueError('K must be > 0') return math.sqrt(speed * speed + t * K) def strafe_distance(t, speed, K): """Compute the distance after strafing for *t* seconds. The returned distance is not well defined for negative *t* and *speed*. Note that this is a constant-time continuous time approximation to the true distance a player would have travelled in Half-Life. Specifically, the Euler-Maclaurin formula is used to approximate the discrete sum of square roots, truncated to :math:`O(t^{-1/2})` accuracy. This approximation is good even at lower frame rates. >>> K = strafe_K_std(0.001) >>> '{:.10g}'.format(strafe_distance(2.5, 400, K)) '1531.650819' >>> '{:.10g}'.format(strafe_distance(1.3, 0, K)) '421.2820222' >>> strafe_distance(0, 5000, K) 0.0 >>> strafe_distance(5000, 5000, 0) 25000000.0 """ if K < 0: raise ValueError('K must be > 0') speed = math.fabs(speed) if common.float_equal(K, 0.0): return speed * t speedsq = speed * speed ret = ((speedsq + t * K) ** 1.5 - speedsq * speed) / (1.5 * K) if isinstance(ret, complex): raise ValueError('math domain error') return math.fabs(ret) def strafe_time(x, speedxi, K): """Compute the time it takes to strafe for the given distance and initial speed. Always returns positive times. 
    >>> vix = 400
    >>> x = 1000
    >>> K = strafe_K(30, 0.01, 320, 10)
    >>> tx = strafe_time(x, vix, K)
    >>> math.isclose(x, strafe_distance(tx, vix, K))
    True
    """
    if K < 0:
        raise ValueError('K must be >= 0')
    speedxi = math.fabs(speedxi)
    x = math.fabs(x)
    if common.float_zero(x):
        return 0.0
    if common.float_zero(K):
        try:
            return x / speedxi
        except ZeroDivisionError:
            return math.inf
    sq = speedxi * speedxi
    ret = ((sq * speedxi + 1.5 * K * x) ** (2 / 3) - sq) / K
    # ret < 0 can occur from the subtraction with small x and big speedxi
    return max(ret, 0.0)


def gravity_speediz_distance_time(t, z, g):
    """Compute the initial speed needed to travel to the given ``z`` position.

    z can be negative.
    """
    if common.float_zero(t) and common.float_zero(z):
        raise ValueError('indeterminate')
    try:
        return (0.5 * g * t * t + z) / t
    except ZeroDivisionError:
        return math.copysign(math.inf, z)


def gravity_time_speediz_z(speedzi, z, g):
    """Compute the time it takes to reach a height given initial vertical
    velocity.

    z can be negative.

    >>> viz = 200
    >>> z = 10
    >>> g = 800
    >>> t1, t2 = gravity_time_speediz_z(viz, z, g)
    >>> '{:.10g} {:.10g}'.format(t1, t2)
    '0.05635083269 0.4436491673'
    >>> math.isclose(z, viz * t1 - 0.5 * g * t1 * t1)
    True
    >>> math.isclose(z, viz * t2 - 0.5 * g * t2 * t2)
    True
    """
    if common.float_zero(g):
        t = z / speedzi
        return t, t
    sqrt_tmp = math.sqrt(speedzi * speedzi - 2 * g * z)
    t1 = (speedzi - sqrt_tmp) / g
    t2 = (speedzi + sqrt_tmp) / g
    return t1, t2


def strafe_solve_speedxi(speedzi, K, x, z, g):
    """Compute the initial horizontal speed needed to reach the final
    position.

    z can be negative. Automatically handles both cases where the final
    velocity is positive or negative.

    If the time it takes to reach the vertical position is longer than or
    equal to the *maximum* time it takes to reach the horizontal position
    (calculated by setting the initial horizontal speed to zero), the
    function will simply return 0.0, indicating that strafing from zero speed
    is enough to reach the final horizontal position *sooner than required*.
    In such a case, the user needs to manually "slow down" the strafing, by
    taking a longer path in 3D space, stopping strafing altogether at some
    point, or "backpedalling" in air, so that both the horizontal and
    vertical positions can hit the final position exactly at the same time.
    In other words, the user must "wait" for the vertical position to move up
    to the final position before the horizontal position should hit it.

    The result is computed using the ``brentq`` function provided by scipy.
    """
    if K < 0:
        raise ValueError('K must be >= 0')
    sqrt_tmp = math.sqrt(speedzi * speedzi - 2 * g * z)
    tz = speedzi - sqrt_tmp
    if tz < 0:
        tz = speedzi + sqrt_tmp
    if tz < 0:
        return math.nan
    tz /= g
    x = math.fabs(x)
    txmax = (1.5 * x) ** (2 / 3) * K ** (-1 / 3)
    if common.float_zero(txmax):
        return 0.0
    if common.float_equal(txmax, tz) or txmax < tz:
        return 0.0
    elif common.float_zero(tz):
        return math.inf
    # The upper bound of x / tz is the minimum _constant_ speed needed
    tmp = 1.5 * K * x
    return opt.brentq(
        lambda v: ((v ** 3 + tmp) ** (2 / 3) - v ** 2) / K - tz,
        0, x / tz)


def solve_boost_min_dmg(vi, K, x, z, g):
    """Compute the speed boost that minimises health loss.

    The minimisation is done using the ``minimize_scalar`` function provided
    by scipy running Brent's algorithm.

    The resulting curve tends to end with a negative vertical velocity.

    >>> vi = [100, 268]
    >>> K = strafe_K(30, 0.001, 320, 10)
    >>> x, z = 400, 500
    >>> g = 800
    >>> dv = solve_boost_min_dmg(vi, K, x, z, g)
    >>> '{:.5g} {:.5g}'.format(dv[0], dv[1])
    '27.394 627.83'
    >>> tz1, tz2 = gravity_time_speediz_z(vi[1] + dv[1], z, g)
    >>> tx = strafe_time(x, vi[0] + dv[0], K)
    >>> math.isclose(tz2, tx)
    True
    >>> vi[1] + dv[1] - g * tz2 < 0
    True
    """
    if K < 0:
        raise ValueError('K must be >= 0')

    def compute_dy(dx):
        tx = strafe_time(x, vix + dx, K)
        try:
            dy = gravity_speediz_distance_time(tx, z, g) - vi[1]
        except ValueError:
            dy = 0.0
        # max to simulate inequality constraint, where the final position
        # being above the minimum permissible is sufficient
        return max(dy, 0.0)

    def fun(dx):
        dy = compute_dy(dx)
        return dx * dx + dy * dy

    x = math.fabs(x)
    vix = math.fabs(vi[0])
    res = opt.minimize_scalar(fun)
    dx = max(res.x, 0.0)
    dy = compute_dy(dx)
    return [dx, dy]
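# ----------------------------------------------------------------------------
# Usage sketch (not part of the module API): estimate the airtime, final speed,
# and distance of a flat-ground jump while airstrafing at 1000 fps.  The
# 800 units/s^2 gravity and the 320 ups starting speed are assumptions
# (Half-Life defaults), not values exported by this module.
if __name__ == '__main__':
    tau = 0.001                    # frame time at 1000 fps
    g = 800.0                      # assumed default gravity
    K = strafe_K_std(tau)
    t_air = 2 * jumpspeed / g      # time to fall back to the takeoff height
    print('airtime  = {:.4f} s'.format(t_air))
    print('final vx = {:.2f} ups'.format(strafe_speedxf(t_air, 320.0, K)))
    print('distance = {:.2f} units'.format(strafe_distance(t_air, 320.0, K)))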
from Nodes.TemplateNode import TemplateNode
from ryven.NENV import *
import code
from contextlib import redirect_stdout, redirect_stderr

# this is important to ensure the path is set to the current directory so it can find the other nodes
import sys
import os
from os import walk
import glob

# open cv
import cv2

import plyfile
from plyfile import PlyData, PlyElement
import numpy as np
import meshio  # used by MeshIORead below; this import was previously missing

sys.path.append(os.path.dirname(__file__))
from Nodes.HogueTest import Hogue

widgets = import_widgets(__file__)


class GetFilename(Node):
    title = 'GetFilename'
    input_widget_classes = {
        'path input': widgets.FileInput
    }
    init_inputs = [
        NodeInputBP('path', add_data={'widget name': 'path input', 'widget pos': 'below'}),
    ]
    init_outputs = [
        NodeOutputBP(),  # was NodeInputBP, which is wrong for an output port
    ]

    def __init__(self, params):
        super().__init__(params)
        self.active = False
        self.filepath = ''
        self.actions['make executable'] = {'method': self.action_make_executable}

    def view_place_event(self):
        self.input_widget(0).path_chosen.connect(self.path_chosen)

    def path_chosen(self, new_path):
        print('path chosen')
        self.filepath = new_path
        self.update()

    def action_make_executable(self):
        self.create_input(type_='exec', insert=0)
        self.active = True
        del self.actions['make executable']
        self.actions['make passive'] = {'method': self.action_make_passive}

    def action_make_passive(self):
        self.delete_input(0)
        self.active = False
        del self.actions['make passive']
        self.actions['make executable'] = {'method': self.action_make_executable}

    def update_event(self, inp=-1):
        print('update event')
        self.set_output_val(0, self.filepath)

    def get_state(self):
        print('get state')
        return {
            **super().get_state(),
            'path': self.filepath
        }

    def set_state(self, data, version):
        print('set state')
        self.filepath = data['path']
        self.set_output_val(0, self.filepath)


class GetDirname(Node):
    title = 'GetDirname'
    input_widget_classes = {
        'path input': widgets.PathInput
    }
    init_inputs = [
        NodeInputBP('path', add_data={'widget name': 'path input', 'widget pos': 'below'}),
    ]
    init_outputs = [
        NodeOutputBP(),  # was NodeInputBP, which is wrong for an output port
    ]

    def __init__(self, params):
        super().__init__(params)
        self.active = False
        self.filepath = ''
        self.actions['make executable'] = {'method': self.action_make_executable}

    def view_place_event(self):
        self.input_widget(0).path_chosen.connect(self.path_chosen)

    def path_chosen(self, new_path):
        print('path chosen')
        self.filepath = new_path
        self.update()

    def action_make_executable(self):
        self.create_input(type_='exec', insert=0)
        self.active = True
        del self.actions['make executable']
        self.actions['make passive'] = {'method': self.action_make_passive}

    def action_make_passive(self):
        self.delete_input(0)
        self.active = False
        del self.actions['make passive']
        self.actions['make executable'] = {'method': self.action_make_executable}

    def update_event(self, inp=-1):
        print('update event')
        self.set_output_val(0, self.filepath)

    def get_state(self):
        print('get state')
        return {
            **super().get_state(),
            'path': self.filepath
        }

    def set_state(self, data, version):
        print('set state')
        self.filepath = data['path']
        self.set_output_val(0, self.filepath)


class Print_Node(Node):
    # Note: this class was an exact copy of Button_Node below and was also
    # titled 'Button'; retitled here so the two nodes can be distinguished.
    title = 'Print'
    version = 'v0.1'
    main_widget_class = widgets.ButtonNode_MainWidget
    main_widget_pos = 'between ports'

    init_inputs = [
    ]
    init_outputs = [
        NodeOutputBP()
    ]
    color = '#99dd55'

    def __init__(self, params):
        super().__init__(params)
        self.value = False

    def update_event(self, inp=-1):
        self.value = not self.value
        self.set_output_val(0, self.value)


class Button_Node(Node):
    title = 'Button'
    version = 'v0.1'
    main_widget_class = widgets.ButtonNode_MainWidget
    main_widget_pos = 'between ports'

    init_inputs = [
    ]
    init_outputs = [
        NodeOutputBP()
    ]
    color = '#99dd55'

    def __init__(self, params):
        super().__init__(params)
        self.value = False

    def update_event(self, inp=-1):
        self.value = not self.value
        self.set_output_val(0, self.value)


class GLNode(Node):
    """Prints your data"""

    # all basic properties
    title = 'OpenGL View'
    init_inputs = [
        NodeInputBP(dtype=dtypes.Float(1.0), label='rX'),
        NodeInputBP(dtype=dtypes.Float(1.0), label='rY'),
        NodeInputBP(dtype=dtypes.Float(1.0), label='rZ'),
        NodeInputBP(dtype=dtypes.Boolean(False), label='anim'),
    ]
    color = '#A9D5EF'
    main_widget_class = widgets.Custom_MainWidget
    # see API doc for a full list of all properties

    # we could also skip the constructor here
    def __init__(self, params):
        super().__init__(params)
        self.rX = 1.0
        self.rY = 1.0
        self.rZ = 1.0
        self.animating = False

    def update_event(self, inp=-1):
        self.rX = self.input(0)
        self.rY = self.input(1)
        self.rZ = self.input(2)
        self.animating = bool(self.input(3))
        self.update()


class _DynamicPorts_Node(Node):
    version = 'v0.1'
    init_inputs = []
    init_outputs = []

    def __init__(self, params):
        super().__init__(params)
        self.actions['add input'] = {'method': self.add_inp}
        self.actions['add output'] = {'method': self.add_out}
        self.num_inputs = 0
        self.num_outputs = 0

    def add_inp(self, theLabel=''):
        self.create_input(label=theLabel)
        index = self.num_inputs
        self.actions[f'remove input {index}'] = {
            'method': self.remove_inp,
            'data': index
        }
        self.num_inputs += 1

    def rename_inp(self, index, label):
        self.rename_input(index, label)

    def remove_inp(self, index):
        self.delete_input(index)
        self.num_inputs -= 1
        del self.actions[f'remove input {self.num_inputs}']

    def add_out(self, theLabel=''):
        self.create_output(label=theLabel)
        index = self.num_outputs
        self.actions[f'remove output {index}'] = {
            'method': self.remove_out,
            'data': index
        }
        self.num_outputs += 1

    def clearout(self):
        # remove from the highest index down; the previous `remove_out(i)`
        # was a NameError and counting up would skip ports as indices shift
        for i in range(self.num_outputs - 1, -1, -1):
            self.remove_out(i)

    def remove_out(self, index):
        self.delete_output(index)
        self.num_outputs -= 1
        del self.actions[f'remove output {self.num_outputs}']

    def get_state(self) -> dict:
        return {
            'num inputs': self.num_inputs,
            'num outputs': self.num_outputs,
        }

    def set_state(self, data: dict, version=None):
        # accepts the `version` argument that subclasses pass through
        print(data)
        self.num_inputs = data['num inputs']
        self.num_outputs = data['num outputs']
        print('dyn: num outputs:' + str(self.num_outputs))


class Show_Node(_DynamicPorts_Node):
    title = 'ShowNode'
    version = 'v0.1'
    main_widget_class = widgets.CodeNode_MainWidget
    main_widget_pos = 'between ports'

    init_inputs = [
        NodeInputBP(),
    ]

    def __init__(self, params):
        super().__init__(params)
        self.code = ""

    def place_event(self):
        pass

    def update_event(self, inp=-1):
        self.code = str(self.input(0))
        print('update:' + self.code)
        if self.session.gui and self.main_widget():
            self.main_widget().update_text(self.code)

    def get_state(self) -> dict:
        return {
            **super().get_state(),
            'code': self.code,
        }

    def set_state(self, data: dict, version):
        super().set_state(data, version)
        self.code = data['code']  # was never restored before being printed
        print('set_state:' + self.code)


class Livescan3dDir(_DynamicPorts_Node):
    title = 'Livescan3d Dir'
    input_widget_classes = {
        'path input': widgets.PathInput
    }
    init_inputs = [
        NodeInputBP('path', add_data={'widget name': 'path input', 'widget pos': 'below'}),
    ]

    def __init__(self, params):
        super().__init__(params)
        self.active = False
        self.dirpath = ''
        self.actions['make executable'] = {'method': self.action_make_executable}
        super().clearout()

    def place_event(self):
self.update() def view_place_event(self): self.input_widget(0).path_chosen.connect(self.path_chosen) def parseDir(self): print('parseDir') super().clearout() clientPat = self.dirpath + '/client_*' self.clients = glob.glob(clientPat) self.numClients = len(self.clients) extrinsicsPat = self.dirpath + '/Extrinsics*.log' self.extrinsicsLogName = glob.glob(extrinsicsPat)[0] print(self.clients) print(self.numClients) i=0 for (clientname) in self.clients : theName = clientname tokens = clientname.split("client_") labelName = "c" + tokens[1] super().add_out(labelName) super().set_output_val(i,theName) i = i + 1 super().add_out('Extrin') super().set_output_val(i,self.extrinsicsLogName) def path_chosen(self, new_path): print('path chosen') self.dirpath = new_path self.parseDir() self.update() def action_make_executable(self): self.create_input(type_='exec', insert=0) self.active = True del self.actions['make executable'] self.actions['make passive'] = {'method': self.action_make_passive} def action_make_passive(self): self.delete_input(0) self.active = False del self.actions['make passive'] self.actions['make executable'] = {'method': self.action_make_executable} def update_event(self, inp=-1): print('update event') #i=0 #for (clientname) in self.clients : # self.set_output_val(i, clientname) # i = i + 1 #self.set_output_val(i, self.extrinsicsLogName) def get_state(self): print('get state - saving?') return { **super().get_state(), 'path': self.dirpath, 'extrin': self.extrinsicsLogName, 'numClients':self.numClients } def set_state(self, data, version): print('set state - loading?') #super().set_state(data, version) self.dirpath = data['path'] print('dirpath'+self.dirpath) clientPat = self.dirpath + '/client_*' print('clientpat:'+clientPat) self.clients = glob.glob(clientPat) print(self.clients) self.numClients = data['numClients'] print(self.numClients) self.extrinsicsLogName = data['extrin'] i=0 for (clientname) in self.clients : theName = clientname tokens = clientname.split("client_") labelName = "c" + tokens[1] super().set_output_val(i,theName) i = i + 1 super().set_output_val(i,self.extrinsicsLogName) print('done') class CameraDirNode(_DynamicPorts_Node): title = 'CameraDirNode' init_inputs = [ NodeInputBP(), NodeInputBP('index',dtype=dtypes.Integer(default=0, bounds=(1, 9999))), ] #init_outputs = [ # NodeOutputBP(label='Intrinsics'), # NodeOutputBP(label='RGB'), # NodeOutputBP(label='Depth'), # NodeOutputBP(label='Matte'), #] def __init__(self, params): super().__init__(params) self.dir = "" self.index = 0 self.dict = None super().add_out('Intrinsics') super().add_out('RGB') super().add_out('Depth') super().add_out('Matte') super().add_out('Dict') self.pin_intrin = 0 self.pin_rgb = 1 self.pin_depth = 2 self.pin_matte = 3 self.pin_dict = 4 def resetPins(self) : for i in range(0,4,1) : super().set_output_val(i,None) def setOutputPinImageName(self, pinIndex, imName) : v = glob.glob(imName) print(v) if len(v) == 0 : return False else : value = v[0] super().set_output_val(pinIndex,value) return True def parseDir(self): #print("parse") #print(self.dir) #print(self.index) self.resetPins() pat = self.dir + "\\Intrinsics*.json" intrinMatch = glob.glob(pat) if len(intrinMatch) != 0 : intrin = intrinMatch[0] indS = str(self.index) rgbIm = self.dir + "\\Color_"+indS+".jpg" depthIm = self.dir + "\\Depth_"+indS+".tiff" matPat = rgbIm + ".matte.png" matPat2 = self.dir + "\\Color_"+indS+".matte.png" self.setOutputPinImageName(self.pin_intrin, intrin) self.setOutputPinImageName(self.pin_rgb, rgbIm) 
self.setOutputPinImageName(self.pin_depth, depthIm) #sometimes I have .jpg.matte.png and other times I have .matte.png ... uggh... FIX MATTE = matPat t = self.setOutputPinImageName(self.pin_matte, matPat) if t == False : self.setOutputPinImageName(self.pin_matte,matPat2) MATTE = matPat2 # setup dictionary pin self.dict = {'intrinsics':intrin, 'rgb':rgbIm, 'depth':depthIm, 'matte':MATTE, 'frame':self.index} super().set_output_val(self.pin_dict, self.dict) def update_event(self, inp=-1): print('update') self.dir = self.input(0) self.index = self.input(1) self.parseDir() #-----START OPENCV STUFF ----------------------------------- class CVImage: """ The OpenCV Mat(rix) data type seems to have overridden comparison operations to perform element-wise comparisons which breaks ryverncore-internal object comparisons. To avoid this, I'll simply use this wrapper class and recreate a new object every time for now, so ryvencore doesn't think two different images are the same. """ def __init__(self, img=None): self.img = img self.dtype = np.dtype('uint8') class ReadImage(Node): """Reads an image from a file""" title = 'Read Image' init_inputs = [ NodeInputBP(), NodeInputBP('isDepth', dtype=dtypes.Boolean(False)), ] init_outputs = [ NodeOutputBP('img') ] def __init__(self, params): super().__init__(params) self.image_filepath = '' self.isDepth = False def view_place_event(self): self.image_filepath = self.input(0) self.isDepth = self.input(1) self.update() #self.input_widget(0).path_chosen.connect(self.path_chosen) # self.main_widget_message.connect(self.main_widget().show_path) def update_event(self, inp=-1): self.image_filepath = self.input(0) self.isDepth = self.input(1) if self.image_filepath == '': return try: if self.isDepth : theImage = CVImage(cv2.imread(self.image_filepath, cv2.IMREAD_ANYDEPTH)) theImage.dtype = np.dtype('uint16') print(theImage.dtype) self.set_output_val(0, theImage) else : theImage = CVImage(cv2.imread(self.image_filepath, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_ANYCOLOR)) theImage.dtype = np.dtype('uint8') print(theImage.dtype) self.set_output_val(0,theImage) except Exception as e: print(e) def get_state(self): data = {'image file path': self.image_filepath} return data def set_state(self, data, version): #self.path_chosen(data['image file path']) self.image_filepath = self.input(0) class OpenCVNodeBase(Node): init_outputs = [ NodeOutputBP() ] main_widget_class = widgets.OpenCVNode_MainWidget main_widget_pos = 'below ports' def __init__(self, params): super().__init__(params) if self.session.gui: from qtpy.QtCore import QObject, Signal class Signals(QObject): new_img = Signal(object) # to send images to main_widget in gui mode self.SIGNALS = Signals() def view_place_event(self): self.SIGNALS.new_img.connect(self.main_widget().show_image) try: self.SIGNALS.new_img.emit(self.get_img()) except: # there might not be an image ready yet pass def update_event(self, inp=-1): new_img_wrp = CVImage(self.get_img()) if self.session.gui: self.SIGNALS.new_img.emit(new_img_wrp.img) self.set_output_val(0, new_img_wrp) def get_img(self): return None class DisplayImg(OpenCVNodeBase): title = 'Display Image' init_inputs = [ NodeInputBP('img'), ] def get_img(self): return self.input(0).img #-----END OPENCV STUFF ----------------------------------- #-------MATTE EXTRACTOR NODE--------------- import torch import torchvision.transforms to_tensor = torchvision.transforms.ToTensor to_pil_image = torchvision.transforms.ToPILImage from PIL import Image class MatteExtractor(Node): title = 'MatteExtractor' 
version = 'v0.1' #main_widget_class = widgets.ButtonNode_MainWidget #main_widget_pos = 'between ports' # assume these are just paths to images and model init_inputs = [ NodeInputBP('ref'), NodeInputBP('img'), NodeInputBP('model'), ] init_outputs = [ NodeOutputBP() ] def __init__(self, params): super().__init__(params) def doMatteExtract(self): print('matte extract') src = Image.open(self.imgPath) bgr = Image.open(self.refPath) src = to_tensor(src).cuda().unsqueeze(0) bgr = to_tensor(bgr).cuda().unsqueeze(0) self.theModel.backbone_scale = 1/4 self.theModel.refine_sample_pixels = 80_000 pha, fgr = self.theModel(src, bgr)[:2] self.outputFileName = self.imgPath + ".matte.png" to_pil_image(pha[0].cpu()).save(self.outputFileName) self.set_output_val(0,self.outputFileName) def update_event(self, inp=-1): ni = 0 if self.input(0) != None : self.refPath = self.input(0) ni = ni+1 if self.input(1) != None : self.imgPath = self.input(1) ni = ni+1 if self.input(2) != None : self.modelPath = self.input(2) self.theModel = torch.jit.load(self.modelPath).cuda().eval() ni = ni+1 if ni == 3 : print('ready') self.doMatteExtract() class VoxelCarveTSDF(_DynamicPorts_Node): title = 'VoxelCarveTSDF' version = 'v0.1' main_widget_class = widgets.ButtonNode_MainWidget main_widget_pos = 'between ports' # assume these are just paths to images and model init_inputs = [ ] init_outputs = [ NodeOutputBP() ] def __init__(self, params): super().__init__(params) super().add_inp('Extrinsics') super().add_inp('OutputDir') super().add_inp('Cam0') self.frameNum = 0 self.numCameras = 1 self.lastPinIndex = 2 self.firstCameraPinIndex = self.lastPinIndex self.outputDirName = "." self.main_exe = '.\\bin\\simpleTSDF.exe' self.command = self.main_exe def doVoxelCarveTSDF(self): print('doVoxelCarveTSDF') print('running command:'+self.command) os.system(self.command) def doButtonPress(self): print('doButtonPress - make command') self.command = self.main_exe extrinsics = self.input(0) self.command += ' -e '+ extrinsics self.outputDirName = self.input(1) for i in range(self.firstCameraPinIndex,self.lastPinIndex,1) : print('pin(index)='+str(i)) print(self.input(i)) cDict = self.input(i) intrin = cDict['intrinsics'] rgb = cDict['rgb'] depth = cDict['depth'] matte = cDict['matte'] self.frameNum = cDict['frame'] outputPlyName = self.outputDirName+'\\output_'+str(self.frameNum)+'.ply' self.command += ' -i '+intrin self.command += ' -r '+rgb self.command += ' -d '+depth self.command += ' -m '+matte self.command += ' -o '+outputPlyName self.doVoxelCarveTSDF() self.set_output_val(0, outputPlyName) def update_event(self, inp=-1): print('tsdf: update') print('inp:'+str(inp)) if inp == self.lastPinIndex : label = "Cam"+str(inp-1) super().add_inp(label) self.numCameras = self.numCameras + 1 self.lastPinIndex = self.lastPinIndex + 1 if inp == -1 : self.doButtonPress() #ni = 0 #if self.input(0) != None : # self.cameraList = self.input(0) # ni = ni+1 #if self.input(1) != None : # self.extrinsicsPath = self.input(1) # ni = ni+1 # #if ni == 2 : # print('ready') # self.doVoxelCarveTSDF() class PlyfileRead(Node): title = 'Plyfile Read' version = 'v0.1' #main_widget_class = widgets.CustomGL_MESHWidget #main_widget_pos = 'between ports' # assume these are just paths to images and model init_inputs = [ NodeInputBP('meshFile'), ] init_outputs = [ NodeOutputBP() ] def __init__(self, params): super().__init__(params) self.modelPath = None def update_event(self, inp=-1): if self.input(0) != None : self.modelPath = self.input(0) print('reading : '+self.modelPath) 
self.plydata = PlyData.read(self.modelPath) print('plyfile read') print(self.plydata) self.set_output_val(0,self.plydata) class MeshIORead(Node): title = 'MeshIORead' version = 'v0.1' #main_widget_class = widgets.CustomGL_MESHWidget #main_widget_pos = 'between ports' # assume these are just paths to images and model init_inputs = [ NodeInputBP('meshFile'), ] init_outputs = [ NodeOutputBP() ] def __init__(self, params): super().__init__(params) self.modelPath = None def update_event(self, inp=-1): if self.input(0) != None : self.modelPath = self.input(0) print('reading : '+self.modelPath) self.mesh = meshio.read(self.modelPath,) print('mesh read') self.set_output_val(0,self.mesh) class GLMeshView(Node): """Prints your data""" # all basic properties title = 'GLMeshView' init_inputs = [ NodeInputBP('mesh'), ] color = '#A9D5EF' main_widget_class = widgets.Custom_MESHWidget # see API doc for a full list of all properties # we could also skip the constructor here def __init__(self, params): super().__init__(params) self.mesh = None self.isChanged = False def update_event(self, inp=-1): if self.mesh != self.input(0) : self.mesh = self.input(0) self.isChanged = True self.update() class ExtrinsicsLogParse(_DynamicPorts_Node): title = 'ExtrinsicsLogParse' #input_widget_classes = { # 'path input': widgets.FileInput #} init_inputs = [ NodeInputBP('path'), ] def __init__(self, params): super().__init__(params) self.active = False self.dirpath = '' #self.actions['make executable'] = {'method': self.action_make_executable} super().clearout() def place_event(self): self.update() def readRow(self, file): line = file.readline() tokens = line.split() if len(tokens) != 4: print('error') r = np.array([float(tokens[0]), float(tokens[1]), float(tokens[2]), float(tokens[3])], dtype=float) return r def readMatrix(self,file): print('reading matrix') r1 = self.readRow(file) r2 = self.readRow(file) r3 = self.readRow(file) r4 = self.readRow(file) m = np.stack((r1,r2,r3,r4)) return m def parseLog(self): print('parseLog') super().clearout() print('file:'+self.dirpath) file = open(self.dirpath,"r") #lines = file.readlines() count = 0 self.dict = {} while True: # first line is always int int CAMERAID line = file.readline() if not line: break tokens = line.split() CAMERAID = int(tokens[2]) # now grab the next 4 lines and parse them as a 4x4 matrix m = self.readMatrix(file) print(m) self.dict[CAMERAID] = m labelName = "c"+str(CAMERAID) super().add_out(labelName) super().set_output_val(count,m) count += 1 file.close() print("found: "+str(count)+" transforms") super().add_out('Num') super().set_output_val(count, count) super().add_out('Dict') super().set_output_val(count+1,self.dict) def path_chosen(self, new_path): print('path chosen') self.dirpath = new_path self.parseLog() self.update() def update_event(self, inp=-1): print('update event') self.dirpath = self.input(0) self.parseLog() self.update() def get_state(self): print('get state - saving?') return { **super().get_state(), 'path': self.dirpath, } def set_state(self, data, version): print('set state - loading?') ##super().set_state(data, version) #self.dirpath = data['path'] #print('dirpath'+self.dirpath) #clientPat = self.dirpath + '/client_*' #print('clientpat:'+clientPat) #self.clients = glob.glob(clientPat) #print(self.clients) #self.numClients = data['numClients'] #print(self.numClients) #self.extrinsicsLogName = data['extrin'] # #i=0 #for (clientname) in self.clients : # theName = clientname # tokens = clientname.split("client_") # labelName = "c" + tokens[1] # 
super().set_output_val(i,theName) # i = i + 1 #super().set_output_val(i,self.extrinsicsLogName) #print('done') #GLViwer to rule them all gptype = plyfile.PlyData([], text=True, obj_info=["test obj_info"]) class GLViewerDynamic(_DynamicPorts_Node): title = 'GLViewerALL' version = 'v0.1' # main_widget_class = widgets.ButtonNode_MainWidget # main_widget_pos = 'between ports' # assume these are just paths to images and model init_inputs = [ ] def __init__(self, params): super().__init__(params) self.default_label = "INPUT................." super().add_inp(self.default_label) self.lastPinIndex = 0 # types we can handle self.stringType = "" self.intType = 0 self.floatType = 1.5 testim = np.ones((1,1,1),np.uint8)*255 self.plyfileType = gptype self.imageType = CVImage(testim) def doButtonPress(self): print('doButtonPress - make command') def HandleInput(self, theInput) : label = "" print(type(theInput)) print(type(self.imageType)) if type(theInput) is type(self.stringType) : label = "string" elif type(theInput) is type(self.intType) : label = "int" elif type(theInput) is type(self.floatType) : label = "float" elif type(theInput) is type(self.imageType) : label = "image" elif type(theInput) is type(self.plyfileType) : label = "plyfile" return label def update_event(self, inp=-1): if inp == self.lastPinIndex : # check input type # handle based on input type label = self.HandleInput(self.input(inp)) stype = str(type(self.input(inp))) super().rename_inp(inp, label) super().add_inp(self.default_label) self.lastPinIndex = self.lastPinIndex + 1 if inp == -1 : self.doButtonPress() #ni = 0 #if self.input(0) != None : # self.cameraList = self.input(0) # ni = ni+1 #if self.input(1) != None : # self.extrinsicsPath = self.input(1) # ni = ni+1 # #if ni == 2 : # print('ready') # self.doVoxelCarveTSDF() nodes = [ GLNode, Button_Node, Livescan3dDir, Show_Node, CameraDirNode, DisplayImg, ReadImage, MatteExtractor, GetFilename, GetDirname, VoxelCarveTSDF, MeshIORead, GLMeshView, PlyfileRead, ExtrinsicsLogParse, GLViewerDynamic, ]
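# ----------------------------------------------------------------------------
# A dependency-light sketch of the Extrinsics*.log layout that
# ExtrinsicsLogParse above expects: a header line whose third token is the
# camera id, followed by four rows of four floats.  The numbers below are
# invented for illustration only.
if __name__ == '__main__':
    import io
    sample = io.StringIO(
        '0 0 7\n'
        '1 0 0 0\n'
        '0 1 0 0\n'
        '0 0 1 0\n'
        '0 0 0 1\n'
    )
    cam_id = int(sample.readline().split()[2])
    m = np.stack([
        np.array([float(tok) for tok in sample.readline().split()])
        for _ in range(4)
    ])
    print('camera', cam_id)
    print(m)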
import sys sys.path.append('/home/myja3483/isce_tools/ISCE') import os import numpy import tops import re path = '/net/tiampostorage/volume1/MyleneShare/Bigsur_desc/az1rng2' dir_list = os.listdir(path) regex = re.compile(r'\d{8}_\d{8}') pair_dirs = [os.path.join(path, d) for d in list(filter(regex.search, dir_list))] faulty = [] for p in pair_dirs: mypair = tops.Pair.from_path(p) check = mypair.check_process() if check == False: print(os.path.basename(mypair.path) + ' failed') faulty.append(os.path.basename(mypair.path)) #write faulty list to .txt file: with open('faulty_processing.txt', 'w') as f: for item in faulty: f.write("%s\n" % item)
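# Standalone illustration of the date-pair filter used above; the directory
# names here are made up for the example.
_example = ['20180101_20180113', 'notes.txt', '20180113_20180125']
assert list(filter(regex.search, _example)) == ['20180101_20180113', '20180113_20180125']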
""" collipy ======= Provides an intuitive simple way to inject some particles """ from .particles import pdg from .helper import fit, n_sigma, breit_wigner, expon from .accelerator.accelerator import Accelerator from .injection import InjectionCollection, DecayMode from .helper import function_timer
# Base imports
from typing import Dict, List, Union, Optional

# Third-party imports
import matplotlib.pyplot as plotter

# Project imports
from src.graphics.common import build_tc_plot_file_path
from src.time_complexity.common import Number, TIME_COMPLEXITIES, TIME_RATIO_SCALE_FACTOR


def plot_time_ratios(
    problem_size: int,
    time_ratios: Dict[str, Union[str, List[Number]]],
    plot_title: str = "Execution time ratios' comparison.",
    show_plot: bool = False
) -> None:
    """Function that receives the time ratios and plots them in an x-y graph."""
    img_file_path: Optional[str] = build_tc_plot_file_path(
        str(time_ratios['data_struct_name']),
        str(time_ratios['target_name']),
        problem_size
    )
    n_values: List[int] = list(range(1, problem_size + 1))
    # Creating plotting handlers
    figure_handler, axis_handler = plotter.subplots()
    for func_name, ratios in time_ratios.items():
        # Plotting the ratios as a function of n
        if isinstance(ratios, list):
            scaled_ratios: List[Number] = [
                ratio * TIME_RATIO_SCALE_FACTOR for ratio in ratios
            ] if TIME_RATIO_SCALE_FACTOR else ratios
            axis_handler.plot(
                n_values,  # X axis
                scaled_ratios,  # Y axis
                TIME_COMPLEXITIES[func_name]['plot_pattern'],
                label=TIME_COMPLEXITIES[func_name]['plot_label']
            )
    # Setting axis labels
    axis_handler.set(
        xlabel=r"Problem size - $n$",
        ylabel=r"Ratio - $T_{meas}(n)/T_{theory}(n)$",
        title=plot_title
    )
    # Creating legend
    axis_handler.legend(
        loc='upper right',
        shadow=True,
    )
    plotter.ylim([0.0, 0.4])
    # Setting plot grid
    axis_handler.grid()
    figure_handler.savefig(img_file_path)
    print(f'Plot image successfully generated in {img_file_path}')
    if show_plot:
        plotter.show()
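# Usage sketch, left commented out because the valid TIME_COMPLEXITIES keys are
# project specific; the mapping shape below is inferred from the accesses
# above: two metadata strings plus one ratio list per complexity class.
#
# plot_time_ratios(
#     problem_size=3,
#     time_ratios={
#         'data_struct_name': 'linked_list',   # illustrative names only
#         'target_name': 'insert',
#         'linear': [0.9, 1.0, 1.1],           # key must exist in TIME_COMPLEXITIES
#     },
#     show_plot=True,
# )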
def modelling(train_df):
    # Count entries per match type, restricted to the 'normal' variants.
    # (Previously relied on a global `train_df` and discarded the result.)
    normal_matches = train_df[train_df.matchType.str.contains('normal')]
    return normal_matches.groupby(['matchType']).count()
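# A tiny illustrative call; the matchType values are invented but follow the
# PUBG-style naming that the 'normal' filter above expects.
if __name__ == '__main__':
    import pandas as pd
    _df = pd.DataFrame({
        'matchType': ['normal-solo', 'normal-duo', 'solo'],
        'id': [1, 2, 3],
    })
    print(modelling(_df))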
import io import unittest from contextlib import redirect_stdout from unittest.mock import patch class TestQ(unittest.TestCase): @patch('builtins.input', side_effect=[ '42 42', '__________________________________________', '__________________________________________', '__________________________________________', '__________________________________________', '__________________________________________', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '______#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_____', '_____#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#______', '__________________________________________', '__________________________________________', '__________________________________________', '__________________________________________', '__________________________________________', ]) def test_case_0(self, input_mock=None): text_trap = io.StringIO() with redirect_stdout(text_trap): import solution self.assertEqual(text_trap.getvalue(), '1\n') if __name__ == '__main__': unittest.main()
import logging from pathlib import Path import requests from docs_name_parser import parse_urls from settings import DOWNLOAD_FOLDER logger = logging.getLogger(__name__) def load_urls(): with open("doc_urls.txt") as f: raw_urls = f.read().splitlines() return sorted(parse_urls(raw_urls)) def retrieve_docs(folder: Path): urls = load_urls() for url in urls: logger.info(f"Getting {url.filename}...") r = requests.get(url.url) r.raise_for_status() with (folder / url.filename).open("wb") as f: f.write(r.content) if __name__ == "__main__": logging.basicConfig( format="[%(levelname)s %(asctime)s %(module)s:%(lineno)d] %(message)s", level=logging.INFO, ) downloads = DOWNLOAD_FOLDER downloads.mkdir(exist_ok=True) retrieve_docs(downloads)
# -*- coding: utf-8 -*-
import heapq


def fnHeapq():
    '''
    Finding the largest or smallest values: the heapq lib
    filters out the smallest or largest values.
    '''
    idades = [10, 14, 12, 23, 84, 23, 55, 23, 58, 49, 6]

    # the three smallest numbers
    # print(heapq.nsmallest(3, idades))

    # the three largest numbers
    # print(heapq.nlargest(3, idades))

    # turn a list into a heap - idades[0] becomes 6, the minimum
    # heapq.heapify(idades)
    # print(idades[0])

    # remove the smallest value from the heap list
    # print(heapq.heappop(idades))

    # add a value to the heap list
    # heapq.heappush(idades, 3)

    print(idades)
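def fnHeapqKey():
    '''
    Added example: nsmallest/nlargest also accept a key function, which
    makes them handy for lists of dicts.  The data below is illustrative.
    '''
    people = [
        {'name': 'Ana', 'age': 23},
        {'name': 'Bia', 'age': 84},
        {'name': 'Caio', 'age': 6},
    ]
    # the two oldest entries, ordered by the 'age' field
    print(heapq.nlargest(2, people, key=lambda p: p['age']))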
# (C) Datadog, Inc. 2021-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) from datadog_checks.aspdotnet import AspdotnetCheck from datadog_checks.base.constants import ServiceCheck from datadog_checks.dev.testing import requires_py3 from datadog_checks.dev.utils import get_metadata_metrics from .common import ASP_APP_INSTANCES, ASP_APP_METRICS, ASP_METRICS, PERFORMANCE_OBJECTS pytestmark = [requires_py3] def test(aggregator, dd_default_hostname, dd_run_check, mock_performance_objects): mock_performance_objects(PERFORMANCE_OBJECTS) check = AspdotnetCheck('aspdotnet', {}, [{'host': dd_default_hostname}]) check.hostname = dd_default_hostname dd_run_check(check) global_tags = ['server:{}'.format(dd_default_hostname)] aggregator.assert_service_check('aspdotnet.windows.perf.health', ServiceCheck.OK, count=1, tags=global_tags) for metric in ASP_METRICS: aggregator.assert_metric(metric, 9000, count=1, tags=global_tags) for metric in ASP_APP_METRICS: for instance in ASP_APP_INSTANCES[1:]: tags = ['instance:{}'.format(instance)] tags.extend(global_tags) aggregator.assert_metric(metric, 9000, count=1, tags=tags) aggregator.assert_all_metrics_covered() aggregator.assert_metrics_using_metadata(get_metadata_metrics())
################################################################################
#
# display_results.py
#
# Shows all the results and downloads them as one .csv for each experiment
#

from results import *
import argparse
import pandas as pd
import pickle


def load_results(results_path):
    """
    Loads the results and gets the accuracy and std error

    Args:
        - results_path
    Returns:
        - accuracy: accuracy for the results
        - std_error: std error for the results
    Exception:
        None
    """
    with open(results_path, 'rb') as infile:
        results = pickle.load(infile)
    return results.accuracy, results.std_error


def avg_real(scores):
    """
    Calculates the average for accuracy or std error, ignoring 'None'

    Args:
        - scores: list of accuracies or std errors
    Returns:
        - Avg if the list is nonempty and real
    Exception:
        None
    """
    scores_no_none = [x for x in scores if x != None]
    if len(scores_no_none) == 0:
        return None
    else:
        return sum(scores_no_none) / len(scores_no_none)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Display Results')

    # Gets the config label
    parser.add_argument(
        'Config_Label',
        nargs=1,
        help='Label of the configuration whose results should be displayed')
    args = parser.parse_args()
    config_label = args.Config_Label[0]

    # Note: the single-trial directory name uses a hyphen, matching the
    # paths written below.
    results_path_single_trial = './single-trial_results'
    results_path_CSWD = './CSWD_results'
    results_path_CSCD = './CSCD_results'

    # Iterates through all of the subjects and makes a csv of their results
    Accuracies_Single_Trial = []
    Errors_Single_Trial = []
    Accuracies_CSWD_0 = []
    Errors_CSWD_0 = []
    Accuracies_CSWD_50 = []
    Errors_CSWD_50 = []
    Accuracies_CSCD_0 = []
    Errors_CSCD_0 = []
    Accuracies_CSCD_50 = []
    Errors_CSCD_50 = []
    for competition in competition_names:
        for subject in competitions[competition]['subjects']:
            try:
                infile_path_single_trial = f'{results_path_single_trial}/{competition}/{subject}/{config_label}.pk1'
                acc, err = load_results(infile_path_single_trial)
                Accuracies_Single_Trial.append(acc)
                Errors_Single_Trial.append(err)
            except Exception as e:
                print(e)
                Accuracies_Single_Trial.append(None)
                Errors_Single_Trial.append(None)
                print(
                    f'Single trial tests not complete for {subject} in competition {competition}'
                )
            try:
                infile_path_CSWD_0 = f'{results_path_CSWD}/{competition}/{subject}/results_zero_{config_label}.pk1'
                acc_0, err_0 = load_results(infile_path_CSWD_0)
                Accuracies_CSWD_0.append(acc_0)
                Errors_CSWD_0.append(err_0)
                infile_path_CSWD_50 = f'{results_path_CSWD}/{competition}/{subject}/results_fifty_{config_label}.pk1'
                acc_50, err_50 = load_results(infile_path_CSWD_50)
                Accuracies_CSWD_50.append(acc_50)
                Errors_CSWD_50.append(err_50)
            except Exception as e:
                print(e)
                Accuracies_CSWD_0.append(None)
                Errors_CSWD_0.append(None)
                Accuracies_CSWD_50.append(None)
                Errors_CSWD_50.append(None)
                print(
                    f'CSWD tests not complete for {subject} in competition {competition}'
                )
            try:
                infile_path_CSCD_0 = f'{results_path_CSCD}/{competition}/{subject}/results_zero_{config_label}.pk1'
                acc_0, err_0 = load_results(infile_path_CSCD_0)
                Accuracies_CSCD_0.append(acc_0)
                Errors_CSCD_0.append(err_0)
                infile_path_CSCD_50 = f'{results_path_CSCD}/{competition}/{subject}/results_fifty_{config_label}.pk1'
                acc_50, err_50 = load_results(infile_path_CSCD_50)
                Accuracies_CSCD_50.append(acc_50)
                Errors_CSCD_50.append(err_50)
            except Exception as e:
                print(e)
                Accuracies_CSCD_0.append(None)
                Errors_CSCD_0.append(None)
                Accuracies_CSCD_50.append(None)
                Errors_CSCD_50.append(None)
                print(
                    f'CSCD tests not complete for {subject} in competition {competition}'
                )
        if competition != 'III_IVa':
            # Adds a blank for the competition for every
competition except the last Accuracies_Single_Trial.append(None) Errors_Single_Trial.append(None) Accuracies_CSWD_0.append(None) Errors_CSWD_0.append(None) Accuracies_CSWD_50.append(None) Errors_CSWD_50.append(None) Accuracies_CSCD_0.append(None) Errors_CSCD_0.append(None) Accuracies_CSCD_50.append(None) Errors_CSCD_50.append(None) labels = [] for competition_name in competition_names: labels.append(competition_name) for subject in competitions[competition_name]['subjects']: labels.append(subject) labels.append('Average') # Makes the dataframe for single trial Accuracies_Single_Trial.append(avg_real(Accuracies_Single_Trial)) Errors_Single_Trial.append(avg_real(Errors_Single_Trial)) dict_single_trial = { labels[0]: labels[1:], 'Accuracy': Accuracies_Single_Trial, 'STD Error': Errors_Single_Trial } df = pd.DataFrame.from_dict(dict_single_trial) df.to_csv('./single-trial_results/Single_Trial_Results.csv') # Makes the dataframe for CSWD Accuracies_CSWD_0.append(avg_real(Accuracies_CSWD_0)) Errors_CSWD_0.append(avg_real(Errors_CSWD_0)) Accuracies_CSWD_50.append(avg_real(Accuracies_CSWD_50)) Errors_CSWD_50.append(avg_real(Errors_CSWD_50)) dict_CSWD = { labels[0]: labels[1:], 'Accuracy Zero Trial': Accuracies_CSWD_0, 'STD Error Zero Trial': Errors_CSWD_0, 'Accuracy 50% Trial': Accuracies_CSWD_50, 'STD Error 50% Trial': Errors_CSWD_50 } df = pd.DataFrame.from_dict(dict_CSWD) df.to_csv('./CSWD_results/CSWD_Results.csv') # Makes the dataframe for CSCD Accuracies_CSCD_0.append(avg_real(Accuracies_CSCD_0)) Errors_CSCD_0.append(avg_real(Errors_CSCD_0)) Accuracies_CSCD_50.append(avg_real(Accuracies_CSCD_50)) Errors_CSCD_50.append(avg_real(Errors_CSCD_50)) dict_CSCD = { labels[0]: labels[1:], 'Accuracy Zero Trial': Accuracies_CSCD_0, 'STD Error Zero Trial': Errors_CSCD_0, 'Accuracy 50% Trial': Accuracies_CSCD_50, 'STD Error 50% Trial': Errors_CSCD_50 } df = pd.DataFrame.from_dict(dict_CSCD) df.to_csv('./CSCD_results/CSCD_Results.csv')
import argparse import os import evaluation def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--RESULTS_PATH", type=str, required=True) parser.add_argument("--RUN_NAME", type=str, required=True) parser.add_argument("--DATASET", type=str, required=True) args = parser.parse_args() return args if __name__ == "__main__": args = parse_args() results_path = args.RESULTS_PATH run_name = args.RUN_NAME dataset = args.DATASET folds_dict = {"KSDD": [0, 1, 2]} results_folder = os.path.join(results_path, dataset, run_name) print(f"Running evaluation for RUN {results_folder}") folds = folds_dict[dataset] ap, auc, fps, fns, best_t, t50_fps, t50_fns, fn0s, fn0_t = ( 0, 0, 0, 0, -1, 0, 0, 0, -1, ) for f in folds: t_dec, t_folds, t_gt, t_img_names, t_preds = evaluation.read_predictions( f, "", results_folder ) fold_eval_res = evaluation.evaluate_fold( results_folder, t_folds, t_gt, t_img_names, t_preds ) ap += fold_eval_res["ap"] auc += fold_eval_res["auc"] fps += fold_eval_res["fps"] fns += fold_eval_res["fns"] t50_fps += fold_eval_res["t50_fps"] t50_fns += fold_eval_res["t50_fns"] fn0s += fold_eval_res["fn0s"] ap /= len(folds) auc /= len(folds) print( f"RUN {run_name}: AP:{ap:.5f}, AUC:{auc:.5f}, FP={fps}, FN={fns}, FN@.5={t50_fns}, FP@.5={t50_fps}, FP@FN0={fn0s}" )
"""Liquid specific Exceptions and warnings.""" from typing import Any from typing import Dict from typing import Optional from typing import Type from typing import Union from pathlib import Path class Error(Exception): """Base class for all Liquid exceptions.""" def __init__( self, *args: object, linenum: Optional[int] = None, filename: Optional[Union[str, Path]] = None, ): self.linenum = linenum self.filename = filename super().__init__(*args) def __str__(self) -> str: msg = super().__str__() if self.linenum: msg = f"{msg}, on line {self.linenum}" if self.filename: msg += f" of {self.filename}" return msg @property def message(self) -> object: """Return the exception's error message if one was given.""" if self.args: return self.args[0] return None class LiquidInterrupt(Exception): """Loop interrupt""" class LiquidSyntaxError(Error): """Exception raised when there is a parser error.""" def __init__( self, *args: object, linenum: Optional[int] = None, filename: Optional[Union[str, Path]] = None, ): super().__init__(*args, linenum=linenum, filename=filename) self.source: Optional[str] = None @property def name(self) -> str: """Return the name of the template that raised this exception. Return an empty string if a name is not available.""" if isinstance(self.filename, Path): return self.filename.as_posix() if self.filename: return str(self.filename) return "" class LiquidTypeError(Error): """Exception raised when an error occurs at render time.""" class DisabledTagError(Error): """Exception raised when an attempt is made to render a disabled tag.""" class NoSuchFilterFunc(Error): """Exception raised when a filter lookup fails.""" class FilterError(Error): """Exception raised when a filter fails.""" class FilterArgumentError(Error): """Exception raised when a filters arguments are invalid.""" class FilterValueError(Error): """Exception raised when a filters value is invalid.""" class TemplateNotFound(Error): """Excpetions raised when a template could not be found.""" def __str__(self) -> str: msg = super().__str__() msg = f"template not found {msg}" return msg class ContextDepthError(Error): """Exception raised when the maximum context depth is reached. Usually indicates recursive use of ``render`` or ``include`` tags. 
""" class UndefinedError(Error): """Exception raised by the StrictUndefined type.""" class BreakLoop(LiquidInterrupt): """Exception raised when a BreakNode is rendered.""" class ContinueLoop(LiquidInterrupt): """Exception raised when a ContinueNode is rendered.""" class LiquidWarning(UserWarning): """Base warning.""" class LiquidSyntaxWarning(LiquidWarning): """Replaces LiquidSyntaxError when in WARN mode.""" class LiquidTypeWarning(LiquidWarning): """Replaces LiquidTypeError when in WARN mode.""" class FilterWarning(LiquidWarning): """Replaces filter exceptions when in WARN mode.""" WARNINGS: Dict[Type[Error], Type[LiquidWarning]] = { LiquidSyntaxError: LiquidSyntaxWarning, LiquidTypeError: LiquidTypeWarning, FilterArgumentError: FilterWarning, NoSuchFilterFunc: FilterWarning, } def lookup_warning(exc: Type[Error]) -> Type[LiquidWarning]: """Return a warning equivalent of the given exception.""" return WARNINGS.get(exc, LiquidWarning) def escape(val: Any) -> str: """A dummy escape function that always raises an exception.""" raise Error("autoescape requires Markupsafe to be installed") class Markup(str): """A dummy markup class that always raises an exception.""" def __init__(self, _: object): super().__init__() raise Error("autoescape requires Markupsafe to be installed") def join(self, _: object) -> str: raise Error( "autoescape requires Markupsafe to be installed" ) # pragma: no cover # pylint: disable=no-self-use,missing-function-docstring def unescape(self) -> str: raise Error( "autoescape requires Markupsafe to be installed" ) # pragma: no cover def format(self, *args: Any, **kwargs: Any) -> str: raise Error( "autoescape requires Markupsafe to be installed" ) # pragma: no cover
import operator from sympy import symbols, S import pytest from galgebra.ga import Ga from galgebra.mv import Dop, Mv from galgebra.dop import Sdop, Pdop class TestDop(object): def test_associativity_and_distributivity(self): coords = x, y, z = symbols('x y z', real=True) ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords) v = ga.mv('v', 'vector', f=True) laplacian = ga.grad * ga.grad rlaplacian = ga.rgrad * ga.rgrad # check addition distributes assert (laplacian + ga.grad) * v == laplacian * v + ga.grad * v != 0 assert (laplacian + 1234567) * v == laplacian * v + 1234567 * v != 0 assert (1234 * ex + ga.grad) * v == 1234 * ex * v + ga.grad * v != 0 # check subtraction distributes assert (laplacian - ga.grad) * v == laplacian * v - ga.grad * v != 0 assert (laplacian - 1234567) * v == laplacian * v - 1234567 * v != 0 assert (1234 * ex - ga.grad) * v == 1234 * ex * v - ga.grad * v != 0 # check unary subtraction distributes assert (-ga.grad) * v == -(ga.grad * v) != 0 # check division is associative assert v * (ga.rgrad / 2) == (v * ga.rgrad) / 2 != 0 # check multiplication is associative assert (ex * ga.grad) * v == ex * (ga.grad * v) != 0 assert (20 * ga.grad) * v == 20 * (ga.grad * v) != 0 assert v * (ga.rgrad * ex) == (v * ga.rgrad) * ex != 0 assert v * (ga.rgrad * 20) == (v * ga.rgrad) * 20 != 0 assert (laplacian * ga.grad) * v == laplacian * (ga.grad * v) != 0 # check wedge is associative assert (ex ^ ga.grad) ^ v == ex ^ (ga.grad ^ v) != 0 assert (20 ^ ga.grad) ^ v == 20 ^ (ga.grad ^ v) != 0 assert v ^ (ga.rgrad ^ ex) == (v ^ ga.rgrad) ^ ex != 0 assert v ^ (ga.rgrad ^ 20) == (v ^ ga.rgrad) ^ 20 != 0 def test_empty_dop(self): """ Test that dop with zero terms is equivalent to multiplying by zero """ coords = x, y, z = symbols('x y z', real=True) ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords) v = ga.mv('v', 'vector', f=True) make_zero = ga.dop([]) assert make_zero * v == 0 assert make_zero * make_zero * v == 0 assert (make_zero + make_zero) * v == 0 assert (-make_zero) * v == 0 def test_misc(self): """ Other miscellaneous tests """ coords = x, y, z = symbols('x y z', real=True) ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords) v = ga.mv('v', 'vector', f=True) laplacian = ga.grad * ga.grad rlaplacian = ga.rgrad * ga.rgrad # laplacian is a scalar operator, so applying it from either side # is the same assert laplacian * v == v * rlaplacian assert laplacian.is_scalar() assert not ga.grad.is_scalar() # test comparison assert ga.grad == ga.grad assert not (ga.grad != ga.grad) assert ga.grad != laplacian assert not (ga.grad == laplacian) assert ga.grad != object() assert not (ga.grad == object()) # inconsistent cmpflg, not clear which side the operator goes on with pytest.raises(ValueError): ga.grad + ga.rgrad with pytest.raises(ValueError): ga.grad * ga.rgrad def test_mixed_algebras(self): coords = x, y, z = symbols('x y z', real=True) ga1, ex1, ey1, ez1 = Ga.build('e1*x|y|z', g=[1, 1, 1], coords=coords) ga2, ex2, ey2, ez2 = Ga.build('e2*x|y|z', g=[1, 1, 1], coords=coords) assert ga1 != ga2 v1 = ga1.mv('v', 'vector', f=True) v2 = ga2.mv('v', 'vector', f=True) with pytest.raises(ValueError): ga1.grad * v2 with pytest.raises(ValueError): v1 * ga2.rgrad with pytest.raises(ValueError): ga1.grad * ga2.grad def test_components(self): coords = x, y, z = symbols('x y z', real=True) ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords) components = ga.grad.components() assert components == ( ex * (ex | ga.grad), ey * (ey | ga.grad), ez * (ez | 
            ga.grad),
        )

    def test_constructor_errors(self):
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1])

        # list lengths must match
        with pytest.raises(ValueError, match='same length'):
            Dop([ex], [], ga=ga)

        # the two conventions can't be mixed
        mixed_args = [
            (ex, Pdop({})),
            (Sdop([]), ex),
        ]
        with pytest.raises(TypeError, match='pairs'):
            Dop(mixed_args, ga=ga)

        # ga must be non-none
        with pytest.raises(ValueError, match='must not be None'):
            Dop([], ga=None)

        # too few arguments
        with pytest.raises(TypeError, match='0 were given'):
            Dop(ga=ga)

        # too many arguments
        with pytest.raises(TypeError, match='3 were given'):
            Dop(1, 2, 3, ga=ga)


class TestSdop(object):

    def test_deprecation(self):
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)

        with pytest.warns(DeprecationWarning):
            ga.sPds

    def test_shorthand(self):
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)

        assert Sdop(x) == Sdop([(S(1), Pdop({x: 1}))])

    def test_empty_sdop(self):
        """ Test that an Sdop with zero terms is equivalent to multiplying by zero """
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)
        v = ga.mv('v', 'vector', f=True)

        make_zero = Sdop([])
        assert make_zero * v == 0
        assert make_zero * make_zero * v == 0
        assert (make_zero + make_zero) * v == 0
        assert (-make_zero) * v == 0

    def test_associativity_and_distributivity(self):
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)
        v = ga.mv('v', 'vector', f=True)
        laplacian = Sdop((ga.grad * ga.grad).terms)

        # check addition distributes
        assert (laplacian + 20) * v == laplacian * v + 20 * v != 0
        assert (20 + laplacian) * v == laplacian * v + 20 * v != 0
        # check subtraction distributes
        assert (laplacian - 20) * v == laplacian * v - 20 * v != 0
        assert (20 - laplacian) * v == 20 * v - laplacian * v != 0
        # check unary subtraction distributes
        assert (-laplacian) * v == -(laplacian * v) != 0
        # check multiplication is associative
        assert (20 * laplacian) * v == 20 * (laplacian * v) != 0
        assert (laplacian * laplacian) * v == laplacian * (laplacian * v) != 0

    def test_misc(self):
        """ Other miscellaneous tests """
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)
        laplacian = ga.sdop((ga.grad * ga.grad).terms)
        lap2 = laplacian * laplacian

        # test comparison
        assert lap2 == lap2
        assert not (lap2 != lap2)
        assert lap2 != laplacian
        assert not (lap2 == laplacian)
        assert lap2 != object()
        assert not (lap2 == object())

    def test_chain_with_pdop(self):
        x, y = symbols('x y', real=True)
        s = Sdop([(x, Pdop(x)), (y, Pdop(y))])

        # right-multiplication by Pdop chains only the pdiffs
        sp = s * Pdop(x)
        assert sp == Sdop([(x, Pdop(x) * Pdop(x)), (y, Pdop(y) * Pdop(x))])

        # left-multiplication by Pdop invokes the product rule
        ps = Pdop(x) * s
        assert ps == Sdop([(x, Pdop(x) * Pdop(x)), (1, Pdop(x)), (y, Pdop(y) * Pdop(x))])

        # implicit multiplication
        assert ps == Pdop(x)(s)
        assert sp == s(Pdop(x))

        # no-op pdop
        assert s == Pdop({})(s)
        assert s == s(Pdop({}))

    def test_chain_with_mv(self):
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)
        s = Sdop([(x, Pdop(x)), (y, Pdop(y))])

        assert type(ex * s) is Sdop
        assert type(s * ex) is Mv

        # type should be preserved even when the result is 0
        assert type(ex * Sdop([])) is Sdop
        assert type(Sdop([]) * ex) is Mv

        # As discussed with brombo, these operations are not well defined - if
        # you need them, you should be using `Dop` not `Sdop`.
        for op in [operator.xor, operator.or_, operator.lt, operator.gt]:
            with pytest.raises(TypeError):
                op(ex, s)
            with pytest.raises(TypeError):
                op(s, ex)

    def test_constructor_errors(self):
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)

        # list lengths must match
        with pytest.raises(ValueError, match='same length'):
            Sdop([ex], [])

        # not a symbol or list
        with pytest.raises(TypeError, match='symbol or sequence is required'):
            Sdop(1)

        # not a pair of lists
        with pytest.raises(TypeError, match='must be lists'):
            Sdop([], 1)

        # too few arguments
        with pytest.raises(TypeError, match='0 were given'):
            Sdop()

        # too many arguments
        with pytest.raises(TypeError, match='3 were given'):
            Sdop(1, 2, 3)


class TestPdop(object):

    def test_deprecation(self):
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)

        # passing `None` is a deprecated way to spell `{}`
        with pytest.warns(DeprecationWarning):
            p = Pdop(None)
            assert p == Pdop({})

        with pytest.warns(DeprecationWarning):
            ga.Pdop_identity
        with pytest.warns(DeprecationWarning):
            ga.Pdiffs

    def test_misc(self):
        """ Other miscellaneous tests """
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)
        pxa = ga.pdop({x: 1})
        pxb = ga.pdop({x: 1})
        p1 = ga.pdop({})

        # test comparison
        assert pxa == pxb
        assert not (pxa != pxb)
        assert p1 != pxa
        assert not (p1 == pxa)
        assert pxa != object()
        assert not (pxa == object())

    def test_multiply(self):
        coords = x, y, z = symbols('x y z', real=True)
        ga, ex, ey, ez = Ga.build('e*x|y|z', g=[1, 1, 1], coords=coords)
        p = Pdop(x)

        assert x * p == Sdop([(x, p)])
        assert ex * p == Sdop([(ex, p)])

        assert p * x == p(x) == S(1)
        assert p * ex == p(ex) == S(0)
        assert type(p(ex)) is Mv

        # These are not defined, for consistency with Sdop
        for op in [operator.xor, operator.or_, operator.lt, operator.gt]:
            with pytest.raises(TypeError):
                op(ex, p)
            with pytest.raises(TypeError):
                op(p, ex)

    def test_constructor_errors(self):
        # not a symbol or dict
        with pytest.raises(TypeError, match='dictionary or symbol is required'):
            Pdop(1)
import asyncio
import websockets

connected = []
bits = []

async def handler(websocket, path):
    # Register. Note: client ids are simply list indexes, so they stay
    # aligned with `bits` only while no earlier client has disconnected.
    clientId = len(connected)
    bits.append(False)
    connected.append(websocket)
    try:
        while True:
            mssg = await websocket.recv()
            bits[clientId] = (mssg == "true")
    except websockets.ConnectionClosed:
        # Unregister.
        connected.remove(websocket)

async def update():
    # Broadcast the logical OR of all client bits, ten times per second.
    while True:
        print("Update")
        bit = any(bits)
        for ws in connected:
            await ws.send(str(bit).lower())
        await asyncio.sleep(0.1)

start_server = websockets.serve(handler, 'localhost', 8765)
loop = asyncio.get_event_loop()
loop.run_until_complete(start_server)
# asyncio.async() was deprecated and later removed; ensure_future is the replacement.
asyncio.ensure_future(update())
loop.run_forever()
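# A minimal client sketch for manual testing (an added illustration, not part
# of the original script; run it in a separate process while the server above
# is up). It sends "true" once and prints every broadcast it receives:
#
#   import asyncio
#   import websockets
#
#   async def client():
#       async with websockets.connect('ws://localhost:8765') as ws:
#           await ws.send("true")
#           while True:
#               print(await ws.recv())
#
#   asyncio.get_event_loop().run_until_complete(client())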
from .envs import env_0_args, env_1_args
from .envs import Observation
from .envs import Configuration
from .envs import Session
from .envs import Context, DefaultContext

from .constants import (
    AgentStats, AgentInit, EvolutionCase, TrainingApproach, RoiMetrics
)

from .bench_agents import test_agent

from .evolute_agent import evolute_agent
from .evolute_agent import (
    build_agent_init, gather_agent_stats,
    plot_agent_stats, plot_evolution_stats,
    plot_heat_actions, plot_roi
)

from .envs.features.time.default_time_generator import DefaultTimeGenerator
from .envs.features.time.normal_time_generator import NormalTimeGenerator

from gym.envs.registration import register

register(
    id='reco-gym-v0',
    entry_point='reco_gym.envs:RecoEnv0'
)

register(
    id='reco-gym-v1',
    entry_point='reco_gym.envs:RecoEnv1'
)
# Main logger import logging log = logging.getLogger('orgassist') from . import config from . import calendar from . import bots from . import assistant from .assistant import Assistant # Register commands from . import plugins
# -*- coding: utf-8 -*-
from __future__ import annotations

# system imports
import os.path as osp
import asyncio
import urllib.parse
from typing import Any

# external imports
import toga
from toga.style.pack import Pack
from toga.constants import ROW, COLUMN
from maestral.daemon import MaestralProxy
from maestral.models import SyncErrorEntry
from maestral.utils import sanitize_string

# local imports
from .private.widgets import Label, FollowLinkButton, Icon, Window
from .private.constants import WORD_WRAP

PADDING = 10
ICON_SIZE = 48
WINDOW_SIZE = (370, 400)


class SyncIssueView(toga.Box):
    def __init__(self, sync_err: SyncErrorEntry) -> None:
        super().__init__(style=Pack(direction=COLUMN))

        self.sync_err = sync_err

        icon = Icon(for_path=self.sync_err.local_path)

        # noinspection PyTypeChecker
        image_view = toga.ImageView(
            image=icon,
            style=Pack(
                width=ICON_SIZE,
                height=ICON_SIZE,
                padding=(0, 12, 0, 3),
            ),
        )

        path_label = Label(
            sanitize_string(osp.basename(self.sync_err.dbx_path)),
            style=Pack(
                padding_bottom=PADDING / 2,
            ),
        )
        error_label = Label(
            f"{self.sync_err.title}:\n{self.sync_err.message}",
            linebreak_mode=WORD_WRAP,
            style=Pack(
                font_size=11,
                width=WINDOW_SIZE[0] - 4 * PADDING - 15 - ICON_SIZE,
                padding_bottom=PADDING / 2,
            ),
        )

        link_local = FollowLinkButton(
            "Show in Finder",
            url=self.sync_err.local_path,
            enabled=osp.exists(self.sync_err.local_path),
            locate=True,
            style=Pack(
                padding_right=PADDING,
                font_size=11,
                height=12,
            ),
        )

        quoted_dbx_path = urllib.parse.quote(self.sync_err.dbx_path)
        dbx_address = f"https://www.dropbox.com/preview{quoted_dbx_path}"

        link_dbx = FollowLinkButton(
            "Show Online",
            url=dbx_address,
            style=Pack(font_size=11, height=12),
        )

        link_box = toga.Box(
            children=[link_local, link_dbx],
            style=Pack(direction=ROW),
        )
        info_box = toga.Box(
            children=[path_label, error_label, link_box],
            style=Pack(direction=COLUMN, flex=1),
        )
        content_box = toga.Box(
            children=[image_view, info_box],
            style=Pack(direction=ROW),
        )

        hline = toga.Divider(style=Pack(padding=(PADDING, 0, PADDING, 0)))

        self.add(content_box, hline)


class SyncIssuesWindow(Window):
    def __init__(self, mdbx: MaestralProxy, app: toga.App) -> None:
        super().__init__(title="Maestral Sync Issues", release_on_close=False, app=app)
        self.on_close = self.on_close_pressed

        self.mdbx = mdbx

        self._refresh = False
        self._refresh_interval = 1

        self._sync_issue_widgets: dict[str, SyncIssueView] = dict()

        self._placeholder = Label(
            "No sync issues 😊", style=Pack(padding_bottom=PADDING)
        )

        self.size = WINDOW_SIZE

        self.sync_errors_box = toga.Box(
            style=Pack(
                direction=COLUMN,
                padding=2 * PADDING,
            ),
        )
        self.scroll_container = toga.ScrollContainer(
            content=self.sync_errors_box,
            horizontal=False,
        )
        self.content = self.scroll_container

        self.center()
        self.refresh_gui()

    async def periodic_refresh_gui(self, sender: Any = None) -> None:
        while self._refresh:
            self.refresh_gui()
            await asyncio.sleep(self._refresh_interval)

    def _has_placeholder(self) -> bool:
        return self._placeholder in self.sync_errors_box.children

    def refresh_gui(self) -> None:
        new_errors = self.mdbx.sync_errors

        # remove placeholder if the error count > 0
        if len(new_errors) > 0 and self._has_placeholder():
            self.sync_errors_box.remove(self._placeholder)

        # add new errors
        new_err_paths: set[str] = set()
        for error in new_errors:
            new_err_paths.add(error.dbx_path)
            if error.dbx_path not in self._sync_issue_widgets:
                widget = SyncIssueView(error)
                self.sync_errors_box.add(widget)
                self._sync_issue_widgets[error.dbx_path] = widget

        # remove old errors
        for dbx_path in self._sync_issue_widgets.copy():
            if dbx_path not in new_err_paths:
                widget = self._sync_issue_widgets.pop(dbx_path)
                self.sync_errors_box.remove(widget)

        # add placeholder if we don't have any errors
        if len(new_errors) == 0 and not self._has_placeholder():
            self.sync_errors_box.add(self._placeholder)

    def on_close_pressed(self, sender: Any = None) -> bool:
        self._refresh = False
        return True

    def show(self) -> None:
        self._refresh = True
        self.app.add_background_task(self.periodic_refresh_gui)
        super().show()
import torch
import torch.nn as nn
import random
import torchvision.transforms as T


class PhotoMetricDistortion(nn.Module):
    """Color jitter plus a random RGB channel permutation, applied to the
    'image' entry of a sample dict."""

    def __init__(self, brightness: float = 0.4, contrast: float = 0.4,
                 saturation: float = 0.4, hue: float = 0.4):
        super().__init__()
        self.cj = T.ColorJitter(brightness, contrast, saturation, hue)

    def forward(self, dc: dict) -> dict:
        image = dc['image']
        image = self.cj(image)
        # with probability 0.5, shuffle the channel order of the CHW tensor
        if random.randint(0, 1):
            image = image[torch.randperm(3)]
        dc['image'] = image
        return dc


class Normalize(nn.Module):
    """Channel-wise normalization of the 'image' entry of a sample dict."""

    def __init__(self, mean: list, std: list):
        super().__init__()
        self.normalize = T.Normalize(mean, std)

    def forward(self, dc: dict) -> dict:
        image = dc['image']
        dc['image'] = self.normalize(image)
        return dc
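# A minimal usage sketch (an added illustration, not part of the original
# module): both transforms consume and return the same sample dict, so they
# compose with torchvision's T.Compose. The tensor shape and the ImageNet
# mean/std below are illustrative assumptions.
if __name__ == '__main__':
    pipeline = T.Compose([
        PhotoMetricDistortion(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    sample = {'image': torch.rand(3, 224, 224)}  # dummy CHW image in [0, 1]
    out = pipeline(sample)
    print(out['image'].shape)  # torch.Size([3, 224, 224])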
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase

from posts.models import Post, Share, Comment, Like


class PostTests(APITestCase):

    def test_create_post(self):
        """
        Ensure we can create a new post object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        url = reverse('post-list')
        data = {'text': 'DabApps', "user": user.id}
        response = self.client.post(url, data, format='json')
        response_data = response.json()
        print("COMPLETE")
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Post.objects.count(), 1)
        self.assertEqual(Post.objects.get().text, data['text'])
        self.assertEqual(response_data['text'], data['text'])

    def test_put_post(self):
        """
        Ensure we can update a post object with PUT.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="azad testing game ", user=user)
        url = reverse('post-detail', kwargs={"pk": post.id})
        data = {'text': 'For this Lagos?', "user": user.id, "post": post.id}
        response = self.client.put(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # self.assertEqual(Post.objects.count(), 1)
        # self.assertEqual(Post.objects.get().text, data['text'])
        # self.assertEqual(response_data['text'], data['text'])

    def test_patch_post(self):
        """
        Ensure we can partially update a post object with PATCH.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="it's a testing game ", user=user)
        url = reverse('post-detail', kwargs={"pk": post.id})
        data = {'text': 'For this Lagos?', "user": user.id, "post": post.id}
        response = self.client.patch(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_1post(self):
        """
        Ensure we can retrieve a single post object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="File! issa testing game ", user=user)
        url = reverse('post-detail', kwargs={"pk": post.id})
        response = self.client.get(url, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Post.objects.count(), 1)
        # self.assertEqual(response_data[0]["text"], post.text)

    def test_get_post(self):
        """
        Ensure we can retrieve a post object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="azad testing game ", user=user)
        url = reverse('post-detail', kwargs={"pk": post.id})
        response = self.client.get(url, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_delete_post(self):
        """
        Ensure we can delete a post object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="testing game", user=user)
        url = reverse('post-detail', kwargs={'pk': post.id})
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)


class ShareTests(APITestCase):

    def test_create_retweet(self):
        """
        Ensure we can create a new share object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="this is a simple post", user=user)
        url = reverse('share-list')
        data = {'post': post.id, "user": user.id}
        response = self.client.post(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_get_share(self):
        """
        Ensure we can list share objects.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="this is a simple post", user=user)
        url = reverse('share-list')
        data = {'post': post.id, "user": user.id}
        response = self.client.get(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_delete_share(self):
        """
        Ensure we can delete a share object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="created this post", user=user)
        share = Share.objects.create(user=user, post=post)
        # use the share endpoint; 'like-detail' here was a copy-paste slip
        url = reverse('share-detail', kwargs={"pk": share.id})
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)


class LikeTests(APITestCase):

    def test_create_like(self):
        """
        Ensure we can create a new like object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="created this post", user=user)
        url = reverse('like-list')
        data = {'post': post.id, "user": user.id}
        response = self.client.post(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_get_like(self):
        """
        Ensure we can list like objects.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="my own post test", user=user)
        url = reverse('like-list')
        data = {'post': post.id, "user": user.id}
        response = self.client.get(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_get_1like(self):
        """
        Ensure we can list like objects.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="django don loud ooo!!!", user=user)
        url = reverse('like-list')
        data = {'post': post.id, "user": user.id}
        response = self.client.get(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_delete_like(self):
        """
        Ensure we can delete a like object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="created this post", user=user)
        like = Like.objects.create(user=user, post=post)
        url = reverse('like-detail', kwargs={"pk": like.id})
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)


class CommentTests(APITestCase):

    def test_create_comment(self):
        """
        Ensure we can create a new comment object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="this is a comment", user=user)
        url = reverse('comment-list')
        data = {'post': post.id, "user": user.id}
        response = self.client.post(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_get_comment(self):
        """
        Ensure we can list comment objects.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="this is a simple post", user=user)
        url = reverse('comment-list')
        data = {'post': post.id, "user": user.id}
        response = self.client.get(url, data, format='json')
        response_data = response.json()
        print(response_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_delete_comment(self):
        """
        Ensure we can delete a comment object.
        """
        user = User.objects.create_user(username="davinchy", password="johnson.py")
        post = Post.objects.create(text="create a post", user=user)
        comment = Comment.objects.create(user=user, post=post, text="e go better!")
        url = reverse('comment-detail', kwargs={"pk": comment.id})
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data as data import copy import argparse import logging import gc import datetime import pprint from collections import OrderedDict, defaultdict from functools import partial from torch.optim import Adam try: from torch.utils.tensorboard import SummaryWriter except BaseException as e: from tensorboardX import SummaryWriter from model import BMGFModel from dataset import Dataset, Sampler from evaluate import evaluate_accuracy, evaluate_precision_recall_f1 from util import * INF = 1e30 _INF = -1e30 def eval_epoch(args, logger, writer, model, data_type, data_loader, device, epoch): model.eval() epoch_step = len(data_loader) total_step = args.epochs * epoch_step total_cnt = 0 total_ce = 0.0 total_mlce = 0.0 total_loss = 0.0 results = {"data": {"id": list(), "relation": list(), "prefered_relation": list()}, "prediction": {"prob": list(), "pred": list()}, "error": {"ce": list(), "mlce": list(), "mean_ce": INF, "mean_mlce": INF}, "evaluation": {"accuracy": dict(), "precision_recall_f1": dict()}} with torch.no_grad(): for batch_id, batch in enumerate(data_loader): step = epoch*epoch_step+batch_id _id, arg1, arg1_mask, arg2, arg2_mask, relation, prefered_relation = batch prefered_relation = (relation[:, 1] >= 0.5).long() bsz = len(_id) total_cnt += bsz results["data"]["id"].extend(_id) results["data"]["relation"].extend(relation) results["data"]["prefered_relation"].extend(prefered_relation) arg1 = arg1.to(device) arg2 = arg2.to(device) if arg1_mask is not None: arg1_mask = arg1_mask.to(device) if arg2_mask is not None: arg2_mask = arg2_mask.to(device) relation = relation.to(device) prefered_relation = prefered_relation.to(device) output = model(arg1, arg2, arg1_mask, arg2_mask) logp = F.log_softmax(output, dim=-1) prob = logp.exp() results["prediction"]["prob"].extend(prob.cpu().detach()) results["prediction"]["pred"].extend(prob.cpu().argmax(dim=1).detach()) ce = F.nll_loss(logp, prefered_relation, reduction="none") mlce = F.multilabel_soft_margin_loss(output, relation, reduction="none") results["error"]["ce"].extend(ce.cpu().detach()) results["error"]["mlce"].extend(mlce.cpu().detach()) if args.loss == "ce": loss = ce elif args.loss == "mlce": loss = mlce else: raise NotImplementedError("Error: loss=%s is not supported now." 
% (args.loss))

            avg_ce = ce.mean()
            avg_mlce = mlce.mean()
            avg_loss = loss.mean()
            total_ce += avg_ce.item() * bsz
            total_mlce += avg_mlce.item() * bsz
            total_loss += avg_loss.item() * bsz

            if writer:
                writer.add_scalar("%s/pdtb-loss" % (data_type), avg_loss.item(), step)
                writer.add_scalar("%s/pdtb-ce" % (data_type), avg_ce.item(), step)
                writer.add_scalar("%s/pdtb-mlce" % (data_type), avg_mlce.item(), step)
            if logger and batch_id == epoch_step-1:
                logger.info(
                    "epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}\tbatch: {:0>5d}/{:0>5d}".format(
                        epoch, args.epochs, data_type, batch_id, epoch_step) + "\n" +
                    "\tpdtb-loss: {:10.4f}\tpdtb-ce: {:10.4f}\tpdtb-mlce: {:10.4f}".format(
                        avg_loss.item(), avg_ce.item(), avg_mlce.item()) + "\n" +
                    "\tpdtb-gold: {}".format(results["data"]["relation"][-1]) + "\n" +
                    "\tpdtb-pred: {}".format(results["prediction"]["prob"][-1]))

    mean_ce = total_ce / (total_cnt + 1e-6)
    mean_mlce = total_mlce / (total_cnt + 1e-6)
    mean_loss = total_loss / (total_cnt + 1e-6)

    pred = np.array(results["prediction"]["pred"])
    target = torch.cat(results["data"]["relation"], dim=0).view(total_cnt, -1).int().numpy()
    prefered_target = np.array(results["data"]["prefered_relation"])
    results["error"]["mean_ce"] = mean_ce
    results["error"]["mean_mlce"] = mean_mlce
    results["evaluation"]["accuracy"] = evaluate_accuracy(pred, target, prefered_target)
    results["evaluation"]["precision_recall_f1"] = evaluate_precision_recall_f1(pred, target, prefered_target, "binary")

    if writer:
        writer.add_scalar("%s/pdtb-loss-epoch" % (data_type), mean_loss, epoch)
        writer.add_scalar("%s/pdtb-ce-epoch" % (data_type), mean_ce, epoch)
        writer.add_scalar("%s/pdtb-mlce-epoch" % (data_type), mean_mlce, epoch)
    if logger:
        logger.info(
            "epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}".format(
                epoch, args.epochs, data_type) + "\n" +
            "\tpdtb-loss-epoch: {:10.4f}\tpdtb-ce-epoch: {:10.4f}\tpdtb-mlce-epoch: {:10.4f}".format(
                mean_loss, mean_ce, mean_mlce) + "\n" +
            "\tpdtb-accuracy: {}".format(
                pprint.pformat(results["evaluation"]["accuracy"]).replace("\n", "\n\t\t")) + "\n" +
            "\tpdtb-precision_recall_f1: {}".format(
                pprint.pformat(results["evaluation"]["precision_recall_f1"]).replace("\n", "\n\t\t")))
    gc.collect()

    return mean_loss, results


def train_epoch(args, logger, writer, model, optimizer, data_type, data_loader, device, epoch):
    model.train()

    epoch_step = len(data_loader)
    total_step = args.epochs * epoch_step
    total_cnt = 0
    total_ce = 0.0
    total_mlce = 0.0
    total_loss = 0.0
    results = {"data": {"id": list(), "relation": list(), "prefered_relation": list()},
               "prediction": {"prob": list(), "pred": list()},
               "error": {"ce": list(), "mlce": list(), "mean_ce": INF, "mean_mlce": INF},
               "evaluation": {"accuracy": dict(), "precision_recall_f1": dict()}}

    for batch_id, batch in enumerate(data_loader):
        step = epoch*epoch_step+batch_id
        _id, arg1, arg1_mask, arg2, arg2_mask, relation, prefered_relation = batch
        prefered_relation = (relation[:, 1] >= 0.5).long()
        bsz = len(_id)
        total_cnt += bsz

        results["data"]["id"].extend(_id)
        results["data"]["relation"].extend(relation)
        results["data"]["prefered_relation"].extend(prefered_relation)

        arg1 = arg1.to(device)
        arg2 = arg2.to(device)
        if arg1_mask is not None:
            arg1_mask = arg1_mask.to(device)
        if arg2_mask is not None:
            arg2_mask = arg2_mask.to(device)
        relation = relation.to(device)
        prefered_relation = prefered_relation.to(device)

        output = model(arg1, arg2, arg1_mask, arg2_mask)
        logp = F.log_softmax(output, dim=1)
        prob = logp.exp()
        results["prediction"]["prob"].extend(prob.cpu().detach())
results["prediction"]["pred"].extend(prob.cpu().argmax(dim=1).detach()) ce = F.nll_loss(logp, prefered_relation, reduction="none") mlce = F.multilabel_soft_margin_loss(output, relation, reduction="none") results["error"]["ce"].extend(ce.cpu().detach()) results["error"]["mlce"].extend(mlce.cpu().detach()) if args.loss == "ce": loss = ce elif args.loss == "mlce": loss = mlce else: raise NotImplementedError("Error: loss=%s is not supported now." % (args.loss)) avg_ce = ce.mean() avg_mlce = mlce.mean() avg_loss = loss.mean() total_ce += avg_ce.item() * bsz total_mlce += avg_mlce.item() * bsz total_loss += avg_loss.item() * bsz avg_loss.backward() if args.max_grad_norm > 0: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() optimizer.zero_grad() if writer: writer.add_scalar("%s/pdtb-loss" % (data_type), avg_loss.item(), step) writer.add_scalar("%s/pdtb-ce" % (data_type), avg_ce.item(), step) writer.add_scalar("%s/pdtb-mlce" % (data_type), avg_mlce.item(), step) if logger and (batch_id%args.print_every == 0 or batch_id == epoch_step-1): logger.info( "epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}\tbatch: {:0>5d}/{:0>5d}".format( epoch, args.epochs, data_type, batch_id, epoch_step) + "\n" + "\tpdtb-loss: {:10.4f}\tpdtb-ce: {:10.4f}\tpdtb-mlce: {:10.4f}".format( avg_loss.item(), avg_ce.item(), avg_mlce.item()) + "\n" + "\tpdtb-gold: {}".format(results["data"]["relation"][-1]) + "\n" + "\tpdtb-pred: {}".format(results["prediction"]["prob"][-1])) mean_ce = total_ce / (total_cnt + 1e-6) mean_mlce = total_mlce / (total_cnt + 1e-6) mean_loss = total_loss / (total_cnt + 1e-6) pred = np.array(results["prediction"]["pred"]) target = torch.cat(results["data"]["relation"], dim=0).view(total_cnt, -1).int().numpy() prefered_relation = np.array(results["data"]["prefered_relation"]) results["error"]["mean_ce"] = mean_ce results["error"]["mean_mlce"] = mean_mlce results["evaluation"]["accuracy"] = evaluate_accuracy(pred, target, prefered_relation) results["evaluation"]["precision_recall_f1"] = evaluate_precision_recall_f1(pred, target, prefered_relation, "binary") if writer: writer.add_scalar("%s/pdtb-loss-epoch" % (data_type), mean_loss, epoch) writer.add_scalar("%s/pdtb-ce-epoch" % (data_type), mean_ce, epoch) writer.add_scalar("%s/pdtb-mlce-epoch" % (data_type), mean_mlce, epoch) if logger: logger.info( "epoch: {:0>3d}/{:0>3d}\tdata_type: {:<5s}".format( epoch, args.epochs, data_type) + "\n" + "\tpdtb-loss-epoch: {:10.4f}\tpdtb-ce-epoch: {:10.4f}\tpdtb-mlce-epoch: {:10.4f}".format( mean_loss, mean_ce, mean_mlce) + "\n" + "\tpdtb-accuray: {}".format( pprint.pformat(results["evaluation"]["accuracy"]).replace("\n", "\n\t\t")) + "\n" + "\tpdtb-precision_recall_f1: {}".format( pprint.pformat(results["evaluation"]["precision_recall_f1"]).replace("\n", "\n\t\t"))) gc.collect() return mean_loss, results def train(args, logger, writer): # set device if args.gpu_ids is None: device = torch.device("cpu") else: if isinstance(args.gpu_ids, int): args.gpu_ids = [args.gpu_ids] device = torch.device("cuda:%d" % args.gpu_ids[0]) torch.cuda.set_device(device) args.num_rels = 2 # for binary classification if args.pretrained_model_path: # load pretrained model config = load_config(os.path.join(args.pretrained_model_path, "BMGFModel.config")) for by in ["accf1", "f1", "accuracy", "loss"]: best_epochs = get_best_epochs(os.path.join(args.pretrained_model_path, "BMGFModel.log"), by=by) if len(best_epochs) > 0: break logger.info("retrieve the best epochs for BMGFModel: %s" % (best_epochs)) if 
len(best_epochs) > 0: model = BMGFModel(**(config._asdict())) if "test" in best_epochs: model.load_state_dict(torch.load( os.path.join(args.pretrained_model_path, "epoch%d.pt" % (best_epochs["test"])), map_location=device)) elif "valid" in best_epochs: model.load_state_dict(torch.load( os.path.join(args.pretrained_model_path, "epoch%d.pt" % (best_epochs["valid"])), map_location=device)) else: model.load_state_dict(torch.load( os.path.join(args.pretrained_model_path, "epoch%d.pt" % (best_epochs["train"])), map_location=device)) if config.dropout != args.dropout: change_dropout_rate(model, args.dropout) else: raise ValueError("Error: cannot load BMGFModel from %s." % (args.pretrained_model_path)) else: # build model model = BMGFModel(**vars(args)) model.set_finetune(args.finetune) if args.gpu_ids and len(args.gpu_ids) > 1: model = nn.DataParallel(model, device_ids=args.gpu_ids) model = model.to(device) logger.info(model) logger.info("num of trainable parameters: %d" % ( sum(p.numel() for p in model.parameters() if p.requires_grad))) # load data datasets = OrderedDict({ "train": Dataset().load_pt(args.train_dataset_path), "valid": Dataset().load_pt(args.valid_dataset_path), "test": Dataset().load_pt(args.test_dataset_path)}) if args.explicit_dataset_path != "": explicit_dataset = Dataset().load_pt(args.explicit_dataset_path) datasets["train"].data.extend(explicit_dataset.data) del explicit_dataset logger.info("train:valid:test = %d:%d:%d" % (len(datasets["train"]), len(datasets["valid"]), len(datasets["test"]))) rel_map = defaultdict(int) for r in args.relations: for k in Dataset.rel_map_4.keys(): if k.startswith(r): rel_map[k] = 1 assert len(rel_map) > 0 if args.encoder == "roberta": pad_id = 1 else: pad_id = 0 data_loaders = OrderedDict() batchify = partial(Dataset.batchify, rel_map=rel_map, min_arg=args.min_arg, max_arg=args.max_arg, pad_id=pad_id) for data_type in datasets: sampler = Sampler(datasets[data_type], group_by=["arg1", "arg2"], batch_size=args.batch_size, shuffle=data_type=="train", drop_last=False) data_loaders[data_type] = data.DataLoader(datasets[data_type], batch_sampler=sampler, collate_fn=batchify, pin_memory=data_type=="train") # optimizer and losses optimizer = Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) optimizer.zero_grad() best_losses = {dataset: INF for dataset in datasets} best_loss_epochs = {dataset: -1 for dataset in datasets} best_accs = {dataset: _INF for dataset in datasets} best_acc_epochs = {dataset: -1 for dataset in datasets} best_f1s = {dataset: _INF for dataset in datasets} best_f1_epochs = {dataset: -1 for dataset in datasets} best_accf1s = {dataset: _INF for dataset in datasets} best_accf1_epochs = {dataset: -1 for dataset in datasets} for epoch in range(args.epochs): for data_type, data_loader in data_loaders.items(): if data_type == "train": mean_loss, results = train_epoch(args, logger, writer, model, optimizer, data_type, data_loader, device, epoch) else: mean_loss, results = eval_epoch(args, logger, writer, model, data_type, data_loader, device, epoch) save_results(results, os.path.join(args.save_model_dir, "%s_results%d.json" % (data_type, epoch))) if mean_loss <= best_losses[data_type]: best_losses[data_type] = mean_loss best_loss_epochs[data_type] = epoch logger.info("data_type: {:<5s}\tbest pdtb-loss: {:.4f} (epoch: {:0>3d})".format( data_type, best_losses[data_type], best_loss_epochs[data_type])) if args.save_best == "loss": if args.gpu_ids and len(args.gpu_ids) > 1: torch.save(model.module.state_dict(), 
os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)
                    else:
                        torch.save(model.state_dict(),
                                   os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)
            if results["evaluation"]["accuracy"]["overall"] >= best_accs[data_type]:
                best_accs[data_type] = results["evaluation"]["accuracy"]["overall"]
                best_acc_epochs[data_type] = epoch
                logger.info("data_type: {:<5s}\tbest pdtb-accuracy: {:.4f} (epoch: {:0>3d})".format(
                    data_type, best_accs[data_type], best_acc_epochs[data_type]))
                if args.save_best == "acc":
                    if args.gpu_ids and len(args.gpu_ids) > 1:
                        torch.save(model.module.state_dict(),
                                   os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)
                    else:
                        torch.save(model.state_dict(),
                                   os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)
            if results["evaluation"]["precision_recall_f1"]["overall"][-1] >= best_f1s[data_type]:
                best_f1s[data_type] = results["evaluation"]["precision_recall_f1"]["overall"][-1]
                best_f1_epochs[data_type] = epoch
                logger.info("data_type: {:<5s}\tbest pdtb-f1: {:.4f} (epoch: {:0>3d})".format(
                    data_type, best_f1s[data_type], best_f1_epochs[data_type]))
                if args.save_best == "f1":
                    if args.gpu_ids and len(args.gpu_ids) > 1:
                        torch.save(model.module.state_dict(),
                                   os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)
                    else:
                        torch.save(model.state_dict(),
                                   os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)
            if results["evaluation"]["accuracy"]["overall"]+results["evaluation"]["precision_recall_f1"]["overall"][-1] >= best_accf1s[data_type]:
                best_accf1s[data_type] = results["evaluation"]["accuracy"]["overall"]+results["evaluation"]["precision_recall_f1"]["overall"][-1]
                best_accf1_epochs[data_type] = epoch
                logger.info("data_type: {:<5s}\tbest pdtb-accf1: {:.4f} (epoch: {:0>3d})".format(
                    data_type, best_accf1s[data_type], best_accf1_epochs[data_type]))
                if args.save_best == "accf1":
                    if args.gpu_ids and len(args.gpu_ids) > 1:
                        torch.save(model.module.state_dict(),
                                   os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)
                    else:
                        torch.save(model.state_dict(),
                                   os.path.join(args.save_model_dir, "%s_best.pt" % (data_type)),
                                   _use_new_zipfile_serialization=False)

    for data_type in data_loaders:
        logger.info("data_type: {:<5s}\tbest pdtb-loss: {:.4f} (epoch: {:0>3d})".format(
            data_type, best_losses[data_type], best_loss_epochs[data_type]))
        logger.info("data_type: {:<5s}\tbest pdtb-accuracy: {:.4f} (epoch: {:0>3d})".format(
            data_type, best_accs[data_type], best_acc_epochs[data_type]))
        logger.info("data_type: {:<5s}\tbest pdtb-f1: {:.4f} (epoch: {:0>3d})".format(
            data_type, best_f1s[data_type], best_f1_epochs[data_type]))
        logger.info("data_type: {:<5s}\tbest pdtb-accf1: {:.4f} (epoch: {:0>3d})".format(
            data_type, best_accf1s[data_type], best_accf1_epochs[data_type]))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=0,
                        help="random seed")
    parser.add_argument("--n_workers", type=int, default=1,
                        help="number of worker processes")
    # data config
    parser.add_argument("--explicit_dataset_path", type=str, default="",
                        help="explicit Dataset path")
    parser.add_argument("--train_dataset_path", type=str,
                        help="training Dataset path")
    parser.add_argument("--valid_dataset_path", type=str,
                        help="validation Dataset path")
    parser.add_argument("--test_dataset_path",
type=str,
                        help="test Dataset path")
    parser.add_argument("--pretrained_model_path", type=str, default="",
                        help="model path of pretrained BMGFModel")
    parser.add_argument("--save_model_dir", type=str,
                        help="model dir to save models")
    parser.add_argument("--relations", type=str2list, default="",
                        help="which relations are computed")
    parser.add_argument("--min_arg", type=int, default=3,
                        help="the minimum length of arguments")
    parser.add_argument("--max_arg", type=int, default=512,
                        help="the maximum length of arguments")
    # training config
    parser.add_argument("--gpu_ids", type=str2list, default=None,
                        help="gpu ids")
    parser.add_argument("--epochs", type=int, default=50,
                        help="epochs of training")
    parser.add_argument("--batch_size", type=int, default=32,
                        help="batch size of training")
    parser.add_argument("--print_every", type=int, default=100,
                        help="print logs every K batches")
    parser.add_argument("--lr", type=float, default=0.001,
                        help="learning rate for the optimizer")
    parser.add_argument("--weight_decay", type=float, default=0.0005,
                        help="weight decay")
    parser.add_argument("--max_grad_norm", type=float, default=2.0,
                        help="max grad norm for gradient clipping")
    parser.add_argument("--save_best", type=str, default="f1", choices=["loss", "acc", "f1", "accf1"],
                        help="the criteria to save best models")
    parser.add_argument("--loss", type=str, default="ce", choices=["ce", "mlce"],
                        help="loss function")
    # BMGFModel config
    parser.add_argument("--encoder", type=str, default="roberta", choices=["lstm", "bert", "roberta"],
                        help="the encoder")
    parser.add_argument("--finetune", type=str, default="type", choices=["none", "type", "last", "full"],
                        help="how to finetune the encoder")
    parser.add_argument("--hidden_dim", type=int, default=128,
                        help="hidden dimension")
    parser.add_argument("--num_lstm_layers", type=int, default=1,
                        help="number of lstm layers")
    parser.add_argument("--num_perspectives", type=int, default=16,
                        help="number of perspectives for bimpm")
    parser.add_argument("--num_filters", type=int, default=64,
                        help="number of filters for convolutional layers")
    parser.add_argument("--activation", type=str, default="leaky_relu",
                        choices=["relu", "tanh", "softmax", "sigmoid", "leaky_relu", "prelu", "gelu"],
                        help="activation function type")
    parser.add_argument("--dropout", type=float, default=0.2,
                        help="dropout for neural networks")
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    assert len(args.relations) > 0

    ts = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    args.save_model_dir = os.path.join(args.save_model_dir, ts)
    os.makedirs(args.save_model_dir, exist_ok=True)

    # save config
    save_config(args, os.path.join(args.save_model_dir, "BMGFModel.config"))

    # build logger
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%Y/%m/%d %H:%M:%S')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(console)
    logfile = logging.FileHandler(os.path.join(args.save_model_dir, "BMGFModel.log"), 'w')
    logfile.setFormatter(fmt)
    logger.addHandler(logfile)

    # build writer
    writer = SummaryWriter(args.save_model_dir)

    # train
    train(args, logger, writer)
import re

# The `match` method matches a pattern at the beginning of a string
def example_1():
    '''
    >>> example_1()
    <_sre.SRE_Match object; span=(0, 11), match='hello world'>
    '''
    hello_regex = re.compile(r'hello world')
    result = hello_regex.match('hello world blah blah')
    print(result)


def example_2():
    '''
    >>> example_2()
    None
    '''
    hello_regex = re.compile(r'hello world')
    result = hello_regex.match('blah blah hello world')
    print(result)


# We can use the `search` method to find a pattern in the middle of a string
def example_3():
    '''
    >>> example_3()
    <_sre.SRE_Match object; span=(10, 21), match='hello world'>
    '''
    hello_regex = re.compile(r'hello world')
    result = hello_regex.search('blah blah hello world')
    print(result)


# Special Characters
# ╭───────────┬───────────────────────────────────╮
# │ Character │ Meaning                           │
# ├───────────┼───────────────────────────────────┤
# │ .         │ any character except \n (newline) │
# ├───────────┼───────────────────────────────────┤
# │ \d        │ any digit                         │
# ├───────────┼───────────────────────────────────┤
# │ \D        │ any non-digit                     │
# ├───────────┼───────────────────────────────────┤
# │ \s        │ any whitespace character          │
# ├───────────┼───────────────────────────────────┤
# │ \S        │ any non-whitespace character      │
# ├───────────┼───────────────────────────────────┤
# │ \w        │ any word character [A-Za-z0-9_]   │
# ├───────────┼───────────────────────────────────┤
# │ \W        │ any non-word character            │
# ╰───────────┴───────────────────────────────────╯

def example_4():
    '''
    >>> example_4()
    <_sre.SRE_Match object; span=(0, 1), match='7'>
    '''
    digit_regex = re.compile(r'\d')
    result = digit_regex.match('7')
    print(result)


def example_5():
    '''
    >>> example_5()
    <_sre.SRE_Match object; span=(0, 1), match='7'>
    '''
    digit_regex = re.compile(r'\d')
    result = digit_regex.match('78baxter')
    print(result)


def example_6():
    '''
    >>> example_6()
    None
    '''
    digit_regex = re.compile(r'\d')
    result = digit_regex.match('baxter78')
    print(result)


def example_7():
    '''
    >>> example_7()
    <_sre.SRE_Match object; span=(0, 1), match='b'>
    '''
    word_regex = re.compile(r'\w')
    result = word_regex.match('baxter78')
    print(result)


# Quantifiers
# ╭───────────┬────────────────────────────────────╮
# │ Character │ Meaning                            │
# ├───────────┼────────────────────────────────────┤
# │ *         │ 0 or more copies of previous regex │
# ├───────────┼────────────────────────────────────┤
# │ +         │ 1 or more copies of previous regex │
# ├───────────┼────────────────────────────────────┤
# │ ?         │ 0 or 1 copies of previous regex    │
# ├───────────┼────────────────────────────────────┤
# │ {m}       │ m copies of previous regex         │
# ├───────────┼────────────────────────────────────┤
# │ {m,n}     │ m to n copies of previous regex    │
# ╰───────────┴────────────────────────────────────╯

def example_7a():
    '''
    >>> example_7a()
    <_sre.SRE_Match object; span=(0, 6), match='aaaaaa'>
    '''
    regex = re.compile(r'a*')
    result = regex.search('aaaaaabcd')
    print(result)


def example_7b():
    '''
    >>> example_7b()
    <_sre.SRE_Match object; span=(0, 0), match=''>

    This matches the empty string because r'a*' means match 0 or more
    '''
    regex = re.compile(r'a*')
    result = regex.search('bbbbaaaaaabcd')
    print(result)


def example_8():
    '''
    >>> example_8()
    <_sre.SRE_Match object; span=(4, 10), match='aaaaaa'>
    '''
    regex = re.compile(r'a+')
    result = regex.search('bbbbaaaaaabcd')
    print(result)


def example_9():
    '''
    >>> example_9()
    <_sre.SRE_Match object; span=(4, 7), match='abc'>
    <_sre.SRE_Match object; span=(4, 8), match='abbc'>
    None
    '''
    regex = re.compile(r'abb?c')
    result1 = regex.search('____abc____')
    result2 = regex.search('____abbc____')
    result3 = regex.search('____ac_____')
    print(result1)
    print(result2)
    print(result3)


# Options
# We can use r'abc|def' to match 'abc' or 'def'
# And we can use r'ab(c|d)ef' to match 'abcef' or 'abdef'
def example_10():
    '''
    >>> example_10()
    <_sre.SRE_Match object; span=(0, 6), match='baxter'>
    <_sre.SRE_Match object; span=(0, 6), match='harvey'>
    '''
    regex = re.compile(r'baxter|harvey')
    result1 = regex.search('baxter')
    result2 = regex.search('harvey')
    print(result1)
    print(result2)


# Anchors
# ╭───────────┬───────────────────────────────────────╮
# │ Character │ Meaning                               │
# ├───────────┼───────────────────────────────────────┤
# │ ^         │ Matches the start of a string         │
# ├───────────┼───────────────────────────────────────┤
# │ $         │ Matches the end of a string           │
# ├───────────┼───────────────────────────────────────┤
# │ (?=...)   │ Matches if ... matches next           │
# ├───────────┼───────────────────────────────────────┤
# │ (?!...)   │ Matches if ... does not match next    │
# ├───────────┼───────────────────────────────────────┤
# │ (?<=...)  │ Matches if ... matches before         │
# ├───────────┼───────────────────────────────────────┤
# │ (?<!...)  │ Matches if ... does not match before  │
# ╰───────────┴───────────────────────────────────────╯

def example_11():
    '''
    >>> example_11()
    None
    <_sre.SRE_Match object; span=(0, 5), match='hello'>
    '''
    regex = re.compile(r'^hello')
    result1 = regex.search('blah hello')
    result2 = regex.search('hello')
    print(result1)
    print(result2)


def example_12():
    '''
    >>> example_12()
    <_sre.SRE_Match object; span=(5, 10), match='hello'>
    None
    '''
    regex = re.compile(r'hello$')
    result1 = regex.search('blah hello')
    result2 = regex.search('hello blah')
    print(result1)
    print(result2)


def example_13():
    '''
    >>> example_13()
    <_sre.SRE_Match object; span=(0, 15), match='The quick brown'>
    '''
    regex = re.compile(r'[\w ]+(?=\sfox)')
    result = regex.search('The quick brown fox')
    print(result)


def example_14():
    '''
    >>> example_14()
    <_sre.SRE_Match object; span=(7, 15), match='Workshop'>
    '''
    regex = re.compile(r'(?<=HackBU )\w+')
    result = regex.search('HackBU Workshop')
    print(result)
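# Added illustrations (not in the original tutorial): the quantifier table
# above lists {m} and {m,n} but no example exercises them, and the anchors
# table lists negative lookahead without a demo. Both sketches follow the
# same doctest style as the examples above.
def example_15():
    '''
    >>> example_15()
    <_sre.SRE_Match object; span=(0, 3), match='aaa'>
    None
    '''
    regex = re.compile(r'^a{2,3}$')
    result1 = regex.search('aaa')
    result2 = regex.search('aaaa')
    print(result1)
    print(result2)


def example_16():
    '''
    >>> example_16()
    <_sre.SRE_Match object; span=(0, 6), match='HackBU'>
    None
    '''
    regex = re.compile(r'HackBU(?! Workshop)')
    result1 = regex.search('HackBU Meetup')
    result2 = regex.search('HackBU Workshop')
    print(result1)
    print(result2)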
from __future__ import annotations import os import time from typing import Any, Dict, List, Optional, Set, Union from xrpl.models import FederatorInfo from slk.chain.chain import Chain from slk.chain.node import Node from slk.classes.config_file import ConfigFile class Mainchain(Chain): """Representation of a mainchain.""" def __init__( self: Mainchain, exe: str, *, config: ConfigFile, command_log: Optional[str] = None, run_server: bool = False, server_out: str = os.devnull, ) -> None: node = Node(config=config, command_log=command_log, exe=exe, name="mainchain") self.server_running = False super().__init__(node) if run_server: self.servers_start(server_out=server_out) @property def standalone(self: Mainchain) -> bool: return True def get_pids(self: Mainchain) -> List[int]: if pid := self.node.get_pid(): return [pid] return [] def get_node(self: Mainchain, i: Optional[int] = None) -> Node: assert i is None return self.node def get_configs(self: Mainchain) -> List[ConfigFile]: return [self.node.config] def get_running_status(self: Mainchain) -> List[bool]: if self.node.get_pid(): return [True] else: return [False] def shutdown(self: Mainchain) -> None: self.node.shutdown() self.servers_stop() def servers_start( self: Mainchain, *, server_indexes: Optional[Union[Set[int], List[int]]] = None, server_out: str = os.devnull, ) -> None: if server_indexes is not None: raise Exception("Mainchain does not have server indexes.") if self.server_running: return self.node.start_server(standalone=True, server_out=server_out) self.server_running = True # wait until the server has started up counter = 0 while not self.node.server_started(): counter += 1 if counter == 20: # 10 second timeout raise Exception("Timeout: server took too long to start.") time.sleep(0.5) def servers_stop( self: Mainchain, server_indexes: Optional[Union[Set[int], List[int]]] = None ) -> None: if server_indexes is not None: raise Exception("Mainchain does not have server indexes.") if self.server_running: self.node.stop_server() self.server_running = False # Get a dict of the server_state, validated_ledger_seq, and complete_ledgers def get_brief_server_info(self: Mainchain) -> Dict[str, List[Any]]: ret = {} for (k, v) in self.node.get_brief_server_info().items(): ret[k] = [v] return ret def federator_info( self: Mainchain, server_indexes: Optional[Union[Set[int], List[int]]] = None ) -> Dict[int, Dict[str, Any]]: # key is server index. value is federator_info result result_dict = {} # TODO: do this more elegantly if server_indexes is not None and 0 in server_indexes: result_dict[0] = self.node.request(FederatorInfo()) return result_dict
import numpy as np
import collections


class K_Means:
    def __init__(self, k=2, tol=0.001, max_iter=300):
        self.k = k
        self.tol = tol
        self.max_iter = max_iter

    def fit(self, data):
        self.centroids = {}

        for i in range(self.k):
            self.centroids[i] = data[i]

        for i in range(self.max_iter):
            self.classification = collections.defaultdict(list)

            # assign every sample to its nearest centroid
            for featureset in data:
                distances = [np.linalg.norm(featureset - self.centroids[c]) for c in self.centroids]
                classification = distances.index(min(distances))
                self.classification[classification].append(featureset)

            prev = dict(self.centroids)

            # move each centroid to the mean of its assigned samples
            for classification in self.classification:
                self.centroids[classification] = np.average(self.classification[classification], axis=0)

            optimized = True
            for c in self.centroids:
                original_centroid = prev[c]
                current_centroid = self.centroids[c]
                # compare the absolute percentage change; without abs(),
                # positive and negative shifts can cancel out and stop the
                # loop before the centroids have actually converged
                if np.sum(np.abs((current_centroid - original_centroid) / original_centroid * 100.0)) > self.tol:
                    optimized = False

            if optimized:
                break

    def predict(self, x):
        distances = [np.linalg.norm(x - self.centroids[centroid]) for centroid in self.centroids]
        classification = distances.index(min(distances))
        return classification

# def distEclud(vecA, vecB):
#     return np.sqrt(np.sum(np.power(vecA-vecB, 2)))
#
# def randCent(dataset, k):
#     n = dataset.shape[1]
#     centroids = np.mat(np.zeros((k, n)))
#     for i in range(n):
#         minI = min(dataset[:, i])
#         rangeI = np.float(max(dataset[:, i]) - minI)
#         centroids[:, i] = minI + rangeI*np.random.rand(k, 1)
#     return centroids
#
# def kMeans(dataset, k, distMeans=distEclud, createCent=randCent):
#     m = dataset.shape[0]
#     clusterAssment = np.mat(np.zeros((m, 2)))  # index of cluster, distance
#     centroids = createCent(dataset, k)
#     clusterChanged = True
#     while clusterChanged:
#         clusterChanged = False
#         for i in range(m):
#             minDist = float('inf')  # was float('-inf'), which would never be updated
#             minIndex = -1
#             for j in range(k):
#                 distJI = distMeans(centroids[j, :], dataset[i, :])
#                 if distJI < minDist:
#                     minDist = distJI
#                     minIndex = j
#             if clusterAssment[i, 0] != minIndex:
#                 clusterChanged = True
#         for cent in range(k):
#             ptsInClust = dataset[np.nonzero(clusterAssment[:, 0] == cent)]
#             centroids[cent, :] = np.mean(ptsInClust, axis=0)
#     return centroids, clusterAssment
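# A minimal usage sketch (an added illustration, not part of the original
# module): fit the class above on a toy 2-D dataset and classify a new point.
# The data values are illustrative assumptions.
if __name__ == '__main__':
    X = np.array([[1.0, 2.0], [1.5, 1.8], [5.0, 8.0],
                  [8.0, 8.0], [1.0, 0.6], [9.0, 11.0]])
    clf = K_Means(k=2)
    clf.fit(X)
    print(clf.centroids)
    print(clf.predict(np.array([0.0, 0.0])))  # index of the nearest centroid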
from __future__ import absolute_import from pytest import fixture, raises from openvpn_status.descriptors import ( LabelProperty, name_descriptors, iter_descriptors) @fixture def foo_class(): @name_descriptors class Foo(object): foo = LabelProperty('Foo') bar = LabelProperty('Bar', default=lambda: 0, input_type=int) baz = property(lambda self: self.bar) biu = () return Foo def test_label_and_its_name(foo_class): foo = foo_class() with raises(AttributeError): foo.foo assert foo.bar == 0 assert foo.baz == 0 foo.foo = u'1' foo.bar = u'2' assert foo.foo == u'1' assert foo.bar == 2 assert foo.baz == 2 def test_iter_descriptors(foo_class): assert dict(iter_descriptors(foo_class)) == { 'foo': foo_class.foo, 'bar': foo_class.bar, 'baz': foo_class.baz, }
import jax.numpy as jnp from jax import grad, tree_util, lax from jax_meta.utils.losses import cross_entropy from jax_meta.utils.metrics import accuracy from jax_meta.metalearners.base import MetaLearner class MAML(MetaLearner): def __init__(self, model, num_steps=5, alpha=0.1): super().__init__() self.model = model self.num_steps = num_steps self.alpha = alpha def loss(self, params, state, inputs, targets, args): logits, state = self.model.apply(params, state, inputs, *args) loss = jnp.mean(cross_entropy(logits, targets)) logs = { 'loss': loss, 'accuracy': accuracy(logits, targets), } return loss, (state, logs) def adapt(self, init_params, state, inputs, targets, args): loss_grad = grad(self.loss, has_aux=True) gradient_descent = lambda p, g: p - self.alpha * g # Gradient descent def _gradient_update(params, _): # Do not update the state during adaptation grads, (_, logs) = loss_grad(params, state, inputs, targets, args) params = tree_util.tree_map(gradient_descent, params, grads) return params, logs return lax.scan( _gradient_update, init_params, None, length=self.num_steps ) def meta_init(self, key, *args, **kwargs): return self.model.init(key, *args, **kwargs)
from django import forms
from django.forms import ModelForm

from .models import *


class TaskForm(forms.ModelForm):
    title = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Add New Task...'}))

    class Meta:
        model = Task
        # a ModelForm must declare either `fields` or `exclude`
        fields = ['title', 'complete']
import os import random import threading import codecs import queue import librosa import numpy as np from params import hparams def read_binary_lc(file_path, dimension): f = open(file_path, 'rb') features = np.fromfile(f, dtype=np.float32) f.close() assert features.size % float(dimension) == 0.0,\ 'specified dimension %s not compatible with data' % (dimension,) features = features.reshape((-1, dimension)) return features def read_wave_and_lc_features(filelist_scpfile, wave_dir, lc_dir): filelist = [] with codecs.open(filelist_scpfile, 'r', 'utf-8') as f: for line in f: line = line.strip() file_id = line filelist.append(file_id) random.shuffle(filelist) for file_id in filelist: wave_path = os.path.join(wave_dir, file_id + '.wav') lc_path = os.path.join(lc_dir, file_id + '.mel') # read wave audio, _ = librosa.load(wave_path, sr=hparams.sample_rate, mono=True) audio = audio.reshape(-1, 1) # read local condition lc_features = read_binary_lc(lc_path, hparams.num_mels) yield audio, lc_features, file_id class DataReader(object): '''Generic background audio reader that preprocesses audio files and enqueues them into a TensorFlow queue.''' def __init__(self, coord, filelist, wave_dir, lc_dir, queue_size=512): self.coord = coord self.filelist = filelist self.wave_dir = wave_dir self.lc_dir = lc_dir self.lc_dim = hparams.num_mels self.lc_frames = hparams.sample_size // hparams.upsampling_rate # recompute a sample size self.sample_size = self.lc_frames * hparams.upsampling_rate self.upsample_rate = hparams.upsampling_rate self.threads = [] self.queue = queue.Queue(maxsize=queue_size) def dequeue(self, num_elements): batch_audio = np.empty([0, self.sample_size, 1]) batch_lc = np.empty([0, self.lc_frames, self.lc_dim]) for i in range(num_elements): audio, lc = self.queue.get(block=True) audio = np.reshape(audio, [1, self.sample_size, 1]) lc = np.reshape(lc, [1, self.lc_frames, self.lc_dim]) batch_audio = np.concatenate([batch_audio, audio], axis=0) batch_lc = np.concatenate([batch_lc, lc], axis=0) return batch_audio, batch_lc def thread_main(self): stop = False # Go through the dataset multiple times while not stop: iterator = read_wave_and_lc_features(self.filelist, self.wave_dir, self.lc_dir) for audio, lc_features, file_id in iterator: if self.coord.should_stop(): stop = True break # force align wave & local condition if len(audio) > len(lc_features) * self.upsample_rate: # clip audio audio = audio[:len(lc_features) * self.upsample_rate, :] elif len(audio) < len(lc_features) * self.upsample_rate: # clip local condition and audio audio_frames = len(audio) // self.upsample_rate frames = min(audio_frames, len(lc_features)) audio = audio[:frames*self.upsample_rate, :] lc_features = lc_features[:frames, :] else: pass # add randomness for the data-generator frames = len(lc_features) if frames > self.lc_frames: max_frame_start = frames - self.lc_frames lc_start = random.randint(0, max_frame_start) audio = audio[lc_start*self.upsample_rate:, :] lc_features = lc_features[lc_start:, :] while len(audio) >= self.sample_size and len(lc_features) >= self.lc_frames: audio_piece = audio[:self.sample_size, :] lc_piece = lc_features[:self.lc_frames, :] self.queue.put([audio_piece, lc_piece]) audio = audio[self.sample_size:, :] lc_features = lc_features[self.lc_frames:, :] def start_threads(self, n_threads=1): for _ in range(n_threads): thread = threading.Thread(target=self.thread_main, args=()) thread.daemon = True # Thread will close when parent quits. thread.start() self.threads.append(thread) return self.threads
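# A minimal usage sketch (an added illustration; the file and directory names
# are placeholder assumptions, and tf.train.Coordinator comes from the TF1-era
# API this reader was written against):
#
#   import tensorflow as tf
#
#   coord = tf.train.Coordinator()
#   reader = DataReader(coord, 'train_filelist.scp', 'wavs/', 'mels/')
#   reader.start_threads(n_threads=2)
#   batch_audio, batch_lc = reader.dequeue(num_elements=8)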
import socket, select, sys

if len(sys.argv) != 3:
    print("USAGE: " + sys.argv[0] + " dstHost dstPort", file=sys.stderr)
    exit(1)

# resolve the destination address we will send to
dstAddrInfo = socket.getaddrinfo(sys.argv[1], sys.argv[2], proto=socket.IPPROTO_TCP)

# getaddrinfo may return several addresses, so try them in turn until one works
sfd = None
for aiIter in dstAddrInfo:
    try:
        print("try connect to:", aiIter[4])
        # create the network socket ... SOCK_STREAM means TCP
        sfd = socket.socket(aiIter[0], socket.SOCK_STREAM)
        # connect to the resolved address
        sfd.connect(aiIter[4])
    except OSError:
        # on failure ... close the socket
        if sfd:
            sfd.close()
            sfd = None
        # and try the next address
        continue
    break

if sfd == None:
    print("Can't connect", file=sys.stderr)
    exit(1)

# send data
sfd.sendall("Ala ma Kota\n".encode())

# wait for a reply
rdfd, _, _ = select.select([sfd], [], [], 13.0)
if sfd in rdfd:
    d = sfd.recv(4096)
    print(d.decode())

# close the connection
sfd.shutdown(socket.SHUT_RDWR)
sfd.close()
from aiogram import types

from keyboards.inline import soon_be_available, faculties, back_callback
from keyboards.inline.admin import send_msg, edit_subgroups, admins, edit_admins
from loader import dp
from models import Admin
from states import menu
from states.admin import AdminStates
from utils.misc import get_current_admin
from middlewares import _


@get_current_admin()
@dp.callback_query_handler(state=menu.MenuStates.admin)
async def get_section_settings(call: types.CallbackQuery, admin: Admin):
    await call.answer()
    if call.data == 'msg-sender':
        await AdminStates.send_msg.set()
        keyboard = await send_msg.get_keyboard(admin)
        await call.message.edit_text(_('Choose one of the menu options:'), reply_markup=keyboard)
    elif call.data == 'edit-faculties':
        await AdminStates.faculties.set()
        await admin.fetch_related("faculty")
        keyboard = await faculties.get_keyboard(
            admin.role.name == 'supreme',
            admin.faculty if admin.role.name == 'improved' else False)
        await call.message.edit_text(_('Choose a faculty or add a new one:'), reply_markup=keyboard)
    elif call.data == 'edit-groups':
        await AdminStates.groups.set()
        keyboard = None
        if admin.role.name == 'supreme':
            keyboard = await faculties.get_keyboard()
        else:
            await admin.fetch_related('faculty')
            keyboard = await faculties.get_keyboard(one_faculty=admin.faculty)
        await call.message.edit_text(_('Choose a faculty:'), reply_markup=keyboard)
    elif call.data == 'edit-subgroups':
        await admin.fetch_related('group')
        await AdminStates.subgroups.set()
        if admin.group:
            keyboard = await edit_subgroups.get_keyboard(admin.group.id)
            await call.message.edit_text(_('Choose a subgroup or add a new one:'), reply_markup=keyboard)
        else:
            keyboard = types.InlineKeyboardMarkup(row_width=1)
            keyboard.add(types.InlineKeyboardButton(_('Back'), callback_data=back_callback.new(category='lang')))
            await call.message.edit_text(_("Hmm... You don't seem to be a group monitor, how did you even get here?"),
                                         reply_markup=keyboard)
    elif call.data == 'edit-events':
        await AdminStates.events.set()
        await admin.fetch_related('group')
        keyboard = await edit_subgroups.get_keyboard(admin.group.id, editable=False, for_events=True)
        await call.message.edit_text(_('Choose a subgroup:'), reply_markup=keyboard)
    elif call.data == 'edit-admins':
        await AdminStates.admins.set()
        await call.message.edit_text(_('Send me a message from the padawan you want to grant the Jedi Force'),
                                     reply_markup=edit_admins.keyboard)
import numpy as np

import qnt_utils as qntu


def DyS(pos_scores, neg_scores, test_scores, measure='topsoe'):
    # candidate numbers of histogram bins: 2, 4, ..., 20, plus 30
    bin_size = np.linspace(2, 20, 10)
    bin_size = np.append(bin_size, 30)

    result = []
    for bins in bin_size:
        # build score histograms for the positive/negative validation scores
        # and for the unlabeled test scores
        p_bin_count = qntu.getHist(pos_scores, bins)
        n_bin_count = qntu.getHist(neg_scores, bins)
        te_bin_count = qntu.getHist(test_scores, bins)

        # find the mixture weight x that makes x*P + (1-x)*N closest to the
        # test histogram under the chosen distance
        def f(x):
            return qntu.DyS_distance(((p_bin_count * x) + (n_bin_count * (1 - x))), te_bin_count, measure=measure)

        result.append(qntu.TernarySearch(0, 1, f))

    # the median across bin sizes is the final prevalence estimate
    pos_prop = round(np.median(result), 2)
    return pos_prop
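# A minimal usage sketch (an added illustration, assuming the qnt_utils
# helpers imported above are available): the score arrays are synthetic,
# with positives scoring high and negatives scoring low, and the test set
# mixed at a true positive prevalence of 0.3.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    pos = rng.beta(8, 2, size=500)
    neg = rng.beta(2, 8, size=500)
    test = np.concatenate([rng.beta(8, 2, size=300), rng.beta(2, 8, size=700)])
    print(DyS(pos, neg, test))  # expected to land near 0.3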